[llvm] fb54afd - AMDGPU: Fold fsub [+-0] into fneg when folding source modifiers

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 20 16:30:02 PDT 2023


Author: Matt Arsenault
Date: 2023-07-20T19:29:40-04:00
New Revision: fb54afd1b7a5287f521759badf0a72c5ab544ca8

URL: https://github.com/llvm/llvm-project/commit/fb54afd1b7a5287f521759badf0a72c5ab544ca8
DIFF: https://github.com/llvm/llvm-project/commit/fb54afd1b7a5287f521759badf0a72c5ab544ca8.diff

LOG: AMDGPU: Fold fsub [+-0] into fneg when folding source modifiers

This isn't always folded to fneg for a freestanding fsub depending on
the denormal mode. When matching source modifiers, we're implicitly
canonicalizing the input so we can fold it here.

Doesn't bother handling the VOP3P case since it's only relevant with
DAZ, which nobody really uses with f16.

For f64, tests show an existing bug where DAGCombiner tries to respect
the denormal mode for fsub -0, x, but not after it's lowered to fadd
-0, (fneg x). Either the fold is wrong or we shouldn't restrict the
fsub case based on the denormal mode.

https://reviews.llvm.org/D155652

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUGISel.td
    llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
    llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
    llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
    llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
    llvm/lib/Target/AMDGPU/SIInstrInfo.td
    llvm/lib/Target/AMDGPU/SIInstructions.td
    llvm/lib/Target/AMDGPU/VOPCInstructions.td
    llvm/lib/Target/AMDGPU/VOPInstructions.td
    llvm/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll
    llvm/test/CodeGen/AMDGPU/fneg-combines.ll
    llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
    llvm/test/CodeGen/AMDGPU/fsub-as-fneg-src-modifier.ll
    llvm/test/CodeGen/AMDGPU/llvm.exp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
index 0f3e3c0c2c7eb6..37df4f68c26562 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -31,6 +31,10 @@ def gi_vop3mods :
     GIComplexOperandMatcher<s32, "selectVOP3Mods">,
     GIComplexPatternEquiv<VOP3Mods>;
 
+def gi_vop3modsnoncanonicalizing :
+    GIComplexOperandMatcher<s32, "selectVOP3ModsNonCanonicalizing">,
+    GIComplexPatternEquiv<VOP3ModsNonCanonicalizing>;
+
 def gi_vop3_no_mods :
     GIComplexOperandMatcher<s32, "selectVOP3NoMods">,
     GIComplexPatternEquiv<VOP3NoMods>;

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 25e188fb02ca27..85c639e96c3ff2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -2570,6 +2570,7 @@ void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) {
 
 bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
                                             unsigned &Mods,
+                                            bool IsCanonicalizing,
                                             bool AllowAbs) const {
   Mods = 0;
   Src = In;
@@ -2577,6 +2578,14 @@ bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
   if (Src.getOpcode() == ISD::FNEG) {
     Mods |= SISrcMods::NEG;
     Src = Src.getOperand(0);
+  } else if (Src.getOpcode() == ISD::FSUB && IsCanonicalizing) {
+    // Fold fsub [+-]0 into fneg. This may not have folded depending on the
+    // denormal mode, but we're implicitly canonicalizing in a source operand.
+    auto *LHS = dyn_cast<ConstantFPSDNode>(Src.getOperand(0));
+    if (LHS && LHS->isZero()) {
+      Mods |= SISrcMods::NEG;
+      Src = Src.getOperand(1);
+    }
   }
 
   if (AllowAbs && Src.getOpcode() == ISD::FABS) {
@@ -2590,7 +2599,20 @@ bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
 bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
                                         SDValue &SrcMods) const {
   unsigned Mods;
-  if (SelectVOP3ModsImpl(In, Src, Mods)) {
+  if (SelectVOP3ModsImpl(In, Src, Mods, /*IsCanonicalizing=*/true,
+                         /*AllowAbs=*/true)) {
+    SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
+    return true;
+  }
+
+  return false;
+}
+
+bool AMDGPUDAGToDAGISel::SelectVOP3ModsNonCanonicalizing(
+    SDValue In, SDValue &Src, SDValue &SrcMods) const {
+  unsigned Mods;
+  if (SelectVOP3ModsImpl(In, Src, Mods, /*IsCanonicalizing=*/false,
+                         /*AllowAbs=*/true)) {
     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
     return true;
   }
@@ -2601,7 +2623,9 @@ bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
 bool AMDGPUDAGToDAGISel::SelectVOP3BMods(SDValue In, SDValue &Src,
                                          SDValue &SrcMods) const {
   unsigned Mods;
-  if (SelectVOP3ModsImpl(In, Src, Mods, /* AllowAbs */ false)) {
+  if (SelectVOP3ModsImpl(In, Src, Mods,
+                         /*IsCanonicalizing=*/true,
+                         /*AllowAbs=*/false)) {
     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
     return true;
   }
@@ -2621,7 +2645,9 @@ bool AMDGPUDAGToDAGISel::SelectVINTERPModsImpl(SDValue In, SDValue &Src,
                                                SDValue &SrcMods,
                                                bool OpSel) const {
   unsigned Mods;
-  if (SelectVOP3ModsImpl(In, Src, Mods, /* AllowAbs */ false)) {
+  if (SelectVOP3ModsImpl(In, Src, Mods,
+                         /*IsCanonicalizing=*/true,
+                         /*AllowAbs=*/false)) {
     if (OpSel)
       Mods |= SISrcMods::OP_SEL_0;
     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
@@ -2677,6 +2703,7 @@ bool AMDGPUDAGToDAGISel::SelectVOP3PMods(SDValue In, SDValue &Src,
   unsigned Mods = 0;
   Src = In;
 
+  // TODO: Handle G_FSUB 0 as fneg
   if (Src.getOpcode() == ISD::FNEG) {
     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
     Src = Src.getOperand(0);

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
index a08b0fb02e4979..0605baf3a0ccb2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
@@ -216,8 +216,11 @@ class AMDGPUDAGToDAGISel : public SelectionDAGISel {
   bool SelectMOVRELOffset(SDValue Index, SDValue &Base, SDValue &Offset) const;
 
   bool SelectVOP3ModsImpl(SDValue In, SDValue &Src, unsigned &SrcMods,
+                          bool IsCanonicalizing = true,
                           bool AllowAbs = true) const;
   bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
+  bool SelectVOP3ModsNonCanonicalizing(SDValue In, SDValue &Src,
+                                       SDValue &SrcMods) const;
   bool SelectVOP3BMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
   bool SelectVOP3NoMods(SDValue In, SDValue &Src) const;
   bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 63af422028feac..747f9fe2f8aeff 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -3486,8 +3486,10 @@ AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
 
 }
 
-std::pair<Register, unsigned> AMDGPUInstructionSelector::selectVOP3ModsImpl(
-    MachineOperand &Root, bool AllowAbs, bool OpSel) const {
+std::pair<Register, unsigned>
+AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
+                                              bool IsCanonicalizing,
+                                              bool AllowAbs, bool OpSel) const {
   Register Src = Root.getReg();
   unsigned Mods = 0;
   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
@@ -3496,6 +3498,15 @@ std::pair<Register, unsigned> AMDGPUInstructionSelector::selectVOP3ModsImpl(
     Src = MI->getOperand(1).getReg();
     Mods |= SISrcMods::NEG;
     MI = getDefIgnoringCopies(Src, *MRI);
+  } else if (MI->getOpcode() == AMDGPU::G_FSUB && IsCanonicalizing) {
+    // Fold fsub [+-]0 into fneg. This may not have folded depending on the
+    // denormal mode, but we're implicitly canonicalizing in a source operand.
+    const ConstantFP *LHS =
+        getConstantFPVRegVal(MI->getOperand(1).getReg(), *MRI);
+    if (LHS && LHS->isZero()) {
+      Mods |= SISrcMods::NEG;
+      Src = MI->getOperand(2).getReg();
+    }
   }
 
   if (AllowAbs && MI->getOpcode() == AMDGPU::G_FABS) {
@@ -3558,7 +3569,9 @@ InstructionSelector::ComplexRendererFns
 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
   Register Src;
   unsigned Mods;
-  std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
+  std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
+                                           /*IsCanonicalizing=*/true,
+                                           /*AllowAbs=*/false);
 
   return {{
       [=](MachineInstrBuilder &MIB) {
@@ -3593,11 +3606,27 @@ AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
   }};
 }
 
+InstructionSelector::ComplexRendererFns
+AMDGPUInstructionSelector::selectVOP3ModsNonCanonicalizing(
+    MachineOperand &Root) const {
+  Register Src;
+  unsigned Mods;
+  std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /*IsCanonicalizing=*/false);
+
+  return {{
+      [=](MachineInstrBuilder &MIB) {
+        MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
+      },
+      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
+  }};
+}
+
 InstructionSelector::ComplexRendererFns
 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
   Register Src;
   unsigned Mods;
-  std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
+  std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /*IsCanonicalizing=*/true,
+                                           /*AllowAbs=*/false);
 
   return {{
       [=](MachineInstrBuilder &MIB) {
@@ -3633,6 +3662,8 @@ AMDGPUInstructionSelector::selectVOP3PModsImpl(
     MI = MRI.getVRegDef(Src);
   }
 
+  // TODO: Handle G_FSUB 0 as fneg
+
   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
   (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()
 
@@ -3719,8 +3750,9 @@ AMDGPUInstructionSelector::selectVINTERPMods(MachineOperand &Root) const {
   Register Src;
   unsigned Mods;
   std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
-                                           /* AllowAbs */ false,
-                                           /* OpSel */ false);
+                                           /*IsCanonicalizing=*/true,
+                                           /*AllowAbs=*/false,
+                                           /*OpSel=*/false);
 
   return {{
       [=](MachineInstrBuilder &MIB) {
@@ -3736,8 +3768,9 @@ AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const {
   Register Src;
   unsigned Mods;
   std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
-                                           /* AllowAbs */ false,
-                                           /* OpSel */ true);
+                                           /*IsCanonicalizing=*/true,
+                                           /*AllowAbs=*/false,
+                                           /*OpSel=*/true);
 
   return {{
       [=](MachineInstrBuilder &MIB) {

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
index c09ac8f27a4804..243ff72e29797e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -148,9 +148,10 @@ class AMDGPUInstructionSelector final : public InstructionSelector {
   bool selectSMFMACIntrin(MachineInstr &I) const;
   bool selectWaveAddress(MachineInstr &I) const;
 
-  std::pair<Register, unsigned>
-  selectVOP3ModsImpl(MachineOperand &Root, bool AllowAbs = true,
-                     bool OpSel = false) const;
+  std::pair<Register, unsigned> selectVOP3ModsImpl(MachineOperand &Root,
+                                                   bool IsCanonicalizing = true,
+                                                   bool AllowAbs = true,
+                                                   bool OpSel = false) const;
 
   Register copyToVGPRIfSrcFolded(Register Src, unsigned Mods,
                                  MachineOperand Root, MachineInstr *InsertPt,
@@ -171,6 +172,8 @@ class AMDGPUInstructionSelector final : public InstructionSelector {
   InstructionSelector::ComplexRendererFns
   selectVOP3Mods(MachineOperand &Root) const;
   InstructionSelector::ComplexRendererFns
+  selectVOP3ModsNonCanonicalizing(MachineOperand &Root) const;
+  InstructionSelector::ComplexRendererFns
   selectVOP3BMods(MachineOperand &Root) const;
 
   ComplexRendererFns selectVOP3NoMods(MachineOperand &Root) const;

diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 50199167e50b70..044bc4507d3a8c 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -1340,7 +1340,16 @@ def DS128Bit8ByteAligned : ComplexPattern<iPTR, 3, "SelectDS128Bit8ByteAligned">
 def MOVRELOffset : ComplexPattern<iPTR, 2, "SelectMOVRELOffset">;
 
 def VOP3Mods0 : ComplexPattern<untyped, 4, "SelectVOP3Mods0">;
+
+// Modifiers for floating point instructions.
 def VOP3Mods  : ComplexPattern<untyped, 2, "SelectVOP3Mods">;
+
+// VOP3 modifiers used for instructions that do not read canonicalized
+// floating point values (i.e. integer operations with FP source
+// modifiers)
+def VOP3ModsNonCanonicalizing : ComplexPattern<untyped, 2,
+  "SelectVOP3ModsNonCanonicalizing">;
+
 def VOP3NoMods : ComplexPattern<untyped, 1, "SelectVOP3NoMods">;
 
 def VOP3OMods : ComplexPattern<untyped, 3, "SelectVOP3OMods">;

diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 76b4d83866df57..c05692ad521698 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -1128,8 +1128,8 @@ def : GCNPat <
 >;
 
 class VOPSelectModsPat <ValueType vt> : GCNPat <
-  (vt (select i1:$src0, (VOP3Mods vt:$src1, i32:$src1_mods),
-                        (VOP3Mods vt:$src2, i32:$src2_mods))),
+  (vt (select i1:$src0, (VOP3ModsNonCanonicalizing vt:$src1, i32:$src1_mods),
+                        (VOP3ModsNonCanonicalizing vt:$src2, i32:$src2_mods))),
   (V_CNDMASK_B32_e64 FP32InputMods:$src2_mods, VSrc_b32:$src2,
                      FP32InputMods:$src1_mods, VSrc_b32:$src1, SSrc_i1:$src0)
 >;

diff --git a/llvm/lib/Target/AMDGPU/VOPCInstructions.td b/llvm/lib/Target/AMDGPU/VOPCInstructions.td
index f1c14460e891fc..6fc3d0957dce19 100644
--- a/llvm/lib/Target/AMDGPU/VOPCInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPCInstructions.td
@@ -831,7 +831,7 @@ class getVOPCClassPat64 <VOPProfile P> {
   list<dag> ret =
     [(set i1:$sdst,
       (AMDGPUfp_class
-        (P.Src0VT (VOP3Mods P.Src0VT:$src0, i32:$src0_modifiers)),
+        (P.Src0VT (VOP3ModsNonCanonicalizing P.Src0VT:$src0, i32:$src0_modifiers)),
         i32:$src1))];
 }
 

diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index c5e1d4f62d0767..3755daf4f9b18e 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -1487,7 +1487,7 @@ include "VOP3PInstructions.td"
 include "VOPDInstructions.td"
 
 class ClassPat<Instruction inst, ValueType vt> : GCNPat <
-  (is_fpclass (vt (VOP3Mods vt:$src0, i32:$src0_mods)), (i32 timm:$mask)),
+  (is_fpclass (vt (VOP3ModsNonCanonicalizing vt:$src0, i32:$src0_mods)), (i32 timm:$mask)),
   (inst i32:$src0_mods, vt:$src0, (V_MOV_B32_e32 timm:$mask))
 >;
 

diff --git a/llvm/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll b/llvm/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll
index 5b1241ffe75c8a..6d923c31889311 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll
@@ -752,25 +752,23 @@ define amdgpu_kernel void @div_1_by_minus_x_correctly_rounded(ptr addrspace(1) %
 ; GCN-FLUSH:       ; %bb.0:
 ; GCN-FLUSH-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GCN-FLUSH-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-FLUSH-NEXT:    s_load_dword s2, s[0:1], 0x0
+; GCN-FLUSH-NEXT:    s_load_dword s4, s[0:1], 0x0
 ; GCN-FLUSH-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-FLUSH-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-FLUSH-NEXT:    v_sub_f32_e32 v0, 0x80000000, v0
-; GCN-FLUSH-NEXT:    v_div_scale_f32 v1, s[2:3], v0, v0, 1.0
-; GCN-FLUSH-NEXT:    v_div_scale_f32 v2, vcc, 1.0, v0, 1.0
-; GCN-FLUSH-NEXT:    v_rcp_f32_e32 v3, v1
+; GCN-FLUSH-NEXT:    v_div_scale_f32 v0, s[2:3], -s4, -s4, 1.0
+; GCN-FLUSH-NEXT:    v_div_scale_f32 v1, vcc, 1.0, -s4, 1.0
+; GCN-FLUSH-NEXT:    v_rcp_f32_e32 v2, v0
 ; GCN-FLUSH-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
-; GCN-FLUSH-NEXT:    v_fma_f32 v4, -v1, v3, 1.0
-; GCN-FLUSH-NEXT:    v_fma_f32 v3, v4, v3, v3
-; GCN-FLUSH-NEXT:    v_mul_f32_e32 v4, v2, v3
-; GCN-FLUSH-NEXT:    v_fma_f32 v5, -v1, v4, v2
-; GCN-FLUSH-NEXT:    v_fma_f32 v4, v5, v3, v4
-; GCN-FLUSH-NEXT:    v_fma_f32 v1, -v1, v4, v2
+; GCN-FLUSH-NEXT:    v_fma_f32 v3, -v0, v2, 1.0
+; GCN-FLUSH-NEXT:    v_fma_f32 v2, v3, v2, v2
+; GCN-FLUSH-NEXT:    v_mul_f32_e32 v3, v1, v2
+; GCN-FLUSH-NEXT:    v_fma_f32 v4, -v0, v3, v1
+; GCN-FLUSH-NEXT:    v_fma_f32 v3, v4, v2, v3
+; GCN-FLUSH-NEXT:    v_fma_f32 v0, -v0, v3, v1
 ; GCN-FLUSH-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
-; GCN-FLUSH-NEXT:    v_div_fmas_f32 v1, v1, v3, v4
-; GCN-FLUSH-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-FLUSH-NEXT:    v_div_fixup_f32 v0, v1, v0, 1.0
-; GCN-FLUSH-NEXT:    global_store_dword v2, v0, s[0:1]
+; GCN-FLUSH-NEXT:    v_div_fmas_f32 v0, v0, v2, v3
+; GCN-FLUSH-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-FLUSH-NEXT:    v_div_fixup_f32 v0, v0, -s4, 1.0
+; GCN-FLUSH-NEXT:    global_store_dword v1, v0, s[0:1]
 ; GCN-FLUSH-NEXT:    s_endpgm
   %load = load float, ptr addrspace(1) %arg, align 4
   %neg = fsub float -0.000000e+00, %load
@@ -805,25 +803,23 @@ define amdgpu_kernel void @div_minus_1_by_minus_x_correctly_rounded(ptr addrspac
 ; GCN-FLUSH:       ; %bb.0:
 ; GCN-FLUSH-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GCN-FLUSH-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-FLUSH-NEXT:    s_load_dword s2, s[0:1], 0x0
+; GCN-FLUSH-NEXT:    s_load_dword s4, s[0:1], 0x0
 ; GCN-FLUSH-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-FLUSH-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-FLUSH-NEXT:    v_sub_f32_e32 v0, 0x80000000, v0
-; GCN-FLUSH-NEXT:    v_div_scale_f32 v1, s[2:3], v0, v0, -1.0
-; GCN-FLUSH-NEXT:    v_div_scale_f32 v2, vcc, -1.0, v0, -1.0
-; GCN-FLUSH-NEXT:    v_rcp_f32_e32 v3, v1
+; GCN-FLUSH-NEXT:    v_div_scale_f32 v0, s[2:3], -s4, -s4, -1.0
+; GCN-FLUSH-NEXT:    v_div_scale_f32 v1, vcc, -1.0, -s4, -1.0
+; GCN-FLUSH-NEXT:    v_rcp_f32_e32 v2, v0
 ; GCN-FLUSH-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
-; GCN-FLUSH-NEXT:    v_fma_f32 v4, -v1, v3, 1.0
-; GCN-FLUSH-NEXT:    v_fma_f32 v3, v4, v3, v3
-; GCN-FLUSH-NEXT:    v_mul_f32_e32 v4, v2, v3
-; GCN-FLUSH-NEXT:    v_fma_f32 v5, -v1, v4, v2
-; GCN-FLUSH-NEXT:    v_fma_f32 v4, v5, v3, v4
-; GCN-FLUSH-NEXT:    v_fma_f32 v1, -v1, v4, v2
+; GCN-FLUSH-NEXT:    v_fma_f32 v3, -v0, v2, 1.0
+; GCN-FLUSH-NEXT:    v_fma_f32 v2, v3, v2, v2
+; GCN-FLUSH-NEXT:    v_mul_f32_e32 v3, v1, v2
+; GCN-FLUSH-NEXT:    v_fma_f32 v4, -v0, v3, v1
+; GCN-FLUSH-NEXT:    v_fma_f32 v3, v4, v2, v3
+; GCN-FLUSH-NEXT:    v_fma_f32 v0, -v0, v3, v1
 ; GCN-FLUSH-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
-; GCN-FLUSH-NEXT:    v_div_fmas_f32 v1, v1, v3, v4
-; GCN-FLUSH-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-FLUSH-NEXT:    v_div_fixup_f32 v0, v1, v0, -1.0
-; GCN-FLUSH-NEXT:    global_store_dword v2, v0, s[0:1]
+; GCN-FLUSH-NEXT:    v_div_fmas_f32 v0, v0, v2, v3
+; GCN-FLUSH-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-FLUSH-NEXT:    v_div_fixup_f32 v0, v0, -s4, -1.0
+; GCN-FLUSH-NEXT:    global_store_dword v1, v0, s[0:1]
 ; GCN-FLUSH-NEXT:    s_endpgm
   %load = load float, ptr addrspace(1) %arg, align 4
   %neg = fsub float -0.000000e+00, %load

diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
index 028ea473e8959d..05e980c91516e9 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
@@ -2608,10 +2608,9 @@ bb:
 }
 
 ; This expects denormal flushing, so can't turn this fmul into fneg
-; TODO: Keeping this as fmul saves encoding size
 ; GCN-LABEL: {{^}}nnan_fmul_neg1_to_fneg:
-; GCN: v_sub_f32_e32 [[TMP:v[0-9]+]], 0x80000000, v0
-; GCN-NEXT: v_mul_f32_e32 v0, [[TMP]], v1
+; GCN: s_waitcnt
+; GCN-NEXT: v_mul_f32_e64 v0, -v0, v1
 define float @nnan_fmul_neg1_to_fneg(float %x, float %y) #0 {
   %mul = fmul float %x, -1.0
   %add = fmul nnan float %mul, %y
@@ -2631,8 +2630,9 @@ define float @denormal_fmul_neg1_to_fneg(float %x, float %y) {
 
 ; know the source can't be an snan
 ; GCN-LABEL: {{^}}denorm_snan_fmul_neg1_to_fneg:
-; GCN: v_mul_f32_e64 [[TMP:v[0-9]+]], v0, -v0
-; GCN: v_mul_f32_e32 v0, [[TMP]], v1
+; GCN: s_waitcnt
+; GCN-NEXT: v_mul_f32_e64 [[TMP:v[0-9]+]], v0, -v0
+; GCN-NEXT: v_mul_f32_e32 v0, [[TMP]], v1
 ; GCN-NEXT: s_setpc_b64
 define float @denorm_snan_fmul_neg1_to_fneg(float %x, float %y) {
   %canonical = fmul float %x, %x
@@ -2642,9 +2642,9 @@ define float @denorm_snan_fmul_neg1_to_fneg(float %x, float %y) {
 }
 
 ; GCN-LABEL: {{^}}flush_snan_fmul_neg1_to_fneg:
-; GCN: v_mul_f32_e32 [[TMP0:v[0-9]+]], 1.0, v0
-; GCN: v_sub_f32_e32 [[TMP1:v[0-9]+]], 0x80000000, [[TMP0]]
-; GCN-NEXT: v_mul_f32_e32 v0, [[TMP1]], v1
+; GCN: s_waitcnt
+; GCN-NEXT: v_mul_f32_e32 [[TMP:v[0-9]+]], 1.0, v0
+; GCN-NEXT: v_mul_f32_e64 v0, -[[TMP]], v1
 define float @flush_snan_fmul_neg1_to_fneg(float %x, float %y) #0 {
   %quiet = call float @llvm.canonicalize.f32(float %x)
   %mul = fmul float %quiet, -1.0

diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
index 59a09a8d77a26c..f78b302761ed28 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
@@ -2641,8 +2641,7 @@ define float @nnan_fmul_neg1_to_fneg(float %x, float %y) #0 {
 ; GCN-LABEL: nnan_fmul_neg1_to_fneg:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_sub_f32_e32 v0, 0x80000000, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %mul = fmul float %x, -1.0
   %add = fmul nnan float %mul, %y
@@ -2681,8 +2680,7 @@ define float @flush_snan_fmul_neg1_to_fneg(float %x, float %y) #0 {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GCN-NEXT:    v_sub_f32_e32 v0, 0x80000000, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %quiet = call float @llvm.canonicalize.f32(float %x)
   %mul = fmul float %quiet, -1.0

diff --git a/llvm/test/CodeGen/AMDGPU/fsub-as-fneg-src-modifier.ll b/llvm/test/CodeGen/AMDGPU/fsub-as-fneg-src-modifier.ll
index 561a73b54b3bd1..0b8f4722f507d3 100644
--- a/llvm/test/CodeGen/AMDGPU/fsub-as-fneg-src-modifier.ll
+++ b/llvm/test/CodeGen/AMDGPU/fsub-as-fneg-src-modifier.ll
@@ -49,24 +49,36 @@ define float @no_fold_f32_fsub_into_fneg_modifier_ieee_commuted(float %v0, float
 }
 
 define float @fold_f32_fsub_into_fneg_modifier_ieee_pos0(float %v0, float %v1) #0 {
-; CHECK-LABEL: fold_f32_fsub_into_fneg_modifier_ieee_pos0:
-; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_sub_f32_e32 v0, 0, v0
-; CHECK-NEXT:    v_mul_f32_e32 v0, v0, v1
-; CHECK-NEXT:    s_setpc_b64 s[30:31]
+; SDAG-LABEL: fold_f32_fsub_into_fneg_modifier_ieee_pos0:
+; SDAG:       ; %bb.0:
+; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: fold_f32_fsub_into_fneg_modifier_ieee_pos0:
+; GISEL:       ; %bb.0:
+; GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT:    v_sub_f32_e32 v0, 0, v0
+; GISEL-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GISEL-NEXT:    s_setpc_b64 s[30:31]
   %sub = fsub float 0.0, %v0
   %mul = fmul float %sub, %v1
   ret float %mul
 }
 
 define float @fold_f32_fsub_into_fneg_modifier_daz_pos0(float %v0, float %v1) #1 {
-; CHECK-LABEL: fold_f32_fsub_into_fneg_modifier_daz_pos0:
-; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_sub_f32_e32 v0, 0, v0
-; CHECK-NEXT:    v_mul_f32_e32 v0, v0, v1
-; CHECK-NEXT:    s_setpc_b64 s[30:31]
+; SDAG-LABEL: fold_f32_fsub_into_fneg_modifier_daz_pos0:
+; SDAG:       ; %bb.0:
+; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: fold_f32_fsub_into_fneg_modifier_daz_pos0:
+; GISEL:       ; %bb.0:
+; GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT:    v_sub_f32_e32 v0, 0, v0
+; GISEL-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GISEL-NEXT:    s_setpc_b64 s[30:31]
   %sub = fsub float 0.0, %v0
   %mul = fmul float %sub, %v1
   ret float %mul
@@ -113,8 +125,7 @@ define float @fold_f32_fsub_into_fneg_modifier_daz(float %v0, float %v1) #1 {
 ; SDAG-LABEL: fold_f32_fsub_into_fneg_modifier_daz:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_sub_f32_e32 v0, 0x80000000, v0
-; SDAG-NEXT:    v_mul_f32_e32 v0, v0, v1
+; SDAG-NEXT:    v_mul_f32_e64 v0, -v0, v1
 ; SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: fold_f32_fsub_into_fneg_modifier_daz:
@@ -150,8 +161,7 @@ define float @fold_f32_fsub_into_fneg_modifier_daz_nsz(float %v0, float %v1) #1
 ; SDAG-LABEL: fold_f32_fsub_into_fneg_modifier_daz_nsz:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_sub_f32_e32 v0, 0x80000000, v0
-; SDAG-NEXT:    v_mul_f32_e32 v0, v0, v1
+; SDAG-NEXT:    v_mul_f32_e64 v0, -v0, v1
 ; SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: fold_f32_fsub_into_fneg_modifier_daz_nsz:
@@ -169,8 +179,7 @@ define float @fold_f32_fsub_into_fneg_modifier_dynamic(float %v0, float %v1) #2
 ; SDAG-LABEL: fold_f32_fsub_into_fneg_modifier_dynamic:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_sub_f32_e32 v0, 0x80000000, v0
-; SDAG-NEXT:    v_mul_f32_e32 v0, v0, v1
+; SDAG-NEXT:    v_mul_f32_e64 v0, -v0, v1
 ; SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: fold_f32_fsub_into_fneg_modifier_dynamic:
@@ -188,8 +197,7 @@ define float @fold_f32_fsub_into_fneg_modifier_dynamic_nsz(float %v0, float %v1)
 ; SDAG-LABEL: fold_f32_fsub_into_fneg_modifier_dynamic_nsz:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_sub_f32_e32 v0, 0x80000000, v0
-; SDAG-NEXT:    v_mul_f32_e32 v0, v0, v1
+; SDAG-NEXT:    v_mul_f32_e64 v0, -v0, v1
 ; SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: fold_f32_fsub_into_fneg_modifier_dynamic_nsz:
@@ -228,10 +236,8 @@ define <2 x float> @fold_v2f32_fsub_into_fneg_modifier_daz(<2 x float> %v0, <2 x
 ; SDAG-LABEL: fold_v2f32_fsub_into_fneg_modifier_daz:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_sub_f32_e32 v1, 0x80000000, v1
-; SDAG-NEXT:    v_sub_f32_e32 v0, 0x80000000, v0
-; SDAG-NEXT:    v_mul_f32_e32 v0, v0, v2
-; SDAG-NEXT:    v_mul_f32_e32 v1, v1, v3
+; SDAG-NEXT:    v_mul_f32_e64 v0, -v0, v2
+; SDAG-NEXT:    v_mul_f32_e64 v1, -v1, v3
 ; SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: fold_v2f32_fsub_into_fneg_modifier_daz:
@@ -272,10 +278,8 @@ define <2 x float> @fold_v2f32_fsub_into_fneg_modifier_daz_nsz(<2 x float> %v0,
 ; SDAG-LABEL: fold_v2f32_fsub_into_fneg_modifier_daz_nsz:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_sub_f32_e32 v1, 0x80000000, v1
-; SDAG-NEXT:    v_sub_f32_e32 v0, 0x80000000, v0
-; SDAG-NEXT:    v_mul_f32_e32 v0, v0, v2
-; SDAG-NEXT:    v_mul_f32_e32 v1, v1, v3
+; SDAG-NEXT:    v_mul_f32_e64 v0, -v0, v2
+; SDAG-NEXT:    v_mul_f32_e64 v1, -v1, v3
 ; SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: fold_v2f32_fsub_into_fneg_modifier_daz_nsz:
@@ -295,10 +299,8 @@ define <2 x float> @fold_v2f32_fsub_into_fneg_modifier_dynamic(<2 x float> %v0,
 ; SDAG-LABEL: fold_v2f32_fsub_into_fneg_modifier_dynamic:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_sub_f32_e32 v1, 0x80000000, v1
-; SDAG-NEXT:    v_sub_f32_e32 v0, 0x80000000, v0
-; SDAG-NEXT:    v_mul_f32_e32 v0, v0, v2
-; SDAG-NEXT:    v_mul_f32_e32 v1, v1, v3
+; SDAG-NEXT:    v_mul_f32_e64 v0, -v0, v2
+; SDAG-NEXT:    v_mul_f32_e64 v1, -v1, v3
 ; SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: fold_v2f32_fsub_into_fneg_modifier_dynamic:
@@ -318,10 +320,8 @@ define <2 x float> @fold_v2f32_fsub_into_fneg_modifier_dynamic_nsz(<2 x float> %
 ; SDAG-LABEL: fold_v2f32_fsub_into_fneg_modifier_dynamic_nsz:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_sub_f32_e32 v1, 0x80000000, v1
-; SDAG-NEXT:    v_sub_f32_e32 v0, 0x80000000, v0
-; SDAG-NEXT:    v_mul_f32_e32 v0, v0, v2
-; SDAG-NEXT:    v_mul_f32_e32 v1, v1, v3
+; SDAG-NEXT:    v_mul_f32_e64 v0, -v0, v2
+; SDAG-NEXT:    v_mul_f32_e64 v1, -v1, v3
 ; SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: fold_v2f32_fsub_into_fneg_modifier_dynamic_nsz:
@@ -360,8 +360,7 @@ define half @fold_f16_fsub_into_fneg_modifier_daz(half %v0, half %v1) #1 {
 ; SDAG-LABEL: fold_f16_fsub_into_fneg_modifier_daz:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_sub_f16_e32 v0, 0x8000, v0
-; SDAG-NEXT:    v_mul_f16_e32 v0, v0, v1
+; SDAG-NEXT:    v_mul_f16_e64 v0, -v0, v1
 ; SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: fold_f16_fsub_into_fneg_modifier_daz:
@@ -397,8 +396,7 @@ define half @fold_f16_fsub_into_fneg_modifier_daz_nsz(half %v0, half %v1) #1 {
 ; SDAG-LABEL: fold_f16_fsub_into_fneg_modifier_daz_nsz:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_sub_f16_e32 v0, 0x8000, v0
-; SDAG-NEXT:    v_mul_f16_e32 v0, v0, v1
+; SDAG-NEXT:    v_mul_f16_e64 v0, -v0, v1
 ; SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: fold_f16_fsub_into_fneg_modifier_daz_nsz:
@@ -416,8 +414,7 @@ define half @fold_f16_fsub_into_fneg_modifier_dynamic(half %v0, half %v1) #2 {
 ; SDAG-LABEL: fold_f16_fsub_into_fneg_modifier_dynamic:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_sub_f16_e32 v0, 0x8000, v0
-; SDAG-NEXT:    v_mul_f16_e32 v0, v0, v1
+; SDAG-NEXT:    v_mul_f16_e64 v0, -v0, v1
 ; SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: fold_f16_fsub_into_fneg_modifier_dynamic:
@@ -435,8 +432,7 @@ define half @fold_f16_fsub_into_fneg_modifier_dynamic_nsz(half %v0, half %v1) #2
 ; SDAG-LABEL: fold_f16_fsub_into_fneg_modifier_dynamic_nsz:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_sub_f16_e32 v0, 0x8000, v0
-; SDAG-NEXT:    v_mul_f16_e32 v0, v0, v1
+; SDAG-NEXT:    v_mul_f16_e64 v0, -v0, v1
 ; SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: fold_f16_fsub_into_fneg_modifier_dynamic_nsz:
@@ -1288,10 +1284,9 @@ define amdgpu_gfx float @fold_f16_fsub_into_fneg_modifier_interp_daz(float %v0,
 ; SDAG-LABEL: fold_f16_fsub_into_fneg_modifier_interp_daz:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_sub_f32_e32 v0, 0x80000000, v0
 ; SDAG-NEXT:    s_mov_b32 m0, s4
 ; SDAG-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 3
-; SDAG-NEXT:    v_interp_p1ll_f16 v0, v0, attr2.y
+; SDAG-NEXT:    v_interp_p1ll_f16 v0, -v0, attr2.y
 ; SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: fold_f16_fsub_into_fneg_modifier_interp_daz:

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
index 932a9969da8241..36d5326b0a3eba 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
@@ -5793,8 +5793,8 @@ define float @v_exp_f32_undef() {
 ; VI-SDAG:       ; %bb.0:
 ; VI-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; VI-SDAG-NEXT:    v_rndne_f32_e32 v0, 0
-; VI-SDAG-NEXT:    v_sub_f32_e32 v1, 0, v0
-; VI-SDAG-NEXT:    v_add_f32_e32 v1, 0x7fc00000, v1
+; VI-SDAG-NEXT:    s_mov_b32 s4, 0x7fc00000
+; VI-SDAG-NEXT:    v_add_f32_e64 v1, -v0, s4
 ; VI-SDAG-NEXT:    v_exp_f32_e32 v1, v1
 ; VI-SDAG-NEXT:    v_cvt_i32_f32_e32 v0, v0
 ; VI-SDAG-NEXT:    v_ldexp_f32 v0, v1, v0


        


More information about the llvm-commits mailing list