[PATCH] D14909: [X86][FMA] Optimize FNEG(FMUL) Patterns
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sun Nov 22 11:41:46 PST 2015
RKSimon created this revision.
RKSimon added reviewers: spatel, delena, qcolombet.
RKSimon added a subscriber: llvm-commits.
RKSimon set the repository for this revision to rL LLVM.
On FMA targets, we can avoid having to load a constant to negate a float/double multiply by instead using a FNMADD (-(X*Y)+0)
Note: As Sanjay mentioned in his bug report, although this is consistently faster (by avoiding the constant load), this does increase register pressure by requiring us to create a zero register. I'm not sure how best to qualify this if people think it's a problem. Only running with optsize doesn't really help us - we MAY reduce constantpool size (if no other FNEG are present) but we MAY also increase code size handling extra stack traffic. We do have precedent for this: we use blendps to zero out elements instead of using the slower insertps; I'm sure there are plenty of other examples.
Fix for PR24366
Repository:
rL LLVM
http://reviews.llvm.org/D14909
Files:
lib/Target/X86/X86ISelLowering.cpp
test/CodeGen/X86/fma_patterns.ll
Index: test/CodeGen/X86/fma_patterns.ll
===================================================================
--- test/CodeGen/X86/fma_patterns.ll
+++ test/CodeGen/X86/fma_patterns.ll
@@ -599,4 +599,40 @@
ret <4 x float> %a
}
+; (fneg (fmul x, y)) -> (fnmadd x, y, 0)
+
+define double @test_f64_fneg_fmul(double %x, double %y) #0 {
+; CHECK_FMA-LABEL: test_f64_fneg_fmul:
+; CHECK_FMA: # BB#0:
+; CHECK_FMA-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK_FMA-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0
+; CHECK_FMA-NEXT: retq
+;
+; CHECK_FMA4-LABEL: test_f64_fneg_fmul:
+; CHECK_FMA4: # BB#0:
+; CHECK_FMA4-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK_FMA4-NEXT: vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK_FMA4-NEXT: retq
+ %m = fmul double %x, %y
+ %n = fsub double -0.0, %m
+ ret double %n
+}
+
+define <4 x float> @test_v4f32_fneg_fmul(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK_FMA-LABEL: test_v4f32_fneg_fmul:
+; CHECK_FMA: # BB#0:
+; CHECK_FMA-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK_FMA-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0
+; CHECK_FMA-NEXT: retq
+;
+; CHECK_FMA4-LABEL: test_v4f32_fneg_fmul:
+; CHECK_FMA4: # BB#0:
+; CHECK_FMA4-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK_FMA4-NEXT: vfnmaddps %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK_FMA4-NEXT: retq
+ %m = fmul <4 x float> %x, %y
+ %n = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %m
+ ret <4 x float> %n
+}
+
attributes #0 = { "unsafe-fp-math"="true" }
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -13381,7 +13381,8 @@
/// The only differences between FABS and FNEG are the mask and the logic op.
/// FNEG also has a folding opportunity for FNEG(FABS(x)).
-static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
+static SDValue LowerFABSorFNEG(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
"Wrong opcode for lowering FABS or FNEG.");
@@ -13396,6 +13397,18 @@
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
+ MVT ScalarVT = VT.getScalarType();
+
+ // If we're negating a FMUL node on a target with FMA, then we can avoid the
+ // use of a constant by performing (0 - A*B) instead.
+ if (!IsFABS && Op.getOperand(0).getOpcode() == ISD::FMUL &&
+ DAG.getTarget().Options.UnsafeFPMath &&
+ (ScalarVT == MVT::f32 || ScalarVT == MVT::f64) &&
+ (Subtarget->hasFMA() || Subtarget->hasFMA4() || Subtarget->hasAVX512())) {
+ SDValue Zero = DAG.getConstantFP(0.0, dl, VT);
+ return DAG.getNode(X86ISD::FNMADD, dl, VT, Op.getOperand(0).getOperand(0),
+ Op.getOperand(0).getOperand(1), Zero);
+ }
// FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
// decide if we should generate a 16-byte constant mask when we only need 4 or
@@ -19780,7 +19793,7 @@
case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
case ISD::FABS:
- case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
+ case ISD::FNEG: return LowerFABSorFNEG(Op, Subtarget, DAG);
case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
case ISD::SETCC: return LowerSETCC(Op, DAG);
-------------- next part --------------
A non-text attachment was scrubbed...
Name: D14909.40882.patch
Type: text/x-patch
Size: 3568 bytes
Desc: not available
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20151122/43edeb49/attachment.bin>
More information about the llvm-commits
mailing list