[llvm] ccfe7e0 - [AArch64] Combine vector FNEG+FMA into `FNML[A|S]` (#167900)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 5 02:22:02 PST 2025
Author: Damian Heaton
Date: 2025-12-05T10:21:58Z
New Revision: ccfe7e09540f94e0419c607396e270f8d456e608
URL: https://github.com/llvm/llvm-project/commit/ccfe7e09540f94e0419c607396e270f8d456e608
DIFF: https://github.com/llvm/llvm-project/commit/ccfe7e09540f94e0419c607396e270f8d456e608.diff
LOG: [AArch64] Combine vector FNEG+FMA into `FNML[A|S]` (#167900)
This allows FNEG + FMA sequences to be combined into a single
operation, with `FNML[A|S]`, `FNMAD`, or `FNMSB` selected depending on
operand order, similarly to how `performSVEMulAddSubCombine` enables
generating `ML[A|S]` instructions by converting the ADD/SUB intrinsics
to scalable vectors.
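For illustration, a minimal IR sketch of the sequence this change targets,
mirroring the first of the new tests below (the function name is
illustrative); the instruction it is expected to select is noted in a comment:

; Sketch, assuming +sve: fma(a, b, fneg(c)) now selects a single FNMSB
; rather than a separate FNEG followed by an FMLA.
define <vscale x 2 x double> @sketch_fnmsb(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
  %neg = fneg <vscale x 2 x double> %c
  ; expected codegen: fnmsb z0.d, p0/m, z1.d, z2.d
  %r = tail call <vscale x 2 x double> @llvm.fmuladd(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %neg)
  ret <vscale x 2 x double> %r
}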
Added:
llvm/test/CodeGen/AArch64/sve-fmsub.ll
Modified:
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/lib/Target/AArch64/AArch64ISelLowering.h
llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7a15d7b75f1b9..7199319ccdd9f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1523,6 +1523,9 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
for (auto VT : {MVT::v16i8, MVT::v8i8, MVT::v4i16, MVT::v2i32})
setOperationAction(ISD::GET_ACTIVE_LANE_MASK, VT, Custom);
+
+ for (auto VT : {MVT::v8f16, MVT::v4f32, MVT::v2f64})
+ setOperationAction(ISD::FMA, VT, Custom);
}
if (Subtarget->isSVEorStreamingSVEAvailable()) {
@@ -7781,6 +7784,46 @@ SDValue AArch64TargetLowering::LowerFMUL(SDValue Op, SelectionDAG &DAG) const {
return FCVTNT(VT, BottomBF16, Pg, TopF32);
}
+SDValue AArch64TargetLowering::LowerFMA(SDValue Op, SelectionDAG &DAG) const {
+ SDValue OpA = Op->getOperand(0);
+ SDValue OpB = Op->getOperand(1);
+ SDValue OpC = Op->getOperand(2);
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+
+ assert(VT.isVector() && "Scalar fma lowering should be handled by patterns");
+
+ // Bail early if we're definitely not looking to merge FNEGs into the FMA.
+ if (VT != MVT::v8f16 && VT != MVT::v4f32 && VT != MVT::v2f64)
+ return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMA_PRED);
+
+ if (OpC.getOpcode() != ISD::FNEG)
+ return useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable())
+ ? LowerToPredicatedOp(Op, DAG, AArch64ISD::FMA_PRED)
+ : Op; // Fallback to NEON lowering.
+
+ // Convert FMA/FNEG nodes to SVE to enable the following patterns:
+ // fma(a, b, neg(c)) -> fnmls(a, b, c)
+ // fma(neg(a), b, neg(c)) -> fnmla(a, b, c)
+ // fma(a, neg(b), neg(c)) -> fnmla(a, b, c)
+ SDValue Pg = getPredicateForVector(DAG, DL, VT);
+ EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+
+ auto ConvertToScalableFnegMt = [&](SDValue Op) {
+ if (Op.getOpcode() == ISD::FNEG)
+ Op = LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEG_MERGE_PASSTHRU);
+ return convertToScalableVector(DAG, ContainerVT, Op);
+ };
+
+ OpA = ConvertToScalableFnegMt(OpA);
+ OpB = ConvertToScalableFnegMt(OpB);
+ OpC = ConvertToScalableFnegMt(OpC);
+
+ SDValue ScalableRes =
+ DAG.getNode(AArch64ISD::FMA_PRED, DL, ContainerVT, Pg, OpA, OpB, OpC);
+ return convertFromScalableVector(DAG, VT, ScalableRes);
+}
+
SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
SelectionDAG &DAG) const {
LLVM_DEBUG(dbgs() << "Custom lowering: ");
@@ -7857,7 +7900,7 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
case ISD::FMUL:
return LowerFMUL(Op, DAG);
case ISD::FMA:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMA_PRED);
+ return LowerFMA(Op, DAG);
case ISD::FDIV:
return LowerToPredicatedOp(Op, DAG, AArch64ISD::FDIV_PRED);
case ISD::FNEG:
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 1d4446d287462..e8c026d989eb8 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -615,6 +615,7 @@ class AArch64TargetLowering : public TargetLowering {
SDValue LowerStore128(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFMUL(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFMA(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerMGATHER(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerMSCATTER(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index bfa4ce6da212b..7b51987d3c603 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -462,6 +462,7 @@ def AArch64fmlsidx : PatFrags<(ops node:$acc, node:$op1, node:$op2, node:$idx),
def AArch64fnmla_p : PatFrags<(ops node:$pg, node:$za, node:$zn, node:$zm),
[(int_aarch64_sve_fnmla_u node:$pg, node:$za, node:$zn, node:$zm),
(AArch64fma_p node:$pg, (AArch64fneg_mt node:$pg, node:$zn, (undef)), node:$zm, (AArch64fneg_mt node:$pg, node:$za, (undef))),
+ (AArch64fma_p node:$pg, node:$zn, (AArch64fneg_mt node:$pg, node:$zm, (undef)), (AArch64fneg_mt node:$pg, node:$za, (undef))),
(AArch64fneg_mt_nsz node:$pg, (AArch64fma_p node:$pg, node:$zn, node:$zm, node:$za), (undef))]>;
def AArch64fnmls_p : PatFrags<(ops node:$pg, node:$za, node:$zn, node:$zm),
diff --git a/llvm/test/CodeGen/AArch64/sve-fmsub.ll b/llvm/test/CodeGen/AArch64/sve-fmsub.ll
new file mode 100644
index 0000000000000..d2e129e5db796
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fmsub.ll
@@ -0,0 +1,324 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mattr=+sve %s -o - | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mattr=+sme -force-streaming %s -o - | FileCheck %s --check-prefixes=CHECK
+
+target triple = "aarch64"
+
+; A * B + C
+; Negate only the addend:
+
+define <vscale x 2 x double> @fma_negC_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
+; CHECK-LABEL: fma_negC_nxv2f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: fnmsb z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 2 x double> %c
+ %0 = tail call <vscale x 2 x double> @llvm.fmuladd(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %neg)
+ ret <vscale x 2 x double> %0
+}
+
+define <vscale x 4 x float> @fma_negC_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
+; CHECK-LABEL: fma_negC_nxv4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: fnmsb z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 4 x float> %c
+ %0 = tail call <vscale x 4 x float> @llvm.fmuladd(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %neg)
+ ret <vscale x 4 x float> %0
+}
+
+define <vscale x 8 x half> @fma_negC_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
+; CHECK-LABEL: fma_negC_nxv8f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: fnmsb z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 8 x half> %c
+ %0 = tail call <vscale x 8 x half> @llvm.fmuladd(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %neg)
+ ret <vscale x 8 x half> %0
+}
+
+define <2 x double> @fma_negC_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: fma_negC_v2f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d, vl2
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmsb z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <2 x double> %c
+ %0 = tail call <2 x double> @llvm.fmuladd(<2 x double> %a, <2 x double> %b, <2 x double> %neg)
+ ret <2 x double> %0
+}
+
+define <4 x float> @fma_negC_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: fma_negC_v4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s, vl4
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmsb z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <4 x float> %c
+ %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %a, <4 x float> %b, <4 x float> %neg)
+ ret <4 x float> %0
+}
+
+define <8 x half> @fma_negC_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: fma_negC_v8f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h, vl8
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmsb z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <8 x half> %c
+ %0 = tail call <8 x half> @llvm.fmuladd(<8 x half> %a, <8 x half> %b, <8 x half> %neg)
+ ret <8 x half> %0
+}
+
+define <4 x float> @fma_negC_commutative_v4f32(<4 x float> %c, <4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: fma_negC_commutative_v4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s, vl4
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmls z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <4 x float> %c
+ %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %a, <4 x float> %b, <4 x float> %neg)
+ ret <4 x float> %0
+}
+
+; Negate one multiplicand (A/B) and the addend (C).
+
+define <vscale x 2 x double> @fma_negA_negC_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
+; CHECK-LABEL: fma_negA_negC_nxv2f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: fnmad z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 2 x double> %a
+ %neg1 = fneg <vscale x 2 x double> %c
+ %0 = tail call <vscale x 2 x double> @llvm.fmuladd(<vscale x 2 x double> %neg, <vscale x 2 x double> %b, <vscale x 2 x double> %neg1)
+ ret <vscale x 2 x double> %0
+}
+
+define <vscale x 4 x float> @fma_negA_negC_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
+; CHECK-LABEL: fma_negA_negC_nxv4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: fnmad z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 4 x float> %a
+ %neg1 = fneg <vscale x 4 x float> %c
+ %0 = tail call <vscale x 4 x float> @llvm.fmuladd(<vscale x 4 x float> %neg, <vscale x 4 x float> %b, <vscale x 4 x float> %neg1)
+ ret <vscale x 4 x float> %0
+}
+
+define <vscale x 8 x half> @fma_negA_negC_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
+; CHECK-LABEL: fma_negA_negC_nxv8f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: fnmad z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 8 x half> %a
+ %neg1 = fneg <vscale x 8 x half> %c
+ %0 = tail call <vscale x 8 x half> @llvm.fmuladd(<vscale x 8 x half> %neg, <vscale x 8 x half> %b, <vscale x 8 x half> %neg1)
+ ret <vscale x 8 x half> %0
+}
+
+define <2 x double> @fma_negA_negC_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: fma_negA_negC_v2f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d, vl2
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmad z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <2 x double> %a
+ %neg1 = fneg <2 x double> %c
+ %0 = tail call <2 x double> @llvm.fmuladd(<2 x double> %neg, <2 x double> %b, <2 x double> %neg1)
+ ret <2 x double> %0
+}
+
+define <4 x float> @fma_negA_negC_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: fma_negA_negC_v4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s, vl4
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmad z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <4 x float> %a
+ %neg1 = fneg <4 x float> %c
+ %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %neg, <4 x float> %b, <4 x float> %neg1)
+ ret <4 x float> %0
+}
+
+define <8 x half> @fma_negA_negC_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: fma_negA_negC_v8f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h, vl8
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmad z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <8 x half> %a
+ %neg1 = fneg <8 x half> %c
+ %0 = tail call <8 x half> @llvm.fmuladd(<8 x half> %neg, <8 x half> %b, <8 x half> %neg1)
+ ret <8 x half> %0
+}
+
+define <4 x float> @fma_negA_negC_commutative_v4f32(<4 x float> %c, <4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: fma_negA_negC_commutative_v4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s, vl4
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmla z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <4 x float> %a
+ %neg1 = fneg <4 x float> %c
+ %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %neg, <4 x float> %b, <4 x float> %neg1)
+ ret <4 x float> %0
+}
+
+define <vscale x 2 x double> @fma_negB_negC_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
+; CHECK-LABEL: fma_negB_negC_nxv2f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: fnmad z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 2 x double> %b
+ %neg1 = fneg <vscale x 2 x double> %c
+ %0 = tail call <vscale x 2 x double> @llvm.fmuladd(<vscale x 2 x double> %a, <vscale x 2 x double> %neg, <vscale x 2 x double> %neg1)
+ ret <vscale x 2 x double> %0
+}
+
+define <vscale x 4 x float> @fma_negB_negC_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
+; CHECK-LABEL: fma_negB_negC_nxv4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: fnmad z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 4 x float> %b
+ %neg1 = fneg <vscale x 4 x float> %c
+ %0 = tail call <vscale x 4 x float> @llvm.fmuladd(<vscale x 4 x float> %a, <vscale x 4 x float> %neg, <vscale x 4 x float> %neg1)
+ ret <vscale x 4 x float> %0
+}
+
+define <vscale x 8 x half> @fma_negB_negC_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
+; CHECK-LABEL: fma_negB_negC_nxv8f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: fnmad z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <vscale x 8 x half> %b
+ %neg1 = fneg <vscale x 8 x half> %c
+ %0 = tail call <vscale x 8 x half> @llvm.fmuladd(<vscale x 8 x half> %a, <vscale x 8 x half> %neg, <vscale x 8 x half> %neg1)
+ ret <vscale x 8 x half> %0
+}
+
+define <2 x double> @fma_negB_negC_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: fma_negB_negC_v2f64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.d, vl2
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmad z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <2 x double> %b
+ %neg1 = fneg <2 x double> %c
+ %0 = tail call <2 x double> @llvm.fmuladd(<2 x double> %a, <2 x double> %neg, <2 x double> %neg1)
+ ret <2 x double> %0
+}
+
+define <4 x float> @fma_negB_negC_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: fma_negB_negC_v4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s, vl4
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmad z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <4 x float> %b
+ %neg1 = fneg <4 x float> %c
+ %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %a, <4 x float> %neg, <4 x float> %neg1)
+ ret <4 x float> %0
+}
+
+define <8 x half> @fma_negB_negC_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: fma_negB_negC_v8f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.h, vl8
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmad z0.h, p0/m, z1.h, z2.h
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <8 x half> %b
+ %neg1 = fneg <8 x half> %c
+ %0 = tail call <8 x half> @llvm.fmuladd(<8 x half> %a, <8 x half> %neg, <8 x half> %neg1)
+ ret <8 x half> %0
+}
+
+define <4 x float> @fma_negB_negC_commutative_v4f32(<4 x float> %c, <4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: fma_negB_negC_commutative_v4f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ptrue p0.s, vl4
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT: fnmla z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT: ret
+entry:
+ %neg = fneg <4 x float> %b
+ %neg1 = fneg <4 x float> %c
+ %0 = tail call <4 x float> @llvm.fmuladd(<4 x float> %a, <4 x float> %neg, <4 x float> %neg1)
+ ret <4 x float> %0
+}