[llvm] 3859921 - [AArch64][SME2] Add multi-vector floating point min/max number intrinsics
Kerry McLaughlin via llvm-commits
llvm-commits@lists.llvm.org
Tue Feb 7 03:11:06 PST 2023
Author: Kerry McLaughlin
Date: 2023-02-07T11:10:17Z
New Revision: 385992105ef72765582c8f8547b554878d5776e9
URL: https://github.com/llvm/llvm-project/commit/385992105ef72765582c8f8547b554878d5776e9
DIFF: https://github.com/llvm/llvm-project/commit/385992105ef72765582c8f8547b554878d5776e9.diff
LOG: [AArch64][SME2] Add multi-vector floating point min/max number intrinsics
Adds IR intrinsics for the following SME2 instructions:
- fmaxnm/fminnm (single, 2 & 4 vector)
- fmaxnm/fminnm (multi, 2 & 4 vector)
NOTE: These intrinsics are still in development and are subject to future changes.
Reviewed By: david-arm
Differential Revision: https://reviews.llvm.org/D142732
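As a quick illustration of how the new intrinsics are used, the sketch below calls the two-vector "single" form at 32-bit element width. It is a minimal example assembled from the declarations and CHECK lines in the tests added below (the function name @fmaxnm_single_x2_example is invented for this illustration, not part of the patch). On an SME2-enabled target this is expected to select FMAXNM_VG2_2ZZ_S and emit a single destructive instruction of the form "fmaxnm { z4.s, z5.s }, { z4.s, z5.s }, z3.s":

  ; Destructive pair of source/destination vectors (%zdn1, %zdn2) plus a single
  ; vector operand (%zm); the two result vectors are returned as a struct.
  declare { <vscale x 4 x float>, <vscale x 4 x float> }
    @llvm.aarch64.sve.fmaxnm.single.x2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)

  define { <vscale x 4 x float>, <vscale x 4 x float> } @fmaxnm_single_x2_example(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm) {
    %res = call { <vscale x 4 x float>, <vscale x 4 x float> }
      @llvm.aarch64.sve.fmaxnm.single.x2.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm)
    ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
  }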
Added:
Modified:
llvm/include/llvm/IR/IntrinsicsAArch64.td
llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
llvm/test/CodeGen/AArch64/sme2-intrinsics-max.ll
llvm/test/CodeGen/AArch64/sme2-intrinsics-min.ll
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 2effdf62c034..6479b7d442e0 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -3058,6 +3058,18 @@ let TargetPrefix = "aarch64" in {
}
}
+ //
+ // Multi-vector floating point min/max number
+ //
+
+ foreach instr = ["fmaxnm", "fminnm"] in {
+ def int_aarch64_sve_ # instr # _single_x2 : SME2_VG2_Multi_Single_Intrinsic;
+ def int_aarch64_sve_ # instr # _single_x4 : SME2_VG4_Multi_Single_Intrinsic;
+
+ def int_aarch64_sve_ # instr # _x2 : SME2_VG2_Multi_Multi_Intrinsic;
+ def int_aarch64_sve_ # instr # _x4 : SME2_VG4_Multi_Multi_Intrinsic;
+ }
+
//
// Multi-vector vertical dot-products
//
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 93df312b9645..a9ee34646bde 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -5200,6 +5200,62 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
AArch64::FMIN_VG4_4Z4Z_D}))
SelectDestructiveMultiIntrinsic(Node, 4, true, Op);
return;
+ case Intrinsic::aarch64_sve_fmaxnm_single_x2:
+ if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
+ Node->getValueType(0),
+ {0, AArch64::FMAXNM_VG2_2ZZ_H, AArch64::FMAXNM_VG2_2ZZ_S,
+ AArch64::FMAXNM_VG2_2ZZ_D}))
+ SelectDestructiveMultiIntrinsic(Node, 2, false, Op);
+ return;
+ case Intrinsic::aarch64_sve_fmaxnm_single_x4:
+ if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
+ Node->getValueType(0),
+ {0, AArch64::FMAXNM_VG4_4ZZ_H, AArch64::FMAXNM_VG4_4ZZ_S,
+ AArch64::FMAXNM_VG4_4ZZ_D}))
+ SelectDestructiveMultiIntrinsic(Node, 4, false, Op);
+ return;
+ case Intrinsic::aarch64_sve_fminnm_single_x2:
+ if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
+ Node->getValueType(0),
+ {0, AArch64::FMINNM_VG2_2ZZ_H, AArch64::FMINNM_VG2_2ZZ_S,
+ AArch64::FMINNM_VG2_2ZZ_D}))
+ SelectDestructiveMultiIntrinsic(Node, 2, false, Op);
+ return;
+ case Intrinsic::aarch64_sve_fminnm_single_x4:
+ if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
+ Node->getValueType(0),
+ {0, AArch64::FMINNM_VG4_4ZZ_H, AArch64::FMINNM_VG4_4ZZ_S,
+ AArch64::FMINNM_VG4_4ZZ_D}))
+ SelectDestructiveMultiIntrinsic(Node, 4, false, Op);
+ return;
+ case Intrinsic::aarch64_sve_fmaxnm_x2:
+ if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
+ Node->getValueType(0),
+ {0, AArch64::FMAXNM_VG2_2Z2Z_H, AArch64::FMAXNM_VG2_2Z2Z_S,
+ AArch64::FMAXNM_VG2_2Z2Z_D}))
+ SelectDestructiveMultiIntrinsic(Node, 2, true, Op);
+ return;
+ case Intrinsic::aarch64_sve_fmaxnm_x4:
+ if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
+ Node->getValueType(0),
+ {0, AArch64::FMAXNM_VG4_4Z4Z_H, AArch64::FMAXNM_VG4_4Z4Z_S,
+ AArch64::FMAXNM_VG4_4Z4Z_D}))
+ SelectDestructiveMultiIntrinsic(Node, 4, true, Op);
+ return;
+ case Intrinsic::aarch64_sve_fminnm_x2:
+ if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
+ Node->getValueType(0),
+ {0, AArch64::FMINNM_VG2_2Z2Z_H, AArch64::FMINNM_VG2_2Z2Z_S,
+ AArch64::FMINNM_VG2_2Z2Z_D}))
+ SelectDestructiveMultiIntrinsic(Node, 2, true, Op);
+ return;
+ case Intrinsic::aarch64_sve_fminnm_x4:
+ if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
+ Node->getValueType(0),
+ {0, AArch64::FMINNM_VG4_4Z4Z_H, AArch64::FMINNM_VG4_4Z4Z_S,
+ AArch64::FMINNM_VG4_4Z4Z_D}))
+ SelectDestructiveMultiIntrinsic(Node, 4, true, Op);
+ return;
case Intrinsic::aarch64_sve_fcvts_x2:
SelectCVTIntrinsic(Node, 2, AArch64::FCVTZS_2Z2Z_StoS);
return;
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-max.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-max.ll
index d410a6160cdc..f0671cb1f012 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-max.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-max.ll
@@ -827,6 +827,230 @@ define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <v
ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
}
+; FMAXNM (Single, x2)
+
+define { <vscale x 8 x half>, <vscale x 8 x half> } @multi_vec_maxnm_single_x2_f16(<vscale x 8 x half> %dummy, <vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zm) {
+; CHECK-LABEL: multi_vec_maxnm_single_x2_f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: fmaxnm { z4.h, z5.h }, { z4.h, z5.h }, z3.h
+; CHECK-NEXT: mov z0.d, z4.d
+; CHECK-NEXT: mov z1.d, z5.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.fmaxnm.single.x2.nxv8f16(<vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zm)
+ ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float> } @multi_vec_maxnm_single_x2_f32(<vscale x 8 x half> %dummy, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm) {
+; CHECK-LABEL: multi_vec_maxnm_single_x2_f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: fmaxnm { z4.s, z5.s }, { z4.s, z5.s }, z3.s
+; CHECK-NEXT: mov z0.d, z4.d
+; CHECK-NEXT: mov z1.d, z5.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fmaxnm.single.x2.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm)
+ ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double> } @multi_vec_maxnm_single_x2_f64(<vscale x 8 x half> %dummy, <vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zm) {
+; CHECK-LABEL: multi_vec_maxnm_single_x2_f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: fmaxnm { z4.d, z5.d }, { z4.d, z5.d }, z3.d
+; CHECK-NEXT: mov z0.d, z4.d
+; CHECK-NEXT: mov z1.d, z5.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.fmaxnm.single.x2.nxv2f64(<vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zm)
+ ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+; FMAXNM (Single, x4)
+
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> }
+@multi_vec_maxnm_single_x4_f16(<vscale x 8 x half> %dummy, <vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zdn3, <vscale x 8 x half> %zdn4, <vscale x 8 x half> %zm) {
+; CHECK-LABEL: multi_vec_maxnm_single_x4_f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z27.d, z4.d
+; CHECK-NEXT: mov z26.d, z3.d
+; CHECK-NEXT: mov z25.d, z2.d
+; CHECK-NEXT: mov z24.d, z1.d
+; CHECK-NEXT: fmaxnm { z24.h - z27.h }, { z24.h - z27.h }, z5.h
+; CHECK-NEXT: mov z0.d, z24.d
+; CHECK-NEXT: mov z1.d, z25.d
+; CHECK-NEXT: mov z2.d, z26.d
+; CHECK-NEXT: mov z3.d, z27.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> }
+ @llvm.aarch64.sve.fmaxnm.single.x4.nxv8f16(<vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zdn3, <vscale x 8 x half> %zdn4, <vscale x 8 x half> %zm)
+ ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
+@multi_vec_maxnm_single_x4_f32(<vscale x 8 x half> %dummy, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4, <vscale x 4 x float> %zm) {
+; CHECK-LABEL: multi_vec_maxnm_single_x4_f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z27.d, z4.d
+; CHECK-NEXT: mov z26.d, z3.d
+; CHECK-NEXT: mov z25.d, z2.d
+; CHECK-NEXT: mov z24.d, z1.d
+; CHECK-NEXT: fmaxnm { z24.s - z27.s }, { z24.s - z27.s }, z5.s
+; CHECK-NEXT: mov z0.d, z24.d
+; CHECK-NEXT: mov z1.d, z25.d
+; CHECK-NEXT: mov z2.d, z26.d
+; CHECK-NEXT: mov z3.d, z27.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
+ @llvm.aarch64.sve.fmaxnm.single.x4.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4, <vscale x 4 x float> %zm)
+ ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
+@multi_vec_maxnm_single_x4_f64(<vscale x 8 x half> %dummy, <vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zdn3, <vscale x 2 x double> %zdn4, <vscale x 2 x double> %zm) {
+; CHECK-LABEL: multi_vec_maxnm_single_x4_f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z27.d, z4.d
+; CHECK-NEXT: mov z26.d, z3.d
+; CHECK-NEXT: mov z25.d, z2.d
+; CHECK-NEXT: mov z24.d, z1.d
+; CHECK-NEXT: fmaxnm { z24.d - z27.d }, { z24.d - z27.d }, z5.d
+; CHECK-NEXT: mov z0.d, z24.d
+; CHECK-NEXT: mov z1.d, z25.d
+; CHECK-NEXT: mov z2.d, z26.d
+; CHECK-NEXT: mov z3.d, z27.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
+ @llvm.aarch64.sve.fmaxnm.single.x4.nxv2f64(<vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zdn3, <vscale x 2 x double> %zdn4, <vscale x 2 x double> %zm)
+ ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+; FMAXNM (Multi, x2)
+
+define { <vscale x 8 x half>, <vscale x 8 x half> } @multi_vec_maxnm_x2_f16(<vscale x 8 x half> %dummy, <vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2) {
+; CHECK-LABEL: multi_vec_maxnm_x2_f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: fmaxnm { z4.h, z5.h }, { z4.h, z5.h }, { z6.h, z7.h }
+; CHECK-NEXT: mov z0.d, z4.d
+; CHECK-NEXT: mov z1.d, z5.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.fmaxnm.x2.nxv8f16(<vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2)
+ ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float> } @multi_vec_maxnm_x2_f32(<vscale x 8 x half> %dummy, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2) {
+; CHECK-LABEL: multi_vec_maxnm_x2_f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: fmaxnm { z4.s, z5.s }, { z4.s, z5.s }, { z6.s, z7.s }
+; CHECK-NEXT: mov z0.d, z4.d
+; CHECK-NEXT: mov z1.d, z5.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fmaxnm.x2.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2)
+ ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double> } @multi_vec_maxnm_x2_f64(<vscale x 8 x half> %dummy, <vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2) {
+; CHECK-LABEL: multi_vec_maxnm_x2_f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: fmaxnm { z4.d, z5.d }, { z4.d, z5.d }, { z6.d, z7.d }
+; CHECK-NEXT: mov z0.d, z4.d
+; CHECK-NEXT: mov z1.d, z5.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.fmaxnm.x2.nxv2f64(<vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2)
+ ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+; FMAXNM (Multi, x4)
+
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> }
+@multi_vec_maxnm_x4_f16(<vscale x 8 x half> %dummy, <vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zdn3, <vscale x 8 x half> %zdn4, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2, <vscale x 8 x half> %zm3, <vscale x 8 x half> %zm4) {
+; CHECK-LABEL: multi_vec_maxnm_x4_f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z30.d, z7.d
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: mov z29.d, z6.d
+; CHECK-NEXT: mov z27.d, z4.d
+; CHECK-NEXT: mov z28.d, z5.d
+; CHECK-NEXT: mov z26.d, z3.d
+; CHECK-NEXT: ld1h { z31.h }, p0/z, [x0]
+; CHECK-NEXT: mov z25.d, z2.d
+; CHECK-NEXT: mov z24.d, z1.d
+; CHECK-NEXT: fmaxnm { z24.h - z27.h }, { z24.h - z27.h }, { z28.h - z31.h }
+; CHECK-NEXT: mov z0.d, z24.d
+; CHECK-NEXT: mov z1.d, z25.d
+; CHECK-NEXT: mov z2.d, z26.d
+; CHECK-NEXT: mov z3.d, z27.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> }
+ @llvm.aarch64.sve.fmaxnm.x4.nxv8f16(<vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zdn3, <vscale x 8 x half> %zdn4,
+ <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2, <vscale x 8 x half> %zm3, <vscale x 8 x half> %zm4)
+ ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
+@multi_vec_maxnm_x4_f32(<vscale x 8 x half> %dummy, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4) {
+; CHECK-LABEL: multi_vec_maxnm_x4_f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z30.d, z7.d
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: mov z29.d, z6.d
+; CHECK-NEXT: mov z27.d, z4.d
+; CHECK-NEXT: mov z28.d, z5.d
+; CHECK-NEXT: mov z26.d, z3.d
+; CHECK-NEXT: ld1w { z31.s }, p0/z, [x0]
+; CHECK-NEXT: mov z25.d, z2.d
+; CHECK-NEXT: mov z24.d, z1.d
+; CHECK-NEXT: fmaxnm { z24.s - z27.s }, { z24.s - z27.s }, { z28.s - z31.s }
+; CHECK-NEXT: mov z0.d, z24.d
+; CHECK-NEXT: mov z1.d, z25.d
+; CHECK-NEXT: mov z2.d, z26.d
+; CHECK-NEXT: mov z3.d, z27.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
+ @llvm.aarch64.sve.fmaxnm.x4.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4,
+ <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4)
+ ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
+@multi_vec_maxnm_x4_f64(<vscale x 8 x half> %dummy, <vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zdn3, <vscale x 2 x double> %zdn4, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2, <vscale x 2 x double> %zm3, <vscale x 2 x double> %zm4) {
+; CHECK-LABEL: multi_vec_maxnm_x4_f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z30.d, z7.d
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z29.d, z6.d
+; CHECK-NEXT: mov z27.d, z4.d
+; CHECK-NEXT: mov z28.d, z5.d
+; CHECK-NEXT: mov z26.d, z3.d
+; CHECK-NEXT: ld1d { z31.d }, p0/z, [x0]
+; CHECK-NEXT: mov z25.d, z2.d
+; CHECK-NEXT: mov z24.d, z1.d
+; CHECK-NEXT: fmaxnm { z24.d - z27.d }, { z24.d - z27.d }, { z28.d - z31.d }
+; CHECK-NEXT: mov z0.d, z24.d
+; CHECK-NEXT: mov z1.d, z25.d
+; CHECK-NEXT: mov z2.d, z26.d
+; CHECK-NEXT: mov z3.d, z27.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
+ @llvm.aarch64.sve.fmaxnm.x4.nxv2f64(<vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zdn3, <vscale x 2 x double> %zdn4,
+ <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2, <vscale x 2 x double> %zm3, <vscale x 2 x double> %zm4)
+ ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.smax.single.x2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.smax.single.x2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.smax.single.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
@@ -896,3 +1120,25 @@ declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vsc
@llvm.aarch64.sve.fmax.x4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
@llvm.aarch64.sve.fmax.x4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
+
+declare { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.fmaxnm.single.x2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fmaxnm.single.x2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.fmaxnm.single.x2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
+
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> }
+ @llvm.aarch64.sve.fmaxnm.single.x4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
+ @llvm.aarch64.sve.fmaxnm.single.x4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
+ @llvm.aarch64.sve.fmaxnm.single.x4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
+
+declare { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.fmaxnm.x2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fmaxnm.x2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.fmaxnm.x2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
+
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> }
+ @llvm.aarch64.sve.fmaxnm.x4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
+ @llvm.aarch64.sve.fmaxnm.x4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
+ @llvm.aarch64.sve.fmaxnm.x4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-min.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-min.ll
index c9745bf5d178..45b46a9b9056 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-min.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-min.ll
@@ -827,6 +827,230 @@ define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <v
ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
}
+; FMINNM (Single, x2)
+
+define { <vscale x 8 x half>, <vscale x 8 x half> } @multi_vec_minnm_single_x2_f16(<vscale x 8 x half> %dummy, <vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zm) {
+; CHECK-LABEL: multi_vec_minnm_single_x2_f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: fminnm { z4.h, z5.h }, { z4.h, z5.h }, z3.h
+; CHECK-NEXT: mov z0.d, z4.d
+; CHECK-NEXT: mov z1.d, z5.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.fminnm.single.x2.nxv8f16(<vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zm)
+ ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float> } @multi_vec_minnm_single_x2_f32(<vscale x 8 x half> %dummy, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm) {
+; CHECK-LABEL: multi_vec_minnm_single_x2_f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: fminnm { z4.s, z5.s }, { z4.s, z5.s }, z3.s
+; CHECK-NEXT: mov z0.d, z4.d
+; CHECK-NEXT: mov z1.d, z5.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fminnm.single.x2.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm)
+ ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double> } @multi_vec_minnm_single_x2_f64(<vscale x 8 x half> %dummy, <vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zm) {
+; CHECK-LABEL: multi_vec_minnm_single_x2_f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: fminnm { z4.d, z5.d }, { z4.d, z5.d }, z3.d
+; CHECK-NEXT: mov z0.d, z4.d
+; CHECK-NEXT: mov z1.d, z5.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.fminnm.single.x2.nxv2f64(<vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zm)
+ ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+; FMINNM (Single, x4)
+
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> }
+@multi_vec_minnm_single_x4_f16(<vscale x 8 x half> %dummy, <vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zdn3, <vscale x 8 x half> %zdn4, <vscale x 8 x half> %zm) {
+; CHECK-LABEL: multi_vec_minnm_single_x4_f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z27.d, z4.d
+; CHECK-NEXT: mov z26.d, z3.d
+; CHECK-NEXT: mov z25.d, z2.d
+; CHECK-NEXT: mov z24.d, z1.d
+; CHECK-NEXT: fminnm { z24.h - z27.h }, { z24.h - z27.h }, z5.h
+; CHECK-NEXT: mov z0.d, z24.d
+; CHECK-NEXT: mov z1.d, z25.d
+; CHECK-NEXT: mov z2.d, z26.d
+; CHECK-NEXT: mov z3.d, z27.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> }
+ @llvm.aarch64.sve.fminnm.single.x4.nxv8f16(<vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zdn3, <vscale x 8 x half> %zdn4, <vscale x 8 x half> %zm)
+ ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
+@multi_vec_minnm_single_x4_f32(<vscale x 8 x half> %dummy, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4, <vscale x 4 x float> %zm) {
+; CHECK-LABEL: multi_vec_minnm_single_x4_f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z27.d, z4.d
+; CHECK-NEXT: mov z26.d, z3.d
+; CHECK-NEXT: mov z25.d, z2.d
+; CHECK-NEXT: mov z24.d, z1.d
+; CHECK-NEXT: fminnm { z24.s - z27.s }, { z24.s - z27.s }, z5.s
+; CHECK-NEXT: mov z0.d, z24.d
+; CHECK-NEXT: mov z1.d, z25.d
+; CHECK-NEXT: mov z2.d, z26.d
+; CHECK-NEXT: mov z3.d, z27.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
+ @llvm.aarch64.sve.fminnm.single.x4.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4, <vscale x 4 x float> %zm)
+ ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
+@multi_vec_minnm_single_x4_f64(<vscale x 8 x half> %dummy, <vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zdn3, <vscale x 2 x double> %zdn4, <vscale x 2 x double> %zm) {
+; CHECK-LABEL: multi_vec_minnm_single_x4_f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z27.d, z4.d
+; CHECK-NEXT: mov z26.d, z3.d
+; CHECK-NEXT: mov z25.d, z2.d
+; CHECK-NEXT: mov z24.d, z1.d
+; CHECK-NEXT: fminnm { z24.d - z27.d }, { z24.d - z27.d }, z5.d
+; CHECK-NEXT: mov z0.d, z24.d
+; CHECK-NEXT: mov z1.d, z25.d
+; CHECK-NEXT: mov z2.d, z26.d
+; CHECK-NEXT: mov z3.d, z27.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
+ @llvm.aarch64.sve.fminnm.single.x4.nxv2f64(<vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zdn3, <vscale x 2 x double> %zdn4, <vscale x 2 x double> %zm)
+ ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+; FMINNM (Multi, x2)
+
+define { <vscale x 8 x half>, <vscale x 8 x half> } @multi_vec_minnm_x2_f16(<vscale x 8 x half> %dummy, <vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2) {
+; CHECK-LABEL: multi_vec_minnm_x2_f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: fminnm { z4.h, z5.h }, { z4.h, z5.h }, { z6.h, z7.h }
+; CHECK-NEXT: mov z0.d, z4.d
+; CHECK-NEXT: mov z1.d, z5.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.fminnm.x2.nxv8f16(<vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2)
+ ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float> } @multi_vec_minnm_x2_f32(<vscale x 8 x half> %dummy, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2) {
+; CHECK-LABEL: multi_vec_minnm_x2_f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: fminnm { z4.s, z5.s }, { z4.s, z5.s }, { z6.s, z7.s }
+; CHECK-NEXT: mov z0.d, z4.d
+; CHECK-NEXT: mov z1.d, z5.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fminnm.x2.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2)
+ ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double> } @multi_vec_minnm_x2_f64(<vscale x 8 x half> %dummy, <vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2) {
+; CHECK-LABEL: multi_vec_minnm_x2_f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: fminnm { z4.d, z5.d }, { z4.d, z5.d }, { z6.d, z7.d }
+; CHECK-NEXT: mov z0.d, z4.d
+; CHECK-NEXT: mov z1.d, z5.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.fminnm.x2.nxv2f64(<vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2)
+ ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+; FMINNM (Multi, x4)
+
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> }
+@multi_vec_minnm_x4_f16(<vscale x 8 x half> %dummy, <vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zdn3, <vscale x 8 x half> %zdn4, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2, <vscale x 8 x half> %zm3, <vscale x 8 x half> %zm4) {
+; CHECK-LABEL: multi_vec_minnm_x4_f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z30.d, z7.d
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: mov z29.d, z6.d
+; CHECK-NEXT: mov z27.d, z4.d
+; CHECK-NEXT: mov z28.d, z5.d
+; CHECK-NEXT: mov z26.d, z3.d
+; CHECK-NEXT: ld1h { z31.h }, p0/z, [x0]
+; CHECK-NEXT: mov z25.d, z2.d
+; CHECK-NEXT: mov z24.d, z1.d
+; CHECK-NEXT: fminnm { z24.h - z27.h }, { z24.h - z27.h }, { z28.h - z31.h }
+; CHECK-NEXT: mov z0.d, z24.d
+; CHECK-NEXT: mov z1.d, z25.d
+; CHECK-NEXT: mov z2.d, z26.d
+; CHECK-NEXT: mov z3.d, z27.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> }
+ @llvm.aarch64.sve.fminnm.x4.nxv8f16(<vscale x 8 x half> %zdn1, <vscale x 8 x half> %zdn2, <vscale x 8 x half> %zdn3, <vscale x 8 x half> %zdn4,
+ <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2, <vscale x 8 x half> %zm3, <vscale x 8 x half> %zm4)
+ ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
+@multi_vec_minnm_x4_f32(<vscale x 8 x half> %dummy, <vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4) {
+; CHECK-LABEL: multi_vec_minnm_x4_f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z30.d, z7.d
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: mov z29.d, z6.d
+; CHECK-NEXT: mov z27.d, z4.d
+; CHECK-NEXT: mov z28.d, z5.d
+; CHECK-NEXT: mov z26.d, z3.d
+; CHECK-NEXT: ld1w { z31.s }, p0/z, [x0]
+; CHECK-NEXT: mov z25.d, z2.d
+; CHECK-NEXT: mov z24.d, z1.d
+; CHECK-NEXT: fminnm { z24.s - z27.s }, { z24.s - z27.s }, { z28.s - z31.s }
+; CHECK-NEXT: mov z0.d, z24.d
+; CHECK-NEXT: mov z1.d, z25.d
+; CHECK-NEXT: mov z2.d, z26.d
+; CHECK-NEXT: mov z3.d, z27.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
+ @llvm.aarch64.sve.fminnm.x4.nxv4f32(<vscale x 4 x float> %zdn1, <vscale x 4 x float> %zdn2, <vscale x 4 x float> %zdn3, <vscale x 4 x float> %zdn4,
+ <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4)
+ ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
+@multi_vec_minnm_x4_f64(<vscale x 8 x half> %dummy, <vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zdn3, <vscale x 2 x double> %zdn4, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2, <vscale x 2 x double> %zm3, <vscale x 2 x double> %zm4) {
+; CHECK-LABEL: multi_vec_minnm_x4_f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z30.d, z7.d
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z29.d, z6.d
+; CHECK-NEXT: mov z27.d, z4.d
+; CHECK-NEXT: mov z28.d, z5.d
+; CHECK-NEXT: mov z26.d, z3.d
+; CHECK-NEXT: ld1d { z31.d }, p0/z, [x0]
+; CHECK-NEXT: mov z25.d, z2.d
+; CHECK-NEXT: mov z24.d, z1.d
+; CHECK-NEXT: fminnm { z24.d - z27.d }, { z24.d - z27.d }, { z28.d - z31.d }
+; CHECK-NEXT: mov z0.d, z24.d
+; CHECK-NEXT: mov z1.d, z25.d
+; CHECK-NEXT: mov z2.d, z26.d
+; CHECK-NEXT: mov z3.d, z27.d
+; CHECK-NEXT: ret
+ %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
+ @llvm.aarch64.sve.fminnm.x4.nxv2f64(<vscale x 2 x double> %zdn1, <vscale x 2 x double> %zdn2, <vscale x 2 x double> %zdn3, <vscale x 2 x double> %zdn4,
+ <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2, <vscale x 2 x double> %zm3, <vscale x 2 x double> %zm4)
+ ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.smin.single.x2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.smin.single.x2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.smin.single.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
@@ -896,3 +1120,25 @@ declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vsc
@llvm.aarch64.sve.fmin.x4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
@llvm.aarch64.sve.fmin.x4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
+
+declare { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.fminnm.single.x2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fminnm.single.x2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.fminnm.single.x2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
+
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> }
+ @llvm.aarch64.sve.fminnm.single.x4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
+ @llvm.aarch64.sve.fminnm.single.x4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
+ @llvm.aarch64.sve.fminnm.single.x4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
+
+declare { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.fminnm.x2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.fminnm.x2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.fminnm.x2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
+
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> }
+ @llvm.aarch64.sve.fminnm.x4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }
+ @llvm.aarch64.sve.fminnm.x4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
+ @llvm.aarch64.sve.fminnm.x4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)