[clang] 0ddb203 - [SveEmitter] Add builtins for compares and ReverseCompare flag.
Sander de Smalen via cfe-commits
cfe-commits at lists.llvm.org
Fri Apr 24 06:35:06 PDT 2020
Author: Sander de Smalen
Date: 2020-04-24T14:33:47+01:00
New Revision: 0ddb2034c161bbc40035ae5a9d8864d216df84f2
URL: https://github.com/llvm/llvm-project/commit/0ddb2034c161bbc40035ae5a9d8864d216df84f2
DIFF: https://github.com/llvm/llvm-project/commit/0ddb2034c161bbc40035ae5a9d8864d216df84f2.diff
LOG: [SveEmitter] Add builtins for compares and ReverseCompare flag.
The ReverseCompare flag tells CGBuiltin to swap the operands so that
the LT/LE intrinsics can be expressed in terms of the GE/GT
intrinsics.
This patch also adds builtins for the wide variants of the compares.
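For illustration, below is a minimal sketch (not part of the patch) of
what these builtins lower to, following the IR patterns checked by the
tests added here; the exact IR varies with the element type:

#include <arm_sve.h>

svbool_t lt_example(svbool_t pg, svint8_t a, svint8_t b)
{
  // svcmplt maps to the aarch64_sve_cmpgt intrinsic with the
  // ReverseCompare flag set, so CGBuiltin swaps the operands and
  // a < b is emitted as b > a:
  //   %r = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(
  //            <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b,
  //            <vscale x 16 x i8> %a)
  return svcmplt_s8(pg, a, b);
}

svbool_t lt_wide_example(svbool_t pg, svint8_t a, svint64_t b)
{
  // The wide forms compare each narrow element against the 64-bit
  // elements of op2. svcmplt_wide has a dedicated intrinsic, so no
  // operand swap is needed:
  //   %r = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplt.wide.nxv16i8(
  //            <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a,
  //            <vscale x 2 x i64> %b)
  return svcmplt_wide_s8(pg, a, b);
}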
Reviewers: SjoerdMeijer, efriedma, ctetreau
Reviewed By: efriedma
Tags: #clang
Differential Revision: https://reviews.llvm.org/D78747
Added:
clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpeq.c
clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpge.c
clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpgt.c
clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmple.c
clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmplt.c
clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpne.c
clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpuo.c
Modified:
clang/include/clang/Basic/TargetBuiltins.h
clang/include/clang/Basic/arm_sve.td
clang/lib/CodeGen/CGBuiltin.cpp
clang/utils/TableGen/SveEmitter.cpp
Removed:
################################################################################
diff --git a/clang/include/clang/Basic/TargetBuiltins.h b/clang/include/clang/Basic/TargetBuiltins.h
index 1a9cb4cda1a4..1c193ea64a28 100644
--- a/clang/include/clang/Basic/TargetBuiltins.h
+++ b/clang/include/clang/Basic/TargetBuiltins.h
@@ -239,6 +239,7 @@ namespace clang {
bool isOverloadWhileRW() const { return Flags & IsOverloadWhileRW; }
bool isOverloadCvt() const { return Flags & IsOverloadCvt; }
bool isPrefetch() const { return Flags & IsPrefetch; }
+ bool isReverseCompare() const { return Flags & ReverseCompare; }
uint64_t getBits() const { return Flags; }
bool isFlagSet(uint64_t Flag) const { return Flags & Flag; }
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index 5709dc8b8781..9387f1d711f0 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -70,6 +70,9 @@
// q: 1/4 width elements, 4x element count
// o: 4x width elements, 1/4 element count
//
+// w: vector of element type promoted to 64 bits, vector maintains
+// signedness of its element type.
+// j: element type promoted to 64 bits (splat to vector type)
// i: constant uint64_t
// k: int32_t
// l: int64_t
@@ -179,6 +182,7 @@ def IsOverloadCvt : FlagType<0x00800000>; // Use {typeof(operand0),
def OverloadKindMask : FlagType<0x00E00000>; // When the masked values are all '0', the default type is used as overload type.
def IsByteIndexed : FlagType<0x01000000>;
def IsPrefetch : FlagType<0x08000000>; // Contiguous prefetches.
+def ReverseCompare : FlagType<0x20000000>; // Compare operands must be swapped.
// These must be kept in sync with the flags in include/clang/Basic/TargetBuiltins.h
class ImmCheckType<int val> {
@@ -610,6 +614,53 @@ def SVEXT : SInst<"svext[_{d}]", "dddi", "csilUcUsUiUlhfd", MergeNo
// Shifts
def SVASRD_M : SInst<"svasrd[_n_{d}]", "dPdi", "csil", MergeOp1, "aarch64_sve_asrd", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+////////////////////////////////////////////////////////////////////////////////
+// Integer comparisons
+
+def SVCMPEQ : SInst<"svcmpeq[_{d}]", "PPdd", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpeq">;
+def SVCMPNE : SInst<"svcmpne[_{d}]", "PPdd", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpne">;
+def SVCMPGE : SInst<"svcmpge[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpge">;
+def SVCMPGT : SInst<"svcmpgt[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpgt">;
+def SVCMPLE : SInst<"svcmple[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpge", [ReverseCompare]>;
+def SVCMPLT : SInst<"svcmplt[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpgt", [ReverseCompare]>;
+def SVCMPHI : SInst<"svcmpgt[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi">;
+def SVCMPHS : SInst<"svcmpge[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs">;
+def SVCMPLO : SInst<"svcmplt[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi", [ReverseCompare]>;
+def SVCMPLS : SInst<"svcmple[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs", [ReverseCompare]>;
+
+def SVCMPEQ_N : SInst<"svcmpeq[_n_{d}]", "PPda", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpeq">;
+def SVCMPNE_N : SInst<"svcmpne[_n_{d}]", "PPda", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpne">;
+def SVCMPGE_N : SInst<"svcmpge[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpge">;
+def SVCMPGT_N : SInst<"svcmpgt[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpgt">;
+def SVCMPLE_N : SInst<"svcmple[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpge", [ReverseCompare]>;
+def SVCMPLT_N : SInst<"svcmplt[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpgt", [ReverseCompare]>;
+def SVCMPHS_N : SInst<"svcmpge[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs">;
+def SVCMPHI_N : SInst<"svcmpgt[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi">;
+def SVCMPLS_N : SInst<"svcmple[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs", [ReverseCompare]>;
+def SVCMPLO_N : SInst<"svcmplt[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi", [ReverseCompare]>;
+
+def SVCMPEQ_WIDE : SInst<"svcmpeq_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpeq_wide">;
+def SVCMPNE_WIDE : SInst<"svcmpne_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpne_wide">;
+def SVCMPGE_WIDE : SInst<"svcmpge_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpge_wide">;
+def SVCMPGT_WIDE : SInst<"svcmpgt_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpgt_wide">;
+def SVCMPLE_WIDE : SInst<"svcmple_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmple_wide">;
+def SVCMPLT_WIDE : SInst<"svcmplt_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmplt_wide">;
+def SVCMPHI_WIDE : SInst<"svcmpgt_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmphi_wide">;
+def SVCMPHS_WIDE : SInst<"svcmpge_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmphs_wide">;
+def SVCMPLO_WIDE : SInst<"svcmplt_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmplo_wide">;
+def SVCMPLS_WIDE : SInst<"svcmple_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmpls_wide">;
+
+def SVCMPEQ_WIDE_N : SInst<"svcmpeq_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpeq_wide">;
+def SVCMPNE_WIDE_N : SInst<"svcmpne_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpne_wide">;
+def SVCMPGE_WIDE_N : SInst<"svcmpge_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpge_wide">;
+def SVCMPGT_WIDE_N : SInst<"svcmpgt_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpgt_wide">;
+def SVCMPLE_WIDE_N : SInst<"svcmple_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmple_wide">;
+def SVCMPLT_WIDE_N : SInst<"svcmplt_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmplt_wide">;
+def SVCMPHS_WIDE_N : SInst<"svcmpge_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmphs_wide">;
+def SVCMPHI_WIDE_N : SInst<"svcmpgt_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmphi_wide">;
+def SVCMPLO_WIDE_N : SInst<"svcmplt_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmplo_wide">;
+def SVCMPLS_WIDE_N : SInst<"svcmple_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmpls_wide">;
+
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Narrowing DSP operations
let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
@@ -645,6 +696,36 @@ def SVTMAD : SInst<"svtmad[_{d}]", "dddi", "hfd", MergeNone, "aarch64_sve_ftma
def SVMLA_LANE : SInst<"svmla_lane[_{d}]", "ddddi", "hfd", MergeNone, "aarch64_sve_fmla_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
def SVCMLA_LANE : SInst<"svcmla_lane[_{d}]", "ddddii", "hf", MergeNone, "aarch64_sve_fcmla_lane", [], [ImmCheck<3, ImmCheckLaneIndexCompRotate, 2>,
ImmCheck<4, ImmCheckComplexRotAll90>]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// Floating-point comparisons
+
+def SVACGE : SInst<"svacge[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facge">;
+def SVACGT : SInst<"svacgt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facgt">;
+def SVACLE : SInst<"svacle[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facge", [ReverseCompare]>;
+def SVACLT : SInst<"svaclt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facgt", [ReverseCompare]>;
+def SVCMPUO : SInst<"svcmpuo[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpuo">;
+
+def SVACGE_N : SInst<"svacge[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facge">;
+def SVACGT_N : SInst<"svacgt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facgt">;
+def SVACLE_N : SInst<"svacle[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facge", [ReverseCompare]>;
+def SVACLT_N : SInst<"svaclt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facgt", [ReverseCompare]>;
+def SVCMPUO_N : SInst<"svcmpuo[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpuo">;
+
+def SVCMPEQ_F : SInst<"svcmpeq[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpeq">;
+def SVCMPNE_F : SInst<"svcmpne[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpne">;
+def SVCMPGE_F : SInst<"svcmpge[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpge">;
+def SVCMPGT_F : SInst<"svcmpgt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpgt">;
+def SVCMPLE_F : SInst<"svcmple[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpge", [ReverseCompare]>;
+def SVCMPLT_F : SInst<"svcmplt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpgt", [ReverseCompare]>;
+
+def SVCMPEQ_F_N : SInst<"svcmpeq[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpeq">;
+def SVCMPNE_F_N : SInst<"svcmpne[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpne">;
+def SVCMPGE_F_N : SInst<"svcmpge[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpge">;
+def SVCMPGT_F_N : SInst<"svcmpgt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpgt">;
+def SVCMPLE_F_N : SInst<"svcmple[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpge", [ReverseCompare]>;
+def SVCMPLT_F_N : SInst<"svcmplt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpgt", [ReverseCompare]>;
+
////////////////////////////////////////////////////////////////////////////////
// Floating-point conversions
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 468bc4eab59d..3048c6441ba2 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -7892,6 +7892,9 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
}
+ if (TypeFlags.isReverseCompare())
+ std::swap(Ops[1], Ops[2]);
+
// Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
llvm::Type *OpndTy = Ops[1]->getType();
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpeq.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpeq.c
new file mode 100644
index 000000000000..b76bd8ee1beb
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpeq.c
@@ -0,0 +1,293 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svcmpeq_s8(svbool_t pg, svint8_t op1, svint8_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_s8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpeq,_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_s16(svbool_t pg, svint16_t op1, svint16_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_s16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_s32(svbool_t pg, svint32_t op1, svint32_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_s32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_s64(svbool_t pg, svint64_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_s64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_s64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_u8(svbool_t pg, svuint8_t op1, svuint8_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_u8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpeq,_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_u16(svbool_t pg, svuint16_t op1, svuint16_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_u16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_u32(svbool_t pg, svuint32_t op1, svuint32_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_u32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_u64(svbool_t pg, svuint64_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_u64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_u64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_n_s64(svbool_t pg, svint64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_n_s64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_n_s64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_n_u64(svbool_t pg, svuint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_n_u64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_n_u64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_wide_s8(svbool_t pg, svint8_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_wide_s8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpeq_wide,_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_wide_s16(svbool_t pg, svint16_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_wide_s16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq_wide,_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_wide_s32(svbool_t pg, svint32_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_wide_s32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq_wide,_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_n_s8(svbool_t pg, svint8_t op1, int8_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_n_s8
+ // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpeq,_n_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_n_s16(svbool_t pg, svint16_t op1, int16_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_n_s16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_n_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_n_s32(svbool_t pg, svint32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_n_s32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_n_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_n_u8(svbool_t pg, svuint8_t op1, uint8_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_n_u8
+ // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpeq,_n_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_n_u16(svbool_t pg, svuint16_t op1, uint16_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_n_u16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_n_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_n_u32(svbool_t pg, svuint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_n_u32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_n_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_f16(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_f16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpeq.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_f16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_f32(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_f32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpeq.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_f32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_f64(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_f64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpeq.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_f64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_n_f16(svbool_t pg, svfloat16_t op1, float16_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_n_f16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpeq.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_n_f16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_n_f32(svbool_t pg, svfloat32_t op1, float32_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_n_f32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpeq.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_n_f32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_n_f64(svbool_t pg, svfloat64_t op1, float64_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_n_f64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpeq.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq,_n_f64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_wide_n_s8(svbool_t pg, svint8_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_wide_n_s8
+ // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpeq_wide,_n_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_wide_n_s16(svbool_t pg, svint16_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_wide_n_s16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq_wide,_n_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpeq_wide_n_s32(svbool_t pg, svint32_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpeq_wide_n_s32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpeq_wide,_n_s32,,)(pg, op1, op2);
+}
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpge.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpge.c
new file mode 100644
index 000000000000..a5fed360db3d
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpge.c
@@ -0,0 +1,352 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svcmpge_s8(svbool_t pg, svint8_t op1, svint8_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_s8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpge,_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_s16(svbool_t pg, svint16_t op1, svint16_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_s16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_s32(svbool_t pg, svint32_t op1, svint32_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_s32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_s64(svbool_t pg, svint64_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_s64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_s64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_u8(svbool_t pg, svuint8_t op1, svuint8_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_u8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpge,_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_u16(svbool_t pg, svuint16_t op1, svuint16_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_u16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_u32(svbool_t pg, svuint32_t op1, svuint32_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_u32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_u64(svbool_t pg, svuint64_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_u64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_u64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_n_s64(svbool_t pg, svint64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_n_s64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_n_s64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_n_u64(svbool_t pg, svuint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_n_u64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_n_u64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_wide_s8(svbool_t pg, svint8_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_wide_s8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpge_wide,_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_wide_s16(svbool_t pg, svint16_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_wide_s16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge_wide,_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_wide_s32(svbool_t pg, svint32_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_wide_s32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge_wide,_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_wide_u8(svbool_t pg, svuint8_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_wide_u8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpge_wide,_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_wide_u16(svbool_t pg, svuint16_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_wide_u16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge_wide,_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_wide_u32(svbool_t pg, svuint32_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_wide_u32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge_wide,_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_n_s8(svbool_t pg, svint8_t op1, int8_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_n_s8
+ // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpge,_n_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_n_s16(svbool_t pg, svint16_t op1, int16_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_n_s16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_n_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_n_s32(svbool_t pg, svint32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_n_s32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_n_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_n_u8(svbool_t pg, svuint8_t op1, uint8_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_n_u8
+ // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpge,_n_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_n_u16(svbool_t pg, svuint16_t op1, uint16_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_n_u16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_n_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_n_u32(svbool_t pg, svuint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_n_u32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_n_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_f16(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_f16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpge.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_f16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_f32(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_f32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpge.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_f32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_f64(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_f64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpge.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_f64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_n_f16(svbool_t pg, svfloat16_t op1, float16_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_n_f16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpge.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_n_f16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_n_f32(svbool_t pg, svfloat32_t op1, float32_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_n_f32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpge.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_n_f32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_n_f64(svbool_t pg, svfloat64_t op1, float64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_n_f64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpge.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge,_n_f64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_wide_n_s8(svbool_t pg, svint8_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_wide_n_s8
+ // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpge_wide,_n_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_wide_n_s16(svbool_t pg, svint16_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_wide_n_s16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge_wide,_n_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_wide_n_s32(svbool_t pg, svint32_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_wide_n_s32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge_wide,_n_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_wide_n_u8(svbool_t pg, svuint8_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_wide_n_u8
+ // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpge_wide,_n_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_wide_n_u16(svbool_t pg, svuint16_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_wide_n_u16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge_wide,_n_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpge_wide_n_u32(svbool_t pg, svuint32_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpge_wide_n_u32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpge_wide,_n_u32,,)(pg, op1, op2);
+}
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpgt.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpgt.c
new file mode 100644
index 000000000000..76c0a63c2863
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpgt.c
@@ -0,0 +1,352 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svcmpgt_s8(svbool_t pg, svint8_t op1, svint8_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_s8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpgt,_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_s16(svbool_t pg, svint16_t op1, svint16_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_s16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_s32(svbool_t pg, svint32_t op1, svint32_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_s32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_s64(svbool_t pg, svint64_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_s64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_s64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_u8(svbool_t pg, svuint8_t op1, svuint8_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_u8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpgt,_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_u16(svbool_t pg, svuint16_t op1, svuint16_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_u16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_u32(svbool_t pg, svuint32_t op1, svuint32_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_u32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_u64(svbool_t pg, svuint64_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_u64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_u64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_n_s64(svbool_t pg, svint64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_n_s64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_n_s64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_n_u64(svbool_t pg, svuint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_n_u64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_n_u64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_wide_s8(svbool_t pg, svint8_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_wide_s8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpgt_wide,_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_wide_s16(svbool_t pg, svint16_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_wide_s16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt_wide,_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_wide_s32(svbool_t pg, svint32_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_wide_s32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt_wide,_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_wide_u8(svbool_t pg, svuint8_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_wide_u8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpgt_wide,_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_wide_u16(svbool_t pg, svuint16_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_wide_u16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt_wide,_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_wide_u32(svbool_t pg, svuint32_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_wide_u32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt_wide,_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_n_s8(svbool_t pg, svint8_t op1, int8_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_n_s8
+ // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpgt,_n_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_n_s16(svbool_t pg, svint16_t op1, int16_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_n_s16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_n_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_n_s32(svbool_t pg, svint32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_n_s32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_n_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_n_u8(svbool_t pg, svuint8_t op1, uint8_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_n_u8
+ // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpgt,_n_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_n_u16(svbool_t pg, svuint16_t op1, uint16_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_n_u16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_n_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_n_u32(svbool_t pg, svuint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_n_u32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_n_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_f16(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_f16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpgt.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_f16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_f32(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_f32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpgt.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_f32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_f64(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_f64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpgt.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_f64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_n_f16(svbool_t pg, svfloat16_t op1, float16_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_n_f16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpgt.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_n_f16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_n_f32(svbool_t pg, svfloat32_t op1, float32_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_n_f32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpgt.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_n_f32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_n_f64(svbool_t pg, svfloat64_t op1, float64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_n_f64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpgt.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt,_n_f64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_wide_n_s8(svbool_t pg, svint8_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_wide_n_s8
+ // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpgt_wide,_n_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_wide_n_s16(svbool_t pg, svint16_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_wide_n_s16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt_wide,_n_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_wide_n_s32(svbool_t pg, svint32_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_wide_n_s32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt_wide,_n_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_wide_n_u8(svbool_t pg, svuint8_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_wide_n_u8
+ // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpgt_wide,_n_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_wide_n_u16(svbool_t pg, svuint16_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_wide_n_u16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt_wide,_n_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpgt_wide_n_u32(svbool_t pg, svuint32_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpgt_wide_n_u32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpgt_wide,_n_u32,,)(pg, op1, op2);
+}
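
The cmple tests that follow are the first in this patch to exercise the new ReverseCompare flag: there is no dedicated less-or-equal intrinsic for the element-sized forms, so CGBuiltin emits the cmpge/cmphs (or fcmpge) intrinsic with the two operands swapped. A minimal sketch of the equivalence the CHECK lines encode — illustrative only, not part of the committed test file, with hypothetical variable names:

    // For each active lane: op1 <= op2  <=>  op2 >= op1, so svcmple can
    // reuse the GE intrinsics with reversed operand order.
    svbool_t le = svcmple_s32(pg, op1, op2); // lowers to llvm.aarch64.sve.cmpge(<pg>, op2, op1)
    svbool_t ge = svcmpge_s32(pg, op2, op1); // yields the same predicate result
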
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmple.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmple.c
new file mode 100644
index 000000000000..a6cf37434657
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmple.c
@@ -0,0 +1,352 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svcmple_s8(svbool_t pg, svint8_t op1, svint8_t op2)
+{
+ // CHECK-LABEL: test_svcmple_s8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op1)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmple,_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_s16(svbool_t pg, svint16_t op1, svint16_t op2)
+{
+ // CHECK-LABEL: test_svcmple_s16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_s32(svbool_t pg, svint32_t op1, svint32_t op2)
+{
+ // CHECK-LABEL: test_svcmple_s32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_s64(svbool_t pg, svint64_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_s64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_s64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_u8(svbool_t pg, svuint8_t op1, svuint8_t op2)
+{
+ // CHECK-LABEL: test_svcmple_u8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op1)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmple,_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_u16(svbool_t pg, svuint16_t op1, svuint16_t op2)
+{
+ // CHECK-LABEL: test_svcmple_u16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_u32(svbool_t pg, svuint32_t op1, svuint32_t op2)
+{
+ // CHECK-LABEL: test_svcmple_u32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_u64(svbool_t pg, svuint64_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_u64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_u64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_n_s64(svbool_t pg, svint64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_n_s64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[DUP]], <vscale x 2 x i64> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_n_s64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_n_u64(svbool_t pg, svuint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_n_u64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[DUP]], <vscale x 2 x i64> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_n_u64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_wide_s8(svbool_t pg, svint8_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_wide_s8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmple.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmple_wide,_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_wide_s16(svbool_t pg, svint16_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_wide_s16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmple.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple_wide,_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_wide_s32(svbool_t pg, svint32_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_wide_s32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple_wide,_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_wide_u8(svbool_t pg, svuint8_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_wide_u8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpls.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmple_wide,_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_wide_u16(svbool_t pg, svuint16_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_wide_u16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpls.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple_wide,_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_wide_u32(svbool_t pg, svuint32_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_wide_u32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple_wide,_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_n_s8(svbool_t pg, svint8_t op1, int8_t op2)
+{
+ // CHECK-LABEL: test_svcmple_n_s8
+ // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[DUP]], <vscale x 16 x i8> %op1)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmple,_n_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_n_s16(svbool_t pg, svint16_t op1, int16_t op2)
+{
+ // CHECK-LABEL: test_svcmple_n_s16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[DUP]], <vscale x 8 x i16> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_n_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_n_s32(svbool_t pg, svint32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svcmple_n_s32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[DUP]], <vscale x 4 x i32> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_n_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_n_u8(svbool_t pg, svuint8_t op1, uint8_t op2)
+{
+ // CHECK-LABEL: test_svcmple_n_u8
+ // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[DUP]], <vscale x 16 x i8> %op1)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmple,_n_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_n_u16(svbool_t pg, svuint16_t op1, uint16_t op2)
+{
+ // CHECK-LABEL: test_svcmple_n_u16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[DUP]], <vscale x 8 x i16> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_n_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_n_u32(svbool_t pg, svuint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svcmple_n_u32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[DUP]], <vscale x 4 x i32> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_n_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_f16(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+{
+ // CHECK-LABEL: test_svcmple_f16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpge.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op2, <vscale x 8 x half> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_f16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_f32(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+{
+ // CHECK-LABEL: test_svcmple_f32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpge.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op2, <vscale x 4 x float> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_f32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_f64(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_f64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpge.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op2, <vscale x 2 x double> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_f64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_n_f16(svbool_t pg, svfloat16_t op1, float16_t op2)
+{
+ // CHECK-LABEL: test_svcmple_n_f16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpge.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %[[DUP]], <vscale x 8 x half> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_n_f16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_n_f32(svbool_t pg, svfloat32_t op1, float32_t op2)
+{
+ // CHECK-LABEL: test_svcmple_n_f32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpge.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %[[DUP]], <vscale x 4 x float> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_n_f32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_n_f64(svbool_t pg, svfloat64_t op1, float64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_n_f64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpge.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %[[DUP]], <vscale x 2 x double> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple,_n_f64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_wide_n_s8(svbool_t pg, svint8_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_wide_n_s8
+ // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmple.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmple_wide,_n_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_wide_n_s16(svbool_t pg, svint16_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_wide_n_s16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmple.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple_wide,_n_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_wide_n_s32(svbool_t pg, svint32_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_wide_n_s32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple_wide,_n_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_wide_n_u8(svbool_t pg, svuint8_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_wide_n_u8
+ // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpls.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmple_wide,_n_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_wide_n_u16(svbool_t pg, svuint16_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_wide_n_u16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpls.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple_wide,_n_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmple_wide_n_u32(svbool_t pg, svuint32_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmple_wide_n_u32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmple_wide,_n_u32,,)(pg, op1, op2);
+}
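
The cmplt tests below follow the same pattern, and also show where ReverseCompare does not apply: the element-sized forms lower to cmpgt/cmphi (or fcmpgt) with swapped operands, while the wide forms target the dedicated cmplt.wide/cmplo.wide intrinsics and therefore keep their operands in order. A hedged sketch of the distinction, with illustrative names not taken from the patch:

    // Element-sized form: ReverseCompare swaps the operands.
    svbool_t lt  = svcmplt_s8(pg, a, b);        // -> llvm.aarch64.sve.cmpgt(pg, b, a)
    // Wide form: a native LT intrinsic exists, so no swap is needed.
    svbool_t ltw = svcmplt_wide_s8(pg, a, b64); // -> llvm.aarch64.sve.cmplt.wide(pg, a, b64)
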
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmplt.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmplt.c
new file mode 100644
index 000000000000..a5ed353609ee
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmplt.c
@@ -0,0 +1,352 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svcmplt_s8(svbool_t pg, svint8_t op1, svint8_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_s8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op1)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmplt,_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_s16(svbool_t pg, svint16_t op1, svint16_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_s16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_s32(svbool_t pg, svint32_t op1, svint32_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_s32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_s64(svbool_t pg, svint64_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_s64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_s64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_u8(svbool_t pg, svuint8_t op1, svuint8_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_u8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op1)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmplt,_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_u16(svbool_t pg, svuint16_t op1, svuint16_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_u16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_u32(svbool_t pg, svuint32_t op1, svuint32_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_u32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_u64(svbool_t pg, svuint64_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_u64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_u64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_n_s64(svbool_t pg, svint64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_n_s64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[DUP]], <vscale x 2 x i64> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_n_s64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_n_u64(svbool_t pg, svuint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_n_u64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[DUP]], <vscale x 2 x i64> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_n_u64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_wide_s8(svbool_t pg, svint8_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_wide_s8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplt.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmplt_wide,_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_wide_s16(svbool_t pg, svint16_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_wide_s16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt_wide,_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_wide_s32(svbool_t pg, svint32_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_wide_s32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt_wide,_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_wide_u8(svbool_t pg, svuint8_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_wide_u8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmplt_wide,_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_wide_u16(svbool_t pg, svuint16_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_wide_u16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplo.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt_wide,_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_wide_u32(svbool_t pg, svuint32_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_wide_u32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt_wide,_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_n_s8(svbool_t pg, svint8_t op1, int8_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_n_s8
+ // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[DUP]], <vscale x 16 x i8> %op1)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmplt,_n_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_n_s16(svbool_t pg, svint16_t op1, int16_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_n_s16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[DUP]], <vscale x 8 x i16> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_n_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_n_s32(svbool_t pg, svint32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_n_s32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[DUP]], <vscale x 4 x i32> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_n_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_n_u8(svbool_t pg, svuint8_t op1, uint8_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_n_u8
+ // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[DUP]], <vscale x 16 x i8> %op1)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmplt,_n_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_n_u16(svbool_t pg, svuint16_t op1, uint16_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_n_u16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[DUP]], <vscale x 8 x i16> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_n_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_n_u32(svbool_t pg, svuint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_n_u32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[DUP]], <vscale x 4 x i32> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_n_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_f16(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_f16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpgt.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op2, <vscale x 8 x half> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_f16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_f32(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_f32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpgt.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op2, <vscale x 4 x float> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_f32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_f64(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_f64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpgt.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op2, <vscale x 2 x double> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_f64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_n_f16(svbool_t pg, svfloat16_t op1, float16_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_n_f16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpgt.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %[[DUP]], <vscale x 8 x half> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_n_f16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_n_f32(svbool_t pg, svfloat32_t op1, float32_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_n_f32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpgt.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %[[DUP]], <vscale x 4 x float> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_n_f32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_n_f64(svbool_t pg, svfloat64_t op1, float64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_n_f64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpgt.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %[[DUP]], <vscale x 2 x double> %op1)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt,_n_f64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_wide_n_s8(svbool_t pg, svint8_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_wide_n_s8
+ // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplt.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmplt_wide,_n_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_wide_n_s16(svbool_t pg, svint16_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_wide_n_s16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt_wide,_n_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_wide_n_s32(svbool_t pg, svint32_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_wide_n_s32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt_wide,_n_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_wide_n_u8(svbool_t pg, svuint8_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_wide_n_u8
+ // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmplt_wide,_n_u8,,)(pg, op1, op2);
+}
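+// The wide forms need no operand swap: SVE provides less-than wide compares
+// directly, and the unsigned flavour of less-than is named "lower", so the
+// unsigned tests select cmplo.wide where the signed tests above select
+// cmplt.wide.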
+
+svbool_t test_svcmplt_wide_n_u16(svbool_t pg, svuint16_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_wide_n_u16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplo.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt_wide,_n_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmplt_wide_n_u32(svbool_t pg, svuint32_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmplt_wide_n_u32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmplt_wide,_n_u32,,)(pg, op1, op2);
+}
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpne.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpne.c
new file mode 100644
index 000000000000..c98c40a120bc
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpne.c
@@ -0,0 +1,293 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
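+// For example, SVE_ACLE_FUNC(svcmpne,_s8,,) expands to svcmpne_s8 by default,
+// and to the overloaded name svcmpne when SVE_OVERLOADED_FORMS is defined.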
+
+svbool_t test_svcmpne_s8(svbool_t pg, svint8_t op1, svint8_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_s8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpne,_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_s16(svbool_t pg, svint16_t op1, svint16_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_s16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_s32(svbool_t pg, svint32_t op1, svint32_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_s32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_s64(svbool_t pg, svint64_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_s64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_s64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_u8(svbool_t pg, svuint8_t op1, svuint8_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_u8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpne,_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_u16(svbool_t pg, svuint16_t op1, svuint16_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_u16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_u32(svbool_t pg, svuint32_t op1, svuint32_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_u32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_u64(svbool_t pg, svuint64_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_u64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_u64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_n_s64(svbool_t pg, svint64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_n_s64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_n_s64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_n_u64(svbool_t pg, svuint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_n_u64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_n_u64,,)(pg, op1, op2);
+}
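+// The _n forms splat the scalar with dup.x and reuse the ordinary vector
+// intrinsic. The separate _wide builtins tested next compare 8/16/32-bit
+// elements against a 64-bit vector; there is no 64-bit-element wide variant.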
+
+svbool_t test_svcmpne_wide_s8(svbool_t pg, svint8_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_wide_s8
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpne_wide,_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_wide_s16(svbool_t pg, svint16_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_wide_s16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne_wide,_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_wide_s32(svbool_t pg, svint32_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_wide_s32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne_wide,_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_n_s8(svbool_t pg, svint8_t op1, int8_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_n_s8
+ // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpne,_n_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_n_s16(svbool_t pg, svint16_t op1, int16_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_n_s16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_n_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_n_s32(svbool_t pg, svint32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_n_s32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_n_s32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_n_u8(svbool_t pg, svuint8_t op1, uint8_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_n_u8
+ // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpne,_n_u8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_n_u16(svbool_t pg, svuint16_t op1, uint16_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_n_u16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_n_u16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_n_u32(svbool_t pg, svuint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_n_u32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_n_u32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_f16(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_f16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpne.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_f16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_f32(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_f32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpne.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_f32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_f64(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_f64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpne.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_f64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_n_f16(svbool_t pg, svfloat16_t op1, float16_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_n_f16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpne.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_n_f16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_n_f32(svbool_t pg, svfloat32_t op1, float32_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_n_f32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpne.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_n_f32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_n_f64(svbool_t pg, svfloat64_t op1, float64_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_n_f64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpne.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne,_n_f64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_wide_n_s8(svbool_t pg, svint8_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_wide_n_s8
+ // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svcmpne_wide,_n_s8,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_wide_n_s16(svbool_t pg, svint16_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_wide_n_s16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne_wide,_n_s16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpne_wide_n_s32(svbool_t pg, svint32_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svcmpne_wide_n_s32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 2 x i64> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpne_wide,_n_s32,,)(pg, op1, op2);
+}
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpuo.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpuo.c
new file mode 100644
index 000000000000..1130d9812647
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_cmpuo.c
@@ -0,0 +1,74 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
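+// svcmpuo is the unordered compare: a result lane is true iff at least one
+// input lane is NaN. For illustration (assuming the usual ACLE spelling),
+// comparing a value against itself yields a per-lane NaN test:
+//   svbool_t nan_lanes = svcmpuo_f32(pg, x, x);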
+svbool_t test_svcmpuo_f16(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+{
+ // CHECK-LABEL: test_svcmpuo_f16
+ // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpuo.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpuo,_f16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpuo_f32(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+{
+ // CHECK-LABEL: test_svcmpuo_f32
+ // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpuo.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpuo,_f32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpuo_f64(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+{
+ // CHECK-LABEL: test_svcmpuo_f64
+ // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpuo.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpuo,_f64,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpuo_n_f16(svbool_t pg, svfloat16_t op1, float16_t op2)
+{
+ // CHECK-LABEL: test_svcmpuo_n_f16
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpuo.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpuo,_n_f16,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpuo_n_f32(svbool_t pg, svfloat32_t op1, float32_t op2)
+{
+ // CHECK-LABEL: test_svcmpuo_n_f32
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.fcmpuo.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpuo,_n_f32,,)(pg, op1, op2);
+}
+
+svbool_t test_svcmpuo_n_f64(svbool_t pg, svfloat64_t op1, float64_t op2)
+{
+ // CHECK-LABEL: test_svcmpuo_n_f64
+ // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+ // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op2)
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpuo.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %[[DUP]])
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svcmpuo,_n_f64,,)(pg, op1, op2);
+}
diff --git a/clang/utils/TableGen/SveEmitter.cpp b/clang/utils/TableGen/SveEmitter.cpp
index 8c8b415df914..9a4b3592a16e 100644
--- a/clang/utils/TableGen/SveEmitter.cpp
+++ b/clang/utils/TableGen/SveEmitter.cpp
@@ -573,6 +573,13 @@ void SVEType::applyModifier(char Mod) {
ElementBitwidth = Bitwidth = 64;
NumVectors = 0;
break;
+ case 'w':
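+ // Promote the element type to 64 bits; the type remains a vector and keeps
+ // the signedness of its element type.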
+ ElementBitwidth = 64;
+ break;
+ case 'j':
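+ // Promote to a 64-bit scalar (NumVectors = 0) that is splat to the vector
+ // type.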
+ ElementBitwidth = Bitwidth = 64;
+ NumVectors = 0;
+ break;
case 't':
Signed = true;
Float = false;