[clang] 5e9bc21 - [SveEmitter] Add builtins for SVE2 Multiplication by indexed elements
Sander de Smalen via cfe-commits
cfe-commits@lists.llvm.org
Thu May 7 07:22:10 PDT 2020
Author: Sander de Smalen
Date: 2020-05-07T15:21:37+01:00
New Revision: 5e9bc21eea111df698cf45048b8b0e4c3c245dd5
URL: https://github.com/llvm/llvm-project/commit/5e9bc21eea111df698cf45048b8b0e4c3c245dd5
DIFF: https://github.com/llvm/llvm-project/commit/5e9bc21eea111df698cf45048b8b0e4c3c245dd5.diff
LOG: [SveEmitter] Add builtins for SVE2 Multiplication by indexed elements
This patch adds builtins for:
- svmla_lane
- svmls_lane
- svmul_lane
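For reference, a minimal usage sketch of the new lane builtins (illustrative only, not part of this commit; the helper function names are hypothetical, and an SVE2-enabled toolchain providing arm_sve.h is assumed):

#include <arm_sve.h>

// Multiply-accumulate by lane: acc[i] += op2[i] * op3[3] for every
// 32-bit element. For 32-bit elements the lane index must be a
// compile-time constant in [0, 3].
svint32_t mla_by_lane3(svint32_t acc, svint32_t op2, svint32_t op3) {
  return svmla_lane_s32(acc, op2, op3, 3);
}

// Multiply by lane: op1[i] * op2[1] for every 64-bit element; the valid
// lane range for 64-bit elements is [0, 1].
svuint64_t mul_by_lane1(svuint64_t op1, svuint64_t op2) {
  return svmul_lane_u64(op1, op2, 1);
}

The calls lower to the @llvm.aarch64.sve.mla.lane / @llvm.aarch64.sve.mul.lane intrinsics checked in the CodeGen tests below, and out-of-range lane indices are rejected as covered by the negative tests.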
Added:
clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mla.c
clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mls.c
clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mul.c
clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mla.c
clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mls.c
clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mul.c
Modified:
clang/include/clang/Basic/arm_sve.td
Removed:
################################################################################
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index db4153b28c19..a271ad9cfb1b 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -1396,6 +1396,15 @@ def SVSBCLB_N : SInst<"svsbclb[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve
def SVSBCLT_N : SInst<"svsbclt[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve_sbclt">;
}
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Multiplication by indexed elements
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVMLA_LANE_2 : SInst<"svmla_lane[_{d}]", "ddddi", "silUsUiUl", MergeNone, "aarch64_sve_mla_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLS_LANE_2 : SInst<"svmls_lane[_{d}]", "ddddi", "silUsUiUl", MergeNone, "aarch64_sve_mls_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMUL_LANE_2 : SInst<"svmul_lane[_{d}]", "dddi", "silUsUiUl", MergeNone, "aarch64_sve_mul_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+}
+
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Non-temporal gather/scatter
let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mla.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mla.c
new file mode 100644
index 000000000000..b130780648c4
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mla.c
@@ -0,0 +1,111 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error -verify-ignore-unexpected=note %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svmla_lane_s16(svint16_t op1, svint16_t op2, svint16_t op3)
+{
+ // CHECK-LABEL: test_svmla_lane_s16
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mla.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+ // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmla_lane_s16'}}
+ return SVE_ACLE_FUNC(svmla_lane,_s16,,)(op1, op2, op3, 0);
+}
+
+svint16_t test_svmla_lane_s16_1(svint16_t op1, svint16_t op2, svint16_t op3)
+{
+ // CHECK-LABEL: test_svmla_lane_s16_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mla.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+ // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmla_lane_s16'}}
+ return SVE_ACLE_FUNC(svmla_lane,_s16,,)(op1, op2, op3, 7);
+}
+
+svint32_t test_svmla_lane_s32(svint32_t op1, svint32_t op2, svint32_t op3)
+{
+ // CHECK-LABEL: test_svmla_lane_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mla.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+ // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmla_lane_s32'}}
+ return SVE_ACLE_FUNC(svmla_lane,_s32,,)(op1, op2, op3, 0);
+}
+
+svint32_t test_svmla_lane_s32_1(svint32_t op1, svint32_t op2, svint32_t op3)
+{
+ // CHECK-LABEL: test_svmla_lane_s32_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mla.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+ // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmla_lane_s32'}}
+ return SVE_ACLE_FUNC(svmla_lane,_s32,,)(op1, op2, op3, 3);
+}
+
+svint64_t test_svmla_lane_s64(svint64_t op1, svint64_t op2, svint64_t op3)
+{
+ // CHECK-LABEL: test_svmla_lane_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mla.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 0)
+ // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmla_lane_s64'}}
+ return SVE_ACLE_FUNC(svmla_lane,_s64,,)(op1, op2, op3, 0);
+}
+
+svint64_t test_svmla_lane_s64_1(svint64_t op1, svint64_t op2, svint64_t op3)
+{
+ // CHECK-LABEL: test_svmla_lane_s64_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mla.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 1)
+ // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmla_lane_s64'}}
+ return SVE_ACLE_FUNC(svmla_lane,_s64,,)(op1, op2, op3, 1);
+}
+
+svuint16_t test_svmla_lane_u16(svuint16_t op1, svuint16_t op2, svuint16_t op3)
+{
+ // CHECK-LABEL: test_svmla_lane_u16
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mla.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+ // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmla_lane_u16'}}
+ return SVE_ACLE_FUNC(svmla_lane,_u16,,)(op1, op2, op3, 0);
+}
+
+svuint16_t test_svmla_lane_u16_1(svuint16_t op1, svuint16_t op2, svuint16_t op3)
+{
+ // CHECK-LABEL: test_svmla_lane_u16_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mla.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+ // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmla_lane_u16'}}
+ return SVE_ACLE_FUNC(svmla_lane,_u16,,)(op1, op2, op3, 7);
+}
+
+svuint32_t test_svmla_lane_u32_1(svuint32_t op1, svuint32_t op2, svuint32_t op3)
+{
+ // CHECK-LABEL: test_svmla_lane_u32_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mla.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+ // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmla_lane_u32'}}
+ return SVE_ACLE_FUNC(svmla_lane,_u32,,)(op1, op2, op3, 3);
+}
+
+svuint64_t test_svmla_lane_u64(svuint64_t op1, svuint64_t op2, svuint64_t op3)
+{
+ // CHECK-LABEL: test_svmla_lane_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mla.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 0)
+ // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmla_lane_u64'}}
+ return SVE_ACLE_FUNC(svmla_lane,_u64,,)(op1, op2, op3, 0);
+}
+
+svuint64_t test_svmla_lane_u64_1(svuint64_t op1, svuint64_t op2, svuint64_t op3)
+{
+ // CHECK-LABEL: test_svmla_lane_u64_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mla.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 1)
+ // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmla_lane_u64'}}
+ return SVE_ACLE_FUNC(svmla_lane,_u64,,)(op1, op2, op3, 1);
+}
diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mls.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mls.c
new file mode 100644
index 000000000000..13ecb1fadfea
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mls.c
@@ -0,0 +1,120 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error -verify-ignore-unexpected=note %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svmls_lane_s16(svint16_t op1, svint16_t op2, svint16_t op3)
+{
+ // CHECK-LABEL: test_svmls_lane_s16
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mls.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+ // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmls_lane_s16'}}
+ return SVE_ACLE_FUNC(svmls_lane,_s16,,)(op1, op2, op3, 0);
+}
+
+svint16_t test_svmls_lane_s16_1(svint16_t op1, svint16_t op2, svint16_t op3)
+{
+ // CHECK-LABEL: test_svmls_lane_s16_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mls.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+ // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmls_lane_s16'}}
+ return SVE_ACLE_FUNC(svmls_lane,_s16,,)(op1, op2, op3, 7);
+}
+
+svint32_t test_svmls_lane_s32(svint32_t op1, svint32_t op2, svint32_t op3)
+{
+ // CHECK-LABEL: test_svmls_lane_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mls.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+ // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmls_lane_s32'}}
+ return SVE_ACLE_FUNC(svmls_lane,_s32,,)(op1, op2, op3, 0);
+}
+
+svint32_t test_svmls_lane_s32_1(svint32_t op1, svint32_t op2, svint32_t op3)
+{
+ // CHECK-LABEL: test_svmls_lane_s32_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mls.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+ // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmls_lane_s32'}}
+ return SVE_ACLE_FUNC(svmls_lane,_s32,,)(op1, op2, op3, 3);
+}
+
+svint64_t test_svmls_lane_s64(svint64_t op1, svint64_t op2, svint64_t op3)
+{
+ // CHECK-LABEL: test_svmls_lane_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mls.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 0)
+ // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmls_lane_s64'}}
+ return SVE_ACLE_FUNC(svmls_lane,_s64,,)(op1, op2, op3, 0);
+}
+
+svint64_t test_svmls_lane_s64_1(svint64_t op1, svint64_t op2, svint64_t op3)
+{
+ // CHECK-LABEL: test_svmls_lane_s64_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mls.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 1)
+ // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmls_lane_s64'}}
+ return SVE_ACLE_FUNC(svmls_lane,_s64,,)(op1, op2, op3, 1);
+}
+
+svuint16_t test_svmls_lane_u16(svuint16_t op1, svuint16_t op2, svuint16_t op3)
+{
+ // CHECK-LABEL: test_svmls_lane_u16
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mls.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+ // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmls_lane_u16'}}
+ return SVE_ACLE_FUNC(svmls_lane,_u16,,)(op1, op2, op3, 0);
+}
+
+svuint16_t test_svmls_lane_u16_1(svuint16_t op1, svuint16_t op2, svuint16_t op3)
+{
+ // CHECK-LABEL: test_svmls_lane_u16_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mls.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+ // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmls_lane_u16'}}
+ return SVE_ACLE_FUNC(svmls_lane,_u16,,)(op1, op2, op3, 7);
+}
+
+svuint32_t test_svmls_lane_u32(svuint32_t op1, svuint32_t op2, svuint32_t op3)
+{
+ // CHECK-LABEL: test_svmls_lane_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mls.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+ // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmls_lane_u32'}}
+ return SVE_ACLE_FUNC(svmls_lane,_u32,,)(op1, op2, op3, 0);
+}
+
+svuint32_t test_svmls_lane_u32_1(svuint32_t op1, svuint32_t op2, svuint32_t op3)
+{
+ // CHECK-LABEL: test_svmls_lane_u32_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mls.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+ // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmls_lane_u32'}}
+ return SVE_ACLE_FUNC(svmls_lane,_u32,,)(op1, op2, op3, 3);
+}
+
+svuint64_t test_svmls_lane_u64(svuint64_t op1, svuint64_t op2, svuint64_t op3)
+{
+ // CHECK-LABEL: test_svmls_lane_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mls.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 0)
+ // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmls_lane_u64'}}
+ return SVE_ACLE_FUNC(svmls_lane,_u64,,)(op1, op2, op3, 0);
+}
+
+svuint64_t test_svmls_lane_u64_1(svuint64_t op1, svuint64_t op2, svuint64_t op3)
+{
+ // CHECK-LABEL: test_svmls_lane_u64_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mls.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3, i32 1)
+ // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmls_lane_u64'}}
+ return SVE_ACLE_FUNC(svmls_lane,_u64,,)(op1, op2, op3, 1);
+}
diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mul.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mul.c
new file mode 100644
index 000000000000..fff05ca585fe
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mul.c
@@ -0,0 +1,120 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error -verify-ignore-unexpected=note %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svmul_lane_s16(svint16_t op1, svint16_t op2)
+{
+ // CHECK-LABEL: test_svmul_lane_s16
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
+ // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmul_lane_s16'}}
+ return SVE_ACLE_FUNC(svmul_lane,_s16,,)(op1, op2, 0);
+}
+
+svint16_t test_svmul_lane_s16_1(svint16_t op1, svint16_t op2)
+{
+ // CHECK-LABEL: test_svmul_lane_s16_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7)
+ // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmul_lane_s16'}}
+ return SVE_ACLE_FUNC(svmul_lane,_s16,,)(op1, op2, 7);
+}
+
+svint32_t test_svmul_lane_s32(svint32_t op1, svint32_t op2)
+{
+ // CHECK-LABEL: test_svmul_lane_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
+ // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmul_lane_s32'}}
+ return SVE_ACLE_FUNC(svmul_lane,_s32,,)(op1, op2, 0);
+}
+
+svint32_t test_svmul_lane_s32_1(svint32_t op1, svint32_t op2)
+{
+ // CHECK-LABEL: test_svmul_lane_s32_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3)
+ // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmul_lane_s32'}}
+ return SVE_ACLE_FUNC(svmul_lane,_s32,,)(op1, op2, 3);
+}
+
+svint64_t test_svmul_lane_s64(svint64_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svmul_lane_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 0)
+ // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmul_lane_s64'}}
+ return SVE_ACLE_FUNC(svmul_lane,_s64,,)(op1, op2, 0);
+}
+
+svint64_t test_svmul_lane_s64_1(svint64_t op1, svint64_t op2)
+{
+ // CHECK-LABEL: test_svmul_lane_s64_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 1)
+ // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmul_lane_s64'}}
+ return SVE_ACLE_FUNC(svmul_lane,_s64,,)(op1, op2, 1);
+}
+
+svuint16_t test_svmul_lane_u16(svuint16_t op1, svuint16_t op2)
+{
+ // CHECK-LABEL: test_svmul_lane_u16
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
+ // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmul_lane_u16'}}
+ return SVE_ACLE_FUNC(svmul_lane,_u16,,)(op1, op2, 0);
+}
+
+svuint16_t test_svmul_lane_u16_1(svuint16_t op1, svuint16_t op2)
+{
+ // CHECK-LABEL: test_svmul_lane_u16_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.lane.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7)
+ // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmul_lane_u16'}}
+ return SVE_ACLE_FUNC(svmul_lane,_u16,,)(op1, op2, 7);
+}
+
+svuint32_t test_svmul_lane_u32(svuint32_t op1, svuint32_t op2)
+{
+ // CHECK-LABEL: test_svmul_lane_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
+ // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmul_lane_u32'}}
+ return SVE_ACLE_FUNC(svmul_lane,_u32,,)(op1, op2, 0);
+}
+
+svuint32_t test_svmul_lane_u32_1(svuint32_t op1, svuint32_t op2)
+{
+ // CHECK-LABEL: test_svmul_lane_u32_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3)
+ // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmul_lane_u32'}}
+ return SVE_ACLE_FUNC(svmul_lane,_u32,,)(op1, op2, 3);
+}
+
+svuint64_t test_svmul_lane_u64(svuint64_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svmul_lane_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 0)
+ // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmul_lane_u64'}}
+ return SVE_ACLE_FUNC(svmul_lane,_u64,,)(op1, op2, 0);
+}
+
+svuint64_t test_svmul_lane_u64_1(svuint64_t op1, svuint64_t op2)
+{
+ // CHECK-LABEL: test_svmul_lane_u64_1
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 1)
+ // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+ // expected-warning@+1 {{implicit declaration of function 'svmul_lane_u64'}}
+ return SVE_ACLE_FUNC(svmul_lane,_u64,,)(op1, op2, 1);
+}
diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mla.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mla.c
new file mode 100644
index 000000000000..969317db8a0b
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mla.c
@@ -0,0 +1,47 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint16_t test_svmla_lane_s16(svint16_t op1, svint16_t op2, svint16_t op3)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+ return SVE_ACLE_FUNC(svmla_lane,_s16,,)(op1, op2, op3, 8);
+}
+
+svint32_t test_svmla_lane_s32(svint32_t op1, svint32_t op2, svint32_t op3)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+ return SVE_ACLE_FUNC(svmla_lane,_s32,,)(op1, op2, op3, -1);
+}
+
+svint64_t test_svmla_lane_s64(svint64_t op1, svint64_t op2, svint64_t op3)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}}
+ return SVE_ACLE_FUNC(svmla_lane,_s64,,)(op1, op2, op3, 2);
+}
+
+svuint16_t test_svmla_lane_u16(svuint16_t op1, svuint16_t op2, svuint16_t op3)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+ return SVE_ACLE_FUNC(svmla_lane,_u16,,)(op1, op2, op3, -1);
+}
+
+svuint32_t test_svmla_lane_u32(svuint32_t op1, svuint32_t op2, svuint32_t op3)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+ return SVE_ACLE_FUNC(svmla_lane,_u32,,)(op1, op2, op3, 4);
+}
+
+svuint64_t test_svmla_lane_u64(svuint64_t op1, svuint64_t op2, svuint64_t op3)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}}
+ return SVE_ACLE_FUNC(svmla_lane,_u64,,)(op1, op2, op3, -1);
+}
diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mls.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mls.c
new file mode 100644
index 000000000000..1467931a4b31
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mls.c
@@ -0,0 +1,47 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint16_t test_svmls_lane_s16(svint16_t op1, svint16_t op2, svint16_t op3)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+ return SVE_ACLE_FUNC(svmls_lane,_s16,,)(op1, op2, op3, -1);
+}
+
+svint32_t test_svmls_lane_s32(svint32_t op1, svint32_t op2, svint32_t op3)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+ return SVE_ACLE_FUNC(svmls_lane,_s32,,)(op1, op2, op3, 4);
+}
+
+svint64_t test_svmls_lane_s64(svint64_t op1, svint64_t op2, svint64_t op3)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}}
+ return SVE_ACLE_FUNC(svmls_lane,_s64,,)(op1, op2, op3, -1);
+}
+
+svuint16_t test_svmls_lane_u16(svuint16_t op1, svuint16_t op2, svuint16_t op3)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+ return SVE_ACLE_FUNC(svmls_lane,_u16,,)(op1, op2, op3, 8);
+}
+
+svuint32_t test_svmls_lane_u32(svuint32_t op1, svuint32_t op2, svuint32_t op3)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+ return SVE_ACLE_FUNC(svmls_lane,_u32,,)(op1, op2, op3, -1);
+}
+
+svuint64_t test_svmls_lane_u64(svuint64_t op1, svuint64_t op2, svuint64_t op3)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}}
+ return SVE_ACLE_FUNC(svmls_lane,_u64,,)(op1, op2, op3, 2);
+}
diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mul.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mul.c
new file mode 100644
index 000000000000..73d7d6f79d34
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mul.c
@@ -0,0 +1,47 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint16_t test_svmul_lane_s16(svint16_t op1, svint16_t op2)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+ return SVE_ACLE_FUNC(svmul_lane,_s16,,)(op1, op2, 8);
+}
+
+svint32_t test_svmul_lane_s32(svint32_t op1, svint32_t op2)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+ return SVE_ACLE_FUNC(svmul_lane,_s32,,)(op1, op2, -1);
+}
+
+svint64_t test_svmul_lane_s64(svint64_t op1, svint64_t op2)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}}
+ return SVE_ACLE_FUNC(svmul_lane,_s64,,)(op1, op2, 2);
+}
+
+svuint16_t test_svmul_lane_u16(svuint16_t op1, svuint16_t op2)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+ return SVE_ACLE_FUNC(svmul_lane,_u16,,)(op1, op2, -1);
+}
+
+svuint32_t test_svmul_lane_u32(svuint32_t op1, svuint32_t op2)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+ return SVE_ACLE_FUNC(svmul_lane,_u32,,)(op1, op2, 4);
+}
+
+svuint64_t test_svmul_lane_u64(svuint64_t op1, svuint64_t op2)
+{
+ // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 1]}}
+ return SVE_ACLE_FUNC(svmul_lane,_u64,,)(op1, op2, -1);
+}