[clang] Add missing streaming attributes to SVE builtins (PR #79134)
Sam Tebbs via cfe-commits
cfe-commits@lists.llvm.org
Tue Jan 23 05:22:18 PST 2024
https://github.com/SamTebbs33 created https://github.com/llvm/llvm-project/pull/79134
This patch adds `IsStreamingCompatible` or `IsStreamingOrSVE2p1` to the SVE builtins that were missing them.
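For context: `IsStreamingCompatible` marks a builtin as callable from streaming, non-streaming and streaming-compatible functions alike, while `IsStreamingOrSVE2p1` allows it either in streaming mode or in non-streaming code when SVE2p1 is available. A minimal sketch of the kind of call this patch makes valid (illustrative only, not part of the patch):

#include <arm_sve.h>

// svdupq_n_u8 maps to SVDUPQ_8 below, one of the builtins gaining
// IsStreamingCompatible, so this call is accepted in a
// streaming-compatible function, where it would previously have been
// diagnosed as requiring non-streaming SVE.
svuint8_t splat_pair(uint8_t lo, uint8_t hi) __arm_streaming_compatible {
  return svdupq_n_u8(lo, hi, lo, hi, lo, hi, lo, hi,
                     lo, hi, lo, hi, lo, hi, lo, hi);
}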
From 09e6c0dd9b589481bf226b04bafe4da645e73bf2 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <samuel.tebbs@arm.com>
Date: Tue, 23 Jan 2024 13:11:23 +0000
Subject: [PATCH] [clang] Add missing streaming attributes to SVE builtins
This patch adds missing streaming attributes to the SVE builtins that
lacked them.
---
clang/include/clang/Basic/arm_sve.td | 86 +++++++++----------
.../acle_sve2p1_while_x2.c | 74 ++++++++--------
2 files changed, 83 insertions(+), 77 deletions(-)
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index 4c5c1b5603f181a..6da30e08e7521e7 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -543,13 +543,13 @@ def SVADRD : SInst<"svadrd[_{0}base]_[{2}]index", "uud", "ilUiUl", MergeNone, "
////////////////////////////////////////////////////////////////////////////////
// Scalar to vector
-def SVDUPQ_8 : SInst<"svdupq[_n]_{d}", "dssssssssssssssss", "cUc", MergeNone>;
-def SVDUPQ_16 : SInst<"svdupq[_n]_{d}", "dssssssss", "sUsh", MergeNone>;
+def SVDUPQ_8 : SInst<"svdupq[_n]_{d}", "dssssssssssssssss", "cUc", MergeNone, "", [IsStreamingCompatible]>;
+def SVDUPQ_16 : SInst<"svdupq[_n]_{d}", "dssssssss", "sUsh", MergeNone, "", [IsStreamingCompatible]>;
let TargetGuard = "sve,bf16" in {
- def SVDUPQ_BF16 : SInst<"svdupq[_n]_{d}", "dssssssss", "b", MergeNone>;
+ def SVDUPQ_BF16 : SInst<"svdupq[_n]_{d}", "dssssssss", "b", MergeNone, "", [IsStreamingCompatible]>;
}
-def SVDUPQ_32 : SInst<"svdupq[_n]_{d}", "dssss", "iUif", MergeNone>;
-def SVDUPQ_64 : SInst<"svdupq[_n]_{d}", "dss", "lUld", MergeNone>;
+def SVDUPQ_32 : SInst<"svdupq[_n]_{d}", "dssss", "iUif", MergeNone, "", [IsStreamingCompatible]>;
+def SVDUPQ_64 : SInst<"svdupq[_n]_{d}", "dss", "lUld", MergeNone, "", [IsStreamingCompatible]>;
multiclass svdup_base<string n, string p, MergeType mt, string i> {
def NAME : SInst<n, p, "csilUcUsUiUlhfd", mt, i, [IsStreamingCompatible]>;
@@ -638,8 +638,8 @@ def SVQADD_N_U : SInst<"svqadd[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64
def SVQSUB_N_S : SInst<"svqsub[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqsub_x", [IsStreamingCompatible]>;
def SVQSUB_N_U : SInst<"svqsub[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_uqsub_x", [IsStreamingCompatible]>;
-def SVDOT_LANE_S : SInst<"svdot_lane[_{d}]", "ddqqi", "il", MergeNone, "aarch64_sve_sdot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
-def SVDOT_LANE_U : SInst<"svdot_lane[_{d}]", "ddqqi", "UiUl", MergeNone, "aarch64_sve_udot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+def SVDOT_LANE_S : SInst<"svdot_lane[_{d}]", "ddqqi", "il", MergeNone, "aarch64_sve_sdot_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+def SVDOT_LANE_U : SInst<"svdot_lane[_{d}]", "ddqqi", "UiUl", MergeNone, "aarch64_sve_udot_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
////////////////////////////////////////////////////////////////////////////////
// Logical operations
@@ -835,14 +835,14 @@ def SVSCALE_N_M : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeOp1, "aarch64_sv
def SVSCALE_N_X : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeAny, "aarch64_sve_fscale", [IsStreamingCompatible]>;
def SVSCALE_N_Z : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeZero, "aarch64_sve_fscale", [IsStreamingCompatible]>;
-defm SVMAD_F : SInstZPZZZ<"svmad", "hfd", "aarch64_sve_fmad", "aarch64_sve_fmla_u", [ReverseMergeAnyAccOp]>;
-defm SVMLA_F : SInstZPZZZ<"svmla", "hfd", "aarch64_sve_fmla", "aarch64_sve_fmla_u">;
-defm SVMLS_F : SInstZPZZZ<"svmls", "hfd", "aarch64_sve_fmls", "aarch64_sve_fmls_u">;
-defm SVMSB_F : SInstZPZZZ<"svmsb", "hfd", "aarch64_sve_fmsb", "aarch64_sve_fmls_u", [ReverseMergeAnyAccOp]>;
-defm SVNMAD_F : SInstZPZZZ<"svnmad", "hfd", "aarch64_sve_fnmad", "aarch64_sve_fnmla_u", [ReverseMergeAnyAccOp]>;
-defm SVNMLA_F : SInstZPZZZ<"svnmla", "hfd", "aarch64_sve_fnmla", "aarch64_sve_fnmla_u">;
-defm SVNMLS_F : SInstZPZZZ<"svnmls", "hfd", "aarch64_sve_fnmls", "aarch64_sve_fnmls_u">;
-defm SVNMSB_F : SInstZPZZZ<"svnmsb", "hfd", "aarch64_sve_fnmsb", "aarch64_sve_fnmls_u", [ReverseMergeAnyAccOp]>;
+defm SVMAD_F : SInstZPZZZ<"svmad", "hfd", "aarch64_sve_fmad", "aarch64_sve_fmla_u", [IsStreamingCompatible, ReverseMergeAnyAccOp]>;
+defm SVMLA_F : SInstZPZZZ<"svmla", "hfd", "aarch64_sve_fmla", "aarch64_sve_fmla_u", [IsStreamingCompatible]>;
+defm SVMLS_F : SInstZPZZZ<"svmls", "hfd", "aarch64_sve_fmls", "aarch64_sve_fmls_u", [IsStreamingCompatible]>;
+defm SVMSB_F : SInstZPZZZ<"svmsb", "hfd", "aarch64_sve_fmsb", "aarch64_sve_fmls_u", [IsStreamingCompatible, ReverseMergeAnyAccOp]>;
+defm SVNMAD_F : SInstZPZZZ<"svnmad", "hfd", "aarch64_sve_fnmad", "aarch64_sve_fnmla_u", [IsStreamingCompatible, ReverseMergeAnyAccOp]>;
+defm SVNMLA_F : SInstZPZZZ<"svnmla", "hfd", "aarch64_sve_fnmla", "aarch64_sve_fnmla_u", [IsStreamingCompatible]>;
+defm SVNMLS_F : SInstZPZZZ<"svnmls", "hfd", "aarch64_sve_fnmls", "aarch64_sve_fnmls_u", [IsStreamingCompatible]>;
+defm SVNMSB_F : SInstZPZZZ<"svnmsb", "hfd", "aarch64_sve_fnmsb", "aarch64_sve_fnmls_u", [IsStreamingCompatible, ReverseMergeAnyAccOp]>;
def SVCADD_M : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeOp1, "aarch64_sve_fcadd", [IsStreamingCompatible], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
def SVCADD_X : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeAny, "aarch64_sve_fcadd", [IsStreamingCompatible], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
@@ -881,11 +881,11 @@ def SVACLE : SInst<"svacle[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facg
def SVACLT : SInst<"svaclt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facgt", [ReverseCompare, IsStreamingCompatible]>;
def SVCMPUO : SInst<"svcmpuo[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpuo", [IsStreamingCompatible]>;
-def SVACGE_N : SInst<"svacge[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facge">;
-def SVACGT_N : SInst<"svacgt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facgt">;
-def SVACLE_N : SInst<"svacle[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facge", [ReverseCompare]>;
-def SVACLT_N : SInst<"svaclt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facgt", [ReverseCompare]>;
-def SVCMPUO_N : SInst<"svcmpuo[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpuo">;
+def SVACGE_N : SInst<"svacge[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facge", [IsStreamingCompatible]>;
+def SVACGT_N : SInst<"svacgt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facgt", [IsStreamingCompatible]>;
+def SVACLE_N : SInst<"svacle[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facge", [ReverseCompare, IsStreamingCompatible]>;
+def SVACLT_N : SInst<"svaclt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facgt", [ReverseCompare, IsStreamingCompatible]>;
+def SVCMPUO_N : SInst<"svcmpuo[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpuo", [IsStreamingCompatible]>;
def SVCMPEQ_F : SInst<"svcmpeq[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpeq", [IsStreamingCompatible]>;
def SVCMPNE_F : SInst<"svcmpne[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpne", [IsStreamingCompatible]>;
@@ -1023,15 +1023,15 @@ def SVCOMPACT : SInst<"svcompact[_{d}]", "dPd", "ilUiUlfd", MergeNo
// splat of any possible lane. It is upto LLVM to pick a more efficient
// instruction such as DUP (indexed) if the lane index fits the range of the
// instruction's immediate.
-def SVDUP_LANE : SInst<"svdup_lane[_{d}]", "ddL", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tbl">;
+def SVDUP_LANE : SInst<"svdup_lane[_{d}]", "ddL", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tbl", [IsStreamingCompatible]>;
let TargetGuard = "sve,bf16" in {
def SVDUP_LANE_BF16 :
- SInst<"svdup_lane[_{d}]", "ddL", "b", MergeNone, "aarch64_sve_tbl">;
+ SInst<"svdup_lane[_{d}]", "ddL", "b", MergeNone, "aarch64_sve_tbl", [IsStreamingCompatible]>;
}
-def SVDUPQ_LANE : SInst<"svdupq_lane[_{d}]", "ddn", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_dupq_lane">;
+def SVDUPQ_LANE : SInst<"svdupq_lane[_{d}]", "ddn", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_dupq_lane", [IsStreamingCompatible]>;
let TargetGuard = "sve,bf16" in {
- def SVDUPQ_LANE_BF16 : SInst<"svdupq_lane[_{d}]", "ddn", "b", MergeNone, "aarch64_sve_dupq_lane">;
+ def SVDUPQ_LANE_BF16 : SInst<"svdupq_lane[_{d}]", "ddn", "b", MergeNone, "aarch64_sve_dupq_lane", [IsStreamingCompatible]>;
}
def SVEXT : SInst<"svext[_{d}]", "dddi", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ext", [IsStreamingCompatible], [ImmCheck<2, ImmCheckExtract, 1>]>;
defm SVLASTA : SVEPerm<"svlasta[_{d}]", "sPd", "aarch64_sve_lasta">;
@@ -1109,11 +1109,11 @@ def SVPFALSE : SInst<"svpfalse[_b]", "Pv", "", MergeNone, "", [IsOverloadNone, I
def SVPTRUE_PAT : SInst<"svptrue_pat_{d}", "PI", "PcPsPiPl", MergeNone, "aarch64_sve_ptrue", [IsStreamingCompatible]>;
def SVPTRUE : SInst<"svptrue_{d}", "Pv", "PcPsPiPl", MergeNone, "aarch64_sve_ptrue", [IsAppendSVALL, IsStreamingCompatible]>;
-def SVDUPQ_B8 : SInst<"svdupq[_n]_{d}", "Pssssssssssssssss", "Pc", MergeNone>;
-def SVDUPQ_B16 : SInst<"svdupq[_n]_{d}", "Pssssssss", "Ps", MergeNone>;
-def SVDUPQ_B32 : SInst<"svdupq[_n]_{d}", "Pssss", "Pi", MergeNone>;
-def SVDUPQ_B64 : SInst<"svdupq[_n]_{d}", "Pss", "Pl", MergeNone>;
-def SVDUP_N_B : SInst<"svdup[_n]_{d}", "Ps", "PcPsPiPl", MergeNone>;
+def SVDUPQ_B8 : SInst<"svdupq[_n]_{d}", "Pssssssssssssssss", "Pc", MergeNone, "", [IsStreamingCompatible]>;
+def SVDUPQ_B16 : SInst<"svdupq[_n]_{d}", "Pssssssss", "Ps", MergeNone, "", [IsStreamingCompatible]>;
+def SVDUPQ_B32 : SInst<"svdupq[_n]_{d}", "Pssss", "Pi", MergeNone, "", [IsStreamingCompatible]>;
+def SVDUPQ_B64 : SInst<"svdupq[_n]_{d}", "Pss", "Pl", MergeNone, "", [IsStreamingCompatible]>;
+def SVDUP_N_B : SInst<"svdup[_n]_{d}", "Ps", "PcPsPiPl", MergeNone, "", [IsStreamingCompatible]>;
////////////////////////////////////////////////////////////////////////////////
@@ -1268,10 +1268,10 @@ def SVZIP2Q : SInst<"svzip2q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNo
let TargetGuard = "sve,bf16,f64mm" in {
def SVTRN1Q_BF16 : SInst<"svtrn1q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_trn1q">;
def SVTRN2Q_BF16 : SInst<"svtrn2q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_trn2q">;
-def SVUZP1Q_BF16 : SInst<"svuzp1q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp1q">;
-def SVUZP2Q_BF16 : SInst<"svuzp2q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp2q">;
-def SVZIP1Q_BF16 : SInst<"svzip1q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip1q">;
-def SVZIP2Q_BF16 : SInst<"svzip2q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip2q">;
+def SVUZP1Q_BF16 : SInst<"svuzp1q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp1q", [IsStreamingCompatible]>;
+def SVUZP2Q_BF16 : SInst<"svuzp2q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp2q", [IsStreamingCompatible]>;
+def SVZIP1Q_BF16 : SInst<"svzip1q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip1q", [IsStreamingCompatible]>;
+def SVZIP2Q_BF16 : SInst<"svzip2q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip2q", [IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
@@ -1347,14 +1347,14 @@ def SVWHILEHS_U64 : SInst<"svwhilege_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNon
}
let TargetGuard = "sve2p1|sme2" in {
- def SVWHILEGE_S64_X2 : SInst<"svwhilege_{d}[_{1}]_x2", "2ll", "PcPsPiPl", MergeNone, "aarch64_sve_whilege_x2">;
- def SVWHILEGT_S64_X2 : SInst<"svwhilegt_{d}[_{1}]_x2", "2ll", "PcPsPiPl", MergeNone, "aarch64_sve_whilegt_x2">;
- def SVWHILEHI_U64_X2 : SInst<"svwhilegt_{d}[_{1}]_x2", "2nn", "PcPsPiPl", MergeNone, "aarch64_sve_whilehi_x2">;
- def SVWHILEHS_U64_X2 : SInst<"svwhilege_{d}[_{1}]_x2", "2nn", "PcPsPiPl", MergeNone, "aarch64_sve_whilehs_x2">;
- def SVWHILELE_S64_X2 : SInst<"svwhilele_{d}[_{1}]_x2", "2ll", "PcPsPiPl", MergeNone, "aarch64_sve_whilele_x2">;
- def SVWHILELT_S64_X2 : SInst<"svwhilelt_{d}[_{1}]_x2", "2ll", "PcPsPiPl", MergeNone, "aarch64_sve_whilelt_x2">;
- def SVWHILELO_U64_X2 : SInst<"svwhilelt_{d}[_{1}]_x2", "2nn", "PcPsPiPl", MergeNone, "aarch64_sve_whilelo_x2">;
- def SVWHILELS_U64_X2 : SInst<"svwhilele_{d}[_{1}]_x2", "2nn", "PcPsPiPl", MergeNone, "aarch64_sve_whilels_x2">;
+ def SVWHILEGE_S64_X2 : SInst<"svwhilege_{d}[_{1}]_x2", "2ll", "PcPsPiPl", MergeNone, "aarch64_sve_whilege_x2", [IsStreamingOrSVE2p1]>;
+ def SVWHILEGT_S64_X2 : SInst<"svwhilegt_{d}[_{1}]_x2", "2ll", "PcPsPiPl", MergeNone, "aarch64_sve_whilegt_x2", [IsStreamingOrSVE2p1]>;
+ def SVWHILEHI_U64_X2 : SInst<"svwhilegt_{d}[_{1}]_x2", "2nn", "PcPsPiPl", MergeNone, "aarch64_sve_whilehi_x2", [IsStreamingOrSVE2p1]>;
+ def SVWHILEHS_U64_X2 : SInst<"svwhilege_{d}[_{1}]_x2", "2nn", "PcPsPiPl", MergeNone, "aarch64_sve_whilehs_x2", [IsStreamingOrSVE2p1]>;
+ def SVWHILELE_S64_X2 : SInst<"svwhilele_{d}[_{1}]_x2", "2ll", "PcPsPiPl", MergeNone, "aarch64_sve_whilele_x2", [IsStreamingOrSVE2p1]>;
+ def SVWHILELT_S64_X2 : SInst<"svwhilelt_{d}[_{1}]_x2", "2ll", "PcPsPiPl", MergeNone, "aarch64_sve_whilelt_x2", [IsStreamingOrSVE2p1]>;
+ def SVWHILELO_U64_X2 : SInst<"svwhilelt_{d}[_{1}]_x2", "2nn", "PcPsPiPl", MergeNone, "aarch64_sve_whilelo_x2", [IsStreamingOrSVE2p1]>;
+ def SVWHILELS_U64_X2 : SInst<"svwhilele_{d}[_{1}]_x2", "2nn", "PcPsPiPl", MergeNone, "aarch64_sve_whilels_x2", [IsStreamingOrSVE2p1]>;
}
@@ -1831,8 +1831,8 @@ def SVPMULLB_PAIR : SInst<"svpmullb_pair[_{d}]", "ddd", "UcUi", Mer
def SVPMULLB_PAIR_N : SInst<"svpmullb_pair[_n_{d}]", "dda", "UcUi", MergeNone, "aarch64_sve_pmullb_pair", [IsStreamingCompatible]>;
def SVPMULLT : SInst<"svpmullt[_{d}]", "dhh", "UsUl", MergeNone, "", [IsStreamingCompatible]>;
def SVPMULLT_N : SInst<"svpmullt[_n_{d}]", "dhR", "UsUl", MergeNone, "", [IsStreamingCompatible]>;
-def SVPMULLT_PAIR : SInst<"svpmullt_pair[_{d}]", "ddd", "UcUi", MergeNone, "aarch64_sve_pmullt_pair">;
-def SVPMULLT_PAIR_N : SInst<"svpmullt_pair[_n_{d}]", "dda", "UcUi", MergeNone, "aarch64_sve_pmullt_pair">;
+def SVPMULLT_PAIR : SInst<"svpmullt_pair[_{d}]", "ddd", "UcUi", MergeNone, "aarch64_sve_pmullt_pair", [IsStreamingCompatible]>;
+def SVPMULLT_PAIR_N : SInst<"svpmullt_pair[_n_{d}]", "dda", "UcUi", MergeNone, "aarch64_sve_pmullt_pair", [IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_while_x2.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_while_x2.c
index acead9be3f01d2a..475fa14e1165af8 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_while_x2.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_while_x2.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme2 -DTEST_SME -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2p1 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2p1 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2p1 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2p1 -S -disable-O0-optnone -Werror -o /dev/null %s
-// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme2 -S -disable-O0-optnone -Werror -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme2 -DTEST_SME -S -disable-O0-optnone -Werror -o /dev/null %s
#include <arm_sve.h>
#ifdef SVE_OVERLOADED_FORMS
@@ -14,6 +14,12 @@
#define SVE_ACLE_FUNC(A1,A2,A3) A1##A2##A3
#endif
+#ifndef TEST_SME
+#define ATTR
+#else
+#define ATTR __arm_streaming
+#endif
+
// CHECK-LABEL: define dso_local <vscale x 32 x i1> @test_svwhilege_b8_s64(
// CHECK-SAME: i64 noundef [[OP1:%.*]], i64 noundef [[OP2:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
@@ -34,7 +40,7 @@
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP3]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP4]]
//
-svboolx2_t test_svwhilege_b8_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilege_b8_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilege_b8,_s64,_x2)(op1, op2);
}
@@ -58,7 +64,7 @@ svboolx2_t test_svwhilege_b8_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP3]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP4]]
//
-svboolx2_t test_svwhilege_b8_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilege_b8_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilege_b8,_u64,_x2)(op1, op2);
}
@@ -86,7 +92,7 @@ svboolx2_t test_svwhilege_b8_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilege_b16_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilege_b16_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilege_b16,_s64,_x2)(op1, op2);
}
@@ -114,7 +120,7 @@ svboolx2_t test_svwhilege_b16_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilege_b16_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilege_b16_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilege_b16,_u64,_x2)(op1, op2);
}
@@ -142,7 +148,7 @@ svboolx2_t test_svwhilege_b16_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilege_b32_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilege_b32_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilege_b32,_s64,_x2)(op1, op2);
}
@@ -170,7 +176,7 @@ svboolx2_t test_svwhilege_b32_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilege_b32_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilege_b32_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilege_b32,_u64,_x2)(op1, op2);
}
@@ -198,7 +204,7 @@ svboolx2_t test_svwhilege_b32_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilege_b64_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilege_b64_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilege_b64,_s64,_x2)(op1, op2);
}
@@ -226,7 +232,7 @@ svboolx2_t test_svwhilege_b64_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilege_b64_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilege_b64_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilege_b64,_u64,_x2)(op1, op2);
}
@@ -250,7 +256,7 @@ svboolx2_t test_svwhilege_b64_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP3]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP4]]
//
-svboolx2_t test_svwhilegt_b8_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilegt_b8_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilegt_b8,_s64,_x2)(op1, op2);
}
@@ -274,7 +280,7 @@ svboolx2_t test_svwhilegt_b8_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP3]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP4]]
//
-svboolx2_t test_svwhilegt_b8_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilegt_b8_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilegt_b8,_u64,_x2)(op1, op2);
}
@@ -302,7 +308,7 @@ svboolx2_t test_svwhilegt_b8_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilegt_b16_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilegt_b16_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilegt_b16,_s64,_x2)(op1, op2);
}
@@ -330,7 +336,7 @@ svboolx2_t test_svwhilegt_b16_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilegt_b16_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilegt_b16_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilegt_b16,_u64,_x2)(op1, op2);
}
@@ -358,7 +364,7 @@ svboolx2_t test_svwhilegt_b16_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilegt_b32_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilegt_b32_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilegt_b32,_s64,_x2)(op1, op2);
}
@@ -386,7 +392,7 @@ svboolx2_t test_svwhilegt_b32_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilegt_b32_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilegt_b32_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilegt_b32,_u64,_x2)(op1, op2);
}
@@ -414,7 +420,7 @@ svboolx2_t test_svwhilegt_b32_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilegt_b64_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilegt_b64_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilegt_b64,_s64,_x2)(op1, op2);
}
@@ -442,7 +448,7 @@ svboolx2_t test_svwhilegt_b64_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilegt_b64_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilegt_b64_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilegt_b64,_u64,_x2)(op1, op2);
}
@@ -466,7 +472,7 @@ svboolx2_t test_svwhilegt_b64_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP3]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP4]]
//
-svboolx2_t test_svwhilele_b8_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilele_b8_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilele_b8,_s64,_x2)(op1, op2);
}
@@ -490,7 +496,7 @@ svboolx2_t test_svwhilele_b8_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP3]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP4]]
//
-svboolx2_t test_svwhilele_b8_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilele_b8_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilele_b8,_u64,_x2)(op1, op2);
}
@@ -518,7 +524,7 @@ svboolx2_t test_svwhilele_b8_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilele_b16_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilele_b16_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilele_b16,_s64,_x2)(op1, op2);
}
@@ -546,7 +552,7 @@ svboolx2_t test_svwhilele_b16_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilele_b16_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilele_b16_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilele_b16,_u64,_x2)(op1, op2);
}
@@ -574,7 +580,7 @@ svboolx2_t test_svwhilele_b16_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilele_b32_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilele_b32_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilele_b32,_s64,_x2)(op1, op2);
}
@@ -602,7 +608,7 @@ svboolx2_t test_svwhilele_b32_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilele_b32_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilele_b32_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilele_b32,_u64,_x2)(op1, op2);
}
@@ -630,7 +636,7 @@ svboolx2_t test_svwhilele_b32_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilele_b64_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilele_b64_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilele_b64,_s64,_x2)(op1, op2);
}
@@ -658,7 +664,7 @@ svboolx2_t test_svwhilele_b64_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilele_b64_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilele_b64_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilele_b64,_u64,_x2)(op1, op2);
}
@@ -682,7 +688,7 @@ svboolx2_t test_svwhilele_b64_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP3]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP4]]
//
-svboolx2_t test_svwhilelt_b8_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilelt_b8_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilelt_b8,_s64,_x2)(op1, op2);
}
@@ -706,7 +712,7 @@ svboolx2_t test_svwhilelt_b8_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP3]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP4]]
//
-svboolx2_t test_svwhilelt_b8_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilelt_b8_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilelt_b8,_u64,_x2)(op1, op2);
}
@@ -734,7 +740,7 @@ svboolx2_t test_svwhilelt_b8_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilelt_b16_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilelt_b16_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilelt_b16,_s64,_x2)(op1, op2);
}
@@ -762,7 +768,7 @@ svboolx2_t test_svwhilelt_b16_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilelt_b16_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilelt_b16_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilelt_b16,_u64,_x2)(op1, op2);
}
@@ -790,7 +796,7 @@ svboolx2_t test_svwhilelt_b16_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilelt_b32_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilelt_b32_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilelt_b32,_s64,_x2)(op1, op2);
}
@@ -818,7 +824,7 @@ svboolx2_t test_svwhilelt_b32_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilelt_b32_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilelt_b32_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilelt_b32,_u64,_x2)(op1, op2);
}
@@ -846,7 +852,7 @@ svboolx2_t test_svwhilelt_b32_u64(uint64_t op1, uint64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilelt_b64_s64(int64_t op1, int64_t op2) {
+svboolx2_t test_svwhilelt_b64_s64(int64_t op1, int64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilelt_b64,_s64,_x2)(op1, op2);
}
@@ -874,6 +880,6 @@ svboolx2_t test_svwhilelt_b64_s64(int64_t op1, int64_t op2) {
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]], i64 16)
// CPP-CHECK-NEXT: ret <vscale x 32 x i1> [[TMP6]]
//
-svboolx2_t test_svwhilelt_b64_u64(uint64_t op1, uint64_t op2) {
+svboolx2_t test_svwhilelt_b64_u64(uint64_t op1, uint64_t op2) ATTR {
return SVE_ACLE_FUNC(svwhilelt_b64,_u64,_x2)(op1, op2);
}
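For completeness, the `IsStreamingOrSVE2p1` behaviour that the updated RUN lines and the new `ATTR`/`TEST_SME` macro exercise looks roughly like this (a sketch, not part of the patch):

#include <arm_sve.h>

// Accepted under +sme2: a streaming caller satisfies
// IsStreamingOrSVE2p1.
svboolx2_t while_pair_streaming(int64_t a, int64_t b) __arm_streaming {
  return svwhilege_b8_s64_x2(a, b);
}

// Accepted under +sve2p1: non-streaming SVE2p1 code also satisfies
// IsStreamingOrSVE2p1.
svboolx2_t while_pair_sve2p1(int64_t a, int64_t b) {
  return svwhilege_b8_s64_x2(a, b);
}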