[clang] 6f1e430 - [AArch64] Alter v8.5a FRINT neon intrinsics to be target-based, not preprocessor based
David Green via cfe-commits
cfe-commits at lists.llvm.org
Mon Oct 24 03:22:11 PDT 2022
Author: David Green
Date: 2022-10-24T11:22:06+01:00
New Revision: 6f1e430360591e22fb163ec77b78efd2de4c1d95
URL: https://github.com/llvm/llvm-project/commit/6f1e430360591e22fb163ec77b78efd2de4c1d95
DIFF: https://github.com/llvm/llvm-project/commit/6f1e430360591e22fb163ec77b78efd2de4c1d95.diff
LOG: [AArch64] Alter v8.5a FRINT neon intrinsics to be target-based, not preprocessor based
This switches the v8.5-a FRINT intrinsics over to be target-gated,
rather than guarded behind preprocessor defines. This one is pretty
simple, being AArch64 only.
Differential Revision: https://reviews.llvm.org/D135646
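As a minimal usage sketch (the function name round_within_int32 is illustrative, not part of this commit; it mirrors the Sema test added below): with the intrinsics target-gated, a file built for a baseline AArch64 target can still call the FRINT intrinsics from a function that enables armv8.5-a via a target attribute, and without the feature the call is now diagnosed at compile time rather than hidden by a preprocessor guard.

#include <arm_neon.h>

/* Allowed: the v8.5-a FRINT intrinsics are gated on the target features of
   the calling function, not on a preprocessor define set by -march. */
__attribute__((target("arch=armv8.5-a")))
float32x4_t round_within_int32(float32x4_t v) {
  return vrnd32xq_f32(v);
}

/* Without the target attribute (and without -march=armv8.5-a on the command
   line), the same call is rejected with an error like:
     always_inline function 'vrnd32xq_f32' requires target feature 'v8.5a' */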
Added:
Modified:
clang/include/clang/Basic/arm_neon.td
clang/lib/CodeGen/CGBuiltin.cpp
clang/test/Sema/aarch64-neon-target.c
Removed:
################################################################################
diff --git a/clang/include/clang/Basic/arm_neon.td b/clang/include/clang/Basic/arm_neon.td
index a5e9dc2f16390..a7737a5f81e06 100644
--- a/clang/include/clang/Basic/arm_neon.td
+++ b/clang/include/clang/Basic/arm_neon.td
@@ -1225,7 +1225,7 @@ def FRINTZ_S64 : SInst<"vrnd", "..", "dQd">;
def FRINTI_S64 : SInst<"vrndi", "..", "dQd">;
}
-let ArchGuard = "defined(__aarch64__) && defined(__ARM_FEATURE_FRINT)" in {
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.5a" in {
def FRINT32X_S32 : SInst<"vrnd32x", "..", "fQf">;
def FRINT32Z_S32 : SInst<"vrnd32z", "..", "fQf">;
def FRINT64X_S32 : SInst<"vrnd64x", "..", "fQf">;
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 2a3da01febe2b..fbb6e85e37d6e 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -6122,14 +6122,14 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
- NEONMAP1(vrnd32x_v, aarch64_neon_frint32x, Add1ArgType),
- NEONMAP1(vrnd32xq_v, aarch64_neon_frint32x, Add1ArgType),
- NEONMAP1(vrnd32z_v, aarch64_neon_frint32z, Add1ArgType),
- NEONMAP1(vrnd32zq_v, aarch64_neon_frint32z, Add1ArgType),
- NEONMAP1(vrnd64x_v, aarch64_neon_frint64x, Add1ArgType),
- NEONMAP1(vrnd64xq_v, aarch64_neon_frint64x, Add1ArgType),
- NEONMAP1(vrnd64z_v, aarch64_neon_frint64z, Add1ArgType),
- NEONMAP1(vrnd64zq_v, aarch64_neon_frint64z, Add1ArgType),
+ NEONMAP1(vrnd32x_f32, aarch64_neon_frint32x, Add1ArgType),
+ NEONMAP1(vrnd32xq_f32, aarch64_neon_frint32x, Add1ArgType),
+ NEONMAP1(vrnd32z_f32, aarch64_neon_frint32z, Add1ArgType),
+ NEONMAP1(vrnd32zq_f32, aarch64_neon_frint32z, Add1ArgType),
+ NEONMAP1(vrnd64x_f32, aarch64_neon_frint64x, Add1ArgType),
+ NEONMAP1(vrnd64xq_f32, aarch64_neon_frint64x, Add1ArgType),
+ NEONMAP1(vrnd64z_f32, aarch64_neon_frint64z, Add1ArgType),
+ NEONMAP1(vrnd64zq_f32, aarch64_neon_frint64z, Add1ArgType),
NEONMAP0(vrndi_v),
NEONMAP0(vrndiq_v),
NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
@@ -11313,26 +11313,26 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
: Intrinsic::trunc;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
}
- case NEON::BI__builtin_neon_vrnd32x_v:
- case NEON::BI__builtin_neon_vrnd32xq_v: {
+ case NEON::BI__builtin_neon_vrnd32x_f32:
+ case NEON::BI__builtin_neon_vrnd32xq_f32: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::aarch64_neon_frint32x;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x");
}
- case NEON::BI__builtin_neon_vrnd32z_v:
- case NEON::BI__builtin_neon_vrnd32zq_v: {
+ case NEON::BI__builtin_neon_vrnd32z_f32:
+ case NEON::BI__builtin_neon_vrnd32zq_f32: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::aarch64_neon_frint32z;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z");
}
- case NEON::BI__builtin_neon_vrnd64x_v:
- case NEON::BI__builtin_neon_vrnd64xq_v: {
+ case NEON::BI__builtin_neon_vrnd64x_f32:
+ case NEON::BI__builtin_neon_vrnd64xq_f32: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::aarch64_neon_frint64x;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x");
}
- case NEON::BI__builtin_neon_vrnd64z_v:
- case NEON::BI__builtin_neon_vrnd64zq_v: {
+ case NEON::BI__builtin_neon_vrnd64z_f32:
+ case NEON::BI__builtin_neon_vrnd64zq_f32: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::aarch64_neon_frint64z;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
diff --git a/clang/test/Sema/aarch64-neon-target.c b/clang/test/Sema/aarch64-neon-target.c
index 5007f8f7dfae9..8b0f7f6006502 100644
--- a/clang/test/Sema/aarch64-neon-target.c
+++ b/clang/test/Sema/aarch64-neon-target.c
@@ -41,6 +41,11 @@ void bf16(uint32x2_t v2i32, uint32x4_t v4i32, uint16x8_t v8i16, uint8x16_t v16i8
vcvt_bf16_f32(v4f32);
}
+__attribute__((target("arch=armv8.5-a")))
+void test_v85(float32x4_t v4f32) {
+ vrnd32xq_f32(v4f32);
+}
+
void undefined(uint32x2_t v2i32, uint32x4_t v4i32, uint16x8_t v8i16, uint8x16_t v16i8, uint8x8_t v8i8, float32x2_t v2f32, float32x4_t v4f32, float16x4_t v4f16, bfloat16x4_t v4bf16, __bf16 bf16) {
// dotprod
vdot_u32(v2i32, v8i8, v8i8); // expected-error {{always_inline function 'vdot_u32' requires target feature 'dotprod'}}
@@ -63,4 +68,6 @@ void undefined(uint32x2_t v2i32, uint32x4_t v4i32, uint16x8_t v8i16, uint8x16_t
vld1_bf16(0); // expected-error {{'__builtin_neon_vld1_bf16' needs target feature bf16}}
vcvt_f32_bf16(v4bf16); // expected-error {{always_inline function 'vcvt_f32_bf16' requires target feature 'bf16'}}
vcvt_bf16_f32(v4f32); // expected-error {{always_inline function 'vcvt_bf16_f32' requires target feature 'bf16'}}
+ // 8.5 - frint
+ vrnd32xq_f32(v4f32); // expected-error {{always_inline function 'vrnd32xq_f32' requires target feature 'v8.5a'}}
}