[clang] [llvm] [LV][LAA] Vectorize math lib calls with mem write-only attribute (PR #78432)
Paschalis Mpeis via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 27 08:35:07 PST 2024
https://github.com/paschalis-mpeis updated https://github.com/llvm/llvm-project/pull/78432
>From a74ba110994e4535cd6c9206aa02d50503fb5577 Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Tue, 27 Feb 2024 15:00:28 +0000
Subject: [PATCH 1/7] [AArch64][TLI] Add TLI mappings for ArmPL modf, sincos,
sincospi
---
llvm/include/llvm/Analysis/VecFuncs.def | 6 ++
.../AArch64/veclib-function-calls.ll | 66 ++++++++++---------
llvm/test/Transforms/Util/add-TLI-mappings.ll | 32 +++++++--
3 files changed, 67 insertions(+), 37 deletions(-)
diff --git a/llvm/include/llvm/Analysis/VecFuncs.def b/llvm/include/llvm/Analysis/VecFuncs.def
index 394e4a05fbc0cf..10f1333cf8885c 100644
--- a/llvm/include/llvm/Analysis/VecFuncs.def
+++ b/llvm/include/llvm/Analysis/VecFuncs.def
@@ -1005,6 +1005,8 @@ TLI_DEFINE_VECFUNC("llvm.log2.f32", "armpl_svlog2_f32_x", SCALABLE(4), MASKED, "
TLI_DEFINE_VECFUNC("modf", "armpl_vmodfq_f64", FIXED(2), NOMASK, "_ZGV_LLVM_N2vl8")
TLI_DEFINE_VECFUNC("modff", "armpl_vmodfq_f32", FIXED(4), NOMASK, "_ZGV_LLVM_N4vl4")
+TLI_DEFINE_VECFUNC("modf", "armpl_svmodf_f64_x", SCALABLE(2), MASKED, "_ZGVsMxvl8")
+TLI_DEFINE_VECFUNC("modff", "armpl_svmodf_f32_x", SCALABLE(4), MASKED, "_ZGVsMxvl4")
TLI_DEFINE_VECFUNC("nextafter", "armpl_vnextafterq_f64", FIXED(2), NOMASK, "_ZGV_LLVM_N2vv")
TLI_DEFINE_VECFUNC("nextafterf", "armpl_vnextafterq_f32", FIXED(4), NOMASK, "_ZGV_LLVM_N4vv")
@@ -1033,9 +1035,13 @@ TLI_DEFINE_VECFUNC("llvm.sin.f32", "armpl_svsin_f32_x", SCALABLE(4), MASKED, "_Z
TLI_DEFINE_VECFUNC("sincos", "armpl_vsincosq_f64", FIXED(2), NOMASK, "_ZGV_LLVM_N2vl8l8")
TLI_DEFINE_VECFUNC("sincosf", "armpl_vsincosq_f32", FIXED(4), NOMASK, "_ZGV_LLVM_N4vl4l4")
+TLI_DEFINE_VECFUNC("sincos", "armpl_svsincos_f64_x", SCALABLE(2), MASKED, "_ZGVsMxvl8l8")
+TLI_DEFINE_VECFUNC("sincosf", "armpl_svsincos_f32_x", SCALABLE(4), MASKED, "_ZGVsMxvl4l4")
TLI_DEFINE_VECFUNC("sincospi", "armpl_vsincospiq_f64", FIXED(2), NOMASK, "_ZGV_LLVM_N2vl8l8")
TLI_DEFINE_VECFUNC("sincospif", "armpl_vsincospiq_f32", FIXED(4), NOMASK, "_ZGV_LLVM_N4vl4l4")
+TLI_DEFINE_VECFUNC("sincospi", "armpl_svsincospi_f64_x", SCALABLE(2), MASKED, "_ZGVsMxvl8l8")
+TLI_DEFINE_VECFUNC("sincospif", "armpl_svsincospi_f32_x", SCALABLE(4), MASKED, "_ZGVsMxvl4l4")
TLI_DEFINE_VECFUNC("sinh", "armpl_vsinhq_f64", FIXED(2), NOMASK, "_ZGV_LLVM_N2v")
TLI_DEFINE_VECFUNC("sinhf", "armpl_vsinhq_f32", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls.ll b/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls.ll
index dd1495626eb984..d9cc630482fc80 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls.ll
@@ -2925,11 +2925,12 @@ define void @modf_f64(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
;
; ARMPL-SVE-LABEL: define void @modf_f64
; ARMPL-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE: [[DATA:%.*]] = call double @modf(double [[NUM:%.*]], ptr [[GEPB:%.*]]) #[[ATTR4:[0-9]+]]
+; ARMPL-SVE: [[TMP23:%.*]] = call <vscale x 2 x double> @armpl_svmodf_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP22:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-SVE-NOPRED-LABEL: define void @modf_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE-NOPRED: [[TMP5:%.*]] = call <2 x double> @armpl_vmodfq_f64(<2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP4:%.*]])
+; ARMPL-SVE-NOPRED: [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svmodf_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP16:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
+; ARMPL-SVE-NOPRED: [[DATA:%.*]] = call double @modf(double [[NUM:%.*]], ptr [[GEPB:%.*]]) #[[ATTR64:[0-9]+]]
;
entry:
br label %for.body
@@ -2970,11 +2971,12 @@ define void @modf_f32(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
;
; ARMPL-SVE-LABEL: define void @modf_f32
; ARMPL-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE: [[DATA:%.*]] = call float @modff(float [[NUM:%.*]], ptr [[GEPB:%.*]]) #[[ATTR5:[0-9]+]]
+; ARMPL-SVE: [[TMP23:%.*]] = call <vscale x 4 x float> @armpl_svmodf_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP22:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-SVE-NOPRED-LABEL: define void @modf_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE-NOPRED: [[TMP5:%.*]] = call <4 x float> @armpl_vmodfq_f32(<4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP4:%.*]])
+; ARMPL-SVE-NOPRED: [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svmodf_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP16:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
+; ARMPL-SVE-NOPRED: [[DATA:%.*]] = call float @modff(float [[NUM:%.*]], ptr [[GEPB:%.*]]) #[[ATTR65:[0-9]+]]
;
entry:
br label %for.body
@@ -3023,7 +3025,7 @@ define void @nextafter_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @nextafter_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 2 x double> @armpl_svnextafter_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], <vscale x 2 x double> [[WIDE_LOAD]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @nextafter(double [[IN:%.*]], double [[IN]]) #[[ATTR64:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @nextafter(double [[IN:%.*]], double [[IN]]) #[[ATTR66:[0-9]+]]
;
entry:
br label %for.body
@@ -3068,7 +3070,7 @@ define void @nextafter_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @nextafter_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 4 x float> @armpl_svnextafter_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @nextafterf(float [[IN:%.*]], float [[IN]]) #[[ATTR65:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @nextafterf(float [[IN:%.*]], float [[IN]]) #[[ATTR67:[0-9]+]]
;
entry:
br label %for.body
@@ -3116,7 +3118,7 @@ define void @pow_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @pow_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 2 x double> @armpl_svpow_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], <vscale x 2 x double> [[WIDE_LOAD]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @pow(double [[IN:%.*]], double [[IN]]) #[[ATTR66:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @pow(double [[IN:%.*]], double [[IN]]) #[[ATTR68:[0-9]+]]
;
entry:
br label %for.body
@@ -3161,7 +3163,7 @@ define void @pow_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @pow_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 4 x float> @armpl_svpow_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @powf(float [[IN:%.*]], float [[IN]]) #[[ATTR67:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @powf(float [[IN:%.*]], float [[IN]]) #[[ATTR69:[0-9]+]]
;
entry:
br label %for.body
@@ -3209,7 +3211,7 @@ define void @sin_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @sin_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 2 x double> @armpl_svsin_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @sin(double [[IN:%.*]]) #[[ATTR68:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @sin(double [[IN:%.*]]) #[[ATTR70:[0-9]+]]
;
entry:
br label %for.body
@@ -3254,7 +3256,7 @@ define void @sin_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @sin_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 4 x float> @armpl_svsin_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @sinf(float [[IN:%.*]]) #[[ATTR69:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @sinf(float [[IN:%.*]]) #[[ATTR71:[0-9]+]]
;
entry:
br label %for.body
@@ -3297,11 +3299,12 @@ define void @sincos_f64(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
;
; ARMPL-SVE-LABEL: define void @sincos_f64
; ARMPL-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE: call void @sincos(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR6:[0-9]+]]
+; ARMPL-SVE: call void @armpl_svsincos_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP23:%.*]], ptr [[TMP24:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-SVE-NOPRED-LABEL: define void @sincos_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE-NOPRED: call void @armpl_vsincosq_f64(<2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP5:%.*]], ptr [[TMP6:%.*]])
+; ARMPL-SVE-NOPRED: call void @armpl_svsincos_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP17:%.*]], ptr [[TMP18:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
+; ARMPL-SVE-NOPRED: call void @sincos(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR72:[0-9]+]]
;
entry:
br label %for.body
@@ -3341,11 +3344,12 @@ define void @sincos_f32(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
;
; ARMPL-SVE-LABEL: define void @sincos_f32
; ARMPL-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE: call void @sincosf(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR7:[0-9]+]]
+; ARMPL-SVE: call void @armpl_svsincos_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP23:%.*]], ptr [[TMP24:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-SVE-NOPRED-LABEL: define void @sincos_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE-NOPRED: call void @armpl_vsincosq_f32(<4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP5:%.*]], ptr [[TMP6:%.*]])
+; ARMPL-SVE-NOPRED: call void @armpl_svsincos_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP17:%.*]], ptr [[TMP18:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
+; ARMPL-SVE-NOPRED: call void @sincosf(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR73:[0-9]+]]
;
entry:
br label %for.body
@@ -3388,11 +3392,12 @@ define void @sincospi_f64(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
;
; ARMPL-SVE-LABEL: define void @sincospi_f64
; ARMPL-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE: call void @sincospi(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR8:[0-9]+]]
+; ARMPL-SVE: call void @armpl_svsincospi_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP23:%.*]], ptr [[TMP24:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-SVE-NOPRED-LABEL: define void @sincospi_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE-NOPRED: call void @armpl_vsincospiq_f64(<2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP5:%.*]], ptr [[TMP6:%.*]])
+; ARMPL-SVE-NOPRED: call void @armpl_svsincospi_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP17:%.*]], ptr [[TMP18:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
+; ARMPL-SVE-NOPRED: call void @sincospi(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR74:[0-9]+]]
;
entry:
br label %for.body
@@ -3432,11 +3437,12 @@ define void @sincospi_f32(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
;
; ARMPL-SVE-LABEL: define void @sincospi_f32
; ARMPL-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE: call void @sincospif(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR9:[0-9]+]]
+; ARMPL-SVE: call void @armpl_svsincospi_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP23:%.*]], ptr [[TMP24:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-SVE-NOPRED-LABEL: define void @sincospi_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE-NOPRED: call void @armpl_vsincospiq_f32(<4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP5:%.*]], ptr [[TMP6:%.*]])
+; ARMPL-SVE-NOPRED: call void @armpl_svsincospi_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP17:%.*]], ptr [[TMP18:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
+; ARMPL-SVE-NOPRED: call void @sincospif(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR75:[0-9]+]]
;
entry:
br label %for.body
@@ -3484,7 +3490,7 @@ define void @sinh_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @sinh_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 2 x double> @armpl_svsinh_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @sinh(double [[IN:%.*]]) #[[ATTR70:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @sinh(double [[IN:%.*]]) #[[ATTR76:[0-9]+]]
;
entry:
br label %for.body
@@ -3529,7 +3535,7 @@ define void @sinh_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @sinh_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 4 x float> @armpl_svsinh_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @sinhf(float [[IN:%.*]]) #[[ATTR71:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @sinhf(float [[IN:%.*]]) #[[ATTR77:[0-9]+]]
;
entry:
br label %for.body
@@ -3577,7 +3583,7 @@ define void @sinpi_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @sinpi_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 2 x double> @armpl_svsinpi_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @sinpi(double [[IN:%.*]]) #[[ATTR72:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @sinpi(double [[IN:%.*]]) #[[ATTR78:[0-9]+]]
;
entry:
br label %for.body
@@ -3622,7 +3628,7 @@ define void @sinpi_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @sinpi_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 4 x float> @armpl_svsinpi_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @sinpif(float [[IN:%.*]]) #[[ATTR73:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @sinpif(float [[IN:%.*]]) #[[ATTR79:[0-9]+]]
;
entry:
br label %for.body
@@ -3670,7 +3676,7 @@ define void @sqrt_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @sqrt_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 2 x double> @armpl_svsqrt_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @sqrt(double [[IN:%.*]]) #[[ATTR74:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @sqrt(double [[IN:%.*]]) #[[ATTR80:[0-9]+]]
;
entry:
br label %for.body
@@ -3715,7 +3721,7 @@ define void @sqrt_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @sqrt_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 4 x float> @armpl_svsqrt_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @sqrtf(float [[IN:%.*]]) #[[ATTR75:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @sqrtf(float [[IN:%.*]]) #[[ATTR81:[0-9]+]]
;
entry:
br label %for.body
@@ -3763,7 +3769,7 @@ define void @tan_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @tan_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 2 x double> @armpl_svtan_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @tan(double [[IN:%.*]]) #[[ATTR76:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @tan(double [[IN:%.*]]) #[[ATTR82:[0-9]+]]
;
entry:
br label %for.body
@@ -3808,7 +3814,7 @@ define void @tan_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @tan_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 4 x float> @armpl_svtan_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @tanf(float [[IN:%.*]]) #[[ATTR77:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @tanf(float [[IN:%.*]]) #[[ATTR83:[0-9]+]]
;
entry:
br label %for.body
@@ -3856,7 +3862,7 @@ define void @tanh_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @tanh_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 2 x double> @armpl_svtanh_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @tanh(double [[IN:%.*]]) #[[ATTR78:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @tanh(double [[IN:%.*]]) #[[ATTR84:[0-9]+]]
;
entry:
br label %for.body
@@ -3901,7 +3907,7 @@ define void @tanh_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @tanh_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 4 x float> @armpl_svtanh_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @tanhf(float [[IN:%.*]]) #[[ATTR79:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @tanhf(float [[IN:%.*]]) #[[ATTR85:[0-9]+]]
;
entry:
br label %for.body
@@ -3949,7 +3955,7 @@ define void @tgamma_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @tgamma_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 2 x double> @armpl_svtgamma_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @tgamma(double [[IN:%.*]]) #[[ATTR80:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call double @tgamma(double [[IN:%.*]]) #[[ATTR86:[0-9]+]]
;
entry:
br label %for.body
@@ -3994,7 +4000,7 @@ define void @tgamma_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; ARMPL-SVE-NOPRED-LABEL: define void @tgamma_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE-NOPRED: [[TMP9:%.*]] = call <vscale x 4 x float> @armpl_svtgamma_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @tgammaf(float [[IN:%.*]]) #[[ATTR81:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[CALL:%.*]] = tail call float @tgammaf(float [[IN:%.*]]) #[[ATTR87:[0-9]+]]
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/Util/add-TLI-mappings.ll b/llvm/test/Transforms/Util/add-TLI-mappings.ll
index d86e44f199b391..0e005ae75ef5a8 100644
--- a/llvm/test/Transforms/Util/add-TLI-mappings.ll
+++ b/llvm/test/Transforms/Util/add-TLI-mappings.ll
@@ -46,15 +46,21 @@
; SLEEFGNUABI-SAME: ptr @_ZGVsNxvl4l4_sincospif,
; SLEEFGNUABI_SAME; ptr @_ZGVnN4v_log10f,
; SLEEFGNUABI-SAME: ptr @_ZGVsMxv_log10f
-; ARMPL-SAME: [10 x ptr] [
+; ARMPL-SAME: [16 x ptr] [
; ARMPL-SAME: ptr @armpl_vmodfq_f64,
+; ARMPL-SAME: ptr @armpl_svmodf_f64_x,
; ARMPL-SAME: ptr @armpl_vmodfq_f32,
+; ARMPL-SAME: ptr @armpl_svmodf_f32_x,
; ARMPL-SAME: ptr @armpl_vsinq_f64,
; ARMPL-SAME: ptr @armpl_svsin_f64_x,
; ARMPL-SAME: ptr @armpl_vsincosq_f64,
+; ARMPL-SAME: ptr @armpl_svsincos_f64_x,
; ARMPL-SAME: ptr @armpl_vsincosq_f32,
+; ARMPL-SAME: ptr @armpl_svsincos_f32_x,
; ARMPL-SAME: ptr @armpl_vsincospiq_f64,
+; ARMPL-SAME: ptr @armpl_svsincospi_f64_x,
; ARMPL-SAME: ptr @armpl_vsincospiq_f32,
+; ARMPL-SAME: ptr @armpl_svsincospi_f32_x,
; ARMPL-SAME: ptr @armpl_vlog10q_f32,
; ARMPL-SAME: ptr @armpl_svlog10_f32_x
; COMMON-SAME: ], section "llvm.metadata"
@@ -195,13 +201,19 @@ declare float @llvm.log10.f32(float) #0
; SLEEFGNUABI: declare <vscale x 4 x float> @_ZGVsMxv_log10f(<vscale x 4 x float>, <vscale x 4 x i1>)
; ARMPL: declare <2 x double> @armpl_vmodfq_f64(<2 x double>, ptr)
+; ARMPL: declare <vscale x 2 x double> @armpl_svmodf_f64_x(<vscale x 2 x double>, ptr, <vscale x 2 x i1>)
; ARMPL: declare <4 x float> @armpl_vmodfq_f32(<4 x float>, ptr)
+; ARMPL: declare <vscale x 4 x float> @armpl_svmodf_f32_x(<vscale x 4 x float>, ptr, <vscale x 4 x i1>)
; ARMPL: declare <2 x double> @armpl_vsinq_f64(<2 x double>)
; ARMPL: declare <vscale x 2 x double> @armpl_svsin_f64_x(<vscale x 2 x double>, <vscale x 2 x i1>)
; ARMPL: declare void @armpl_vsincosq_f64(<2 x double>, ptr, ptr)
+; ARMPL: declare void @armpl_svsincos_f64_x(<vscale x 2 x double>, ptr, ptr, <vscale x 2 x i1>)
; ARMPL: declare void @armpl_vsincosq_f32(<4 x float>, ptr, ptr)
+; ARMPL: declare void @armpl_svsincos_f32_x(<vscale x 4 x float>, ptr, ptr, <vscale x 4 x i1>)
; ARMPL: declare void @armpl_vsincospiq_f64(<2 x double>, ptr, ptr)
+; ARMPL: declare void @armpl_svsincospi_f64_x(<vscale x 2 x double>, ptr, ptr, <vscale x 2 x i1>)
; ARMPL: declare void @armpl_vsincospiq_f32(<4 x float>, ptr, ptr)
+; ARMPL: declare void @armpl_svsincospi_f32_x(<vscale x 4 x float>, ptr, ptr, <vscale x 4 x i1>)
; ARMPL: declare <4 x float> @armpl_vlog10q_f32(<4 x float>)
; ARMPL: declare <vscale x 4 x float> @armpl_svlog10_f32_x(<vscale x 4 x float>, <vscale x 4 x i1>)
@@ -255,20 +267,26 @@ attributes #0 = { nounwind readnone }
; SLEEFGNUABI-SAME: _ZGVsMxv_llvm.log10.f32(_ZGVsMxv_log10f)" }
; ARMPL: attributes #[[MODF]] = { "vector-function-abi-variant"=
-; ARMPL-SAME: "_ZGV_LLVM_N2vl8_modf(armpl_vmodfq_f64)" }
+; ARMPL-SAME: "_ZGV_LLVM_N2vl8_modf(armpl_vmodfq_f64),
+; ARMPL-SAME: _ZGVsMxvl8_modf(armpl_svmodf_f64_x)" }
; ARMPL: attributes #[[MODFF]] = { "vector-function-abi-variant"=
-; ARMPL-SAME: "_ZGV_LLVM_N4vl4_modff(armpl_vmodfq_f32)" }
+; ARMPL-SAME: "_ZGV_LLVM_N4vl4_modff(armpl_vmodfq_f32),
+; ARMPL-SAME: _ZGVsMxvl4_modff(armpl_svmodf_f32_x)" }
; ARMPL: attributes #[[SIN]] = { "vector-function-abi-variant"=
; ARMPL-SAME: "_ZGV_LLVM_N2v_sin(armpl_vsinq_f64),
; ARMPL-SAME _ZGVsMxv_sin(armpl_svsin_f64_x)" }
; ARMPL: attributes #[[SINCOS]] = { "vector-function-abi-variant"=
-; ARMPL-SAME: "_ZGV_LLVM_N2vl8l8_sincos(armpl_vsincosq_f64)" }
+; ARMPL-SAME: "_ZGV_LLVM_N2vl8l8_sincos(armpl_vsincosq_f64),
+; ARMPL-SAME: _ZGVsMxvl8l8_sincos(armpl_svsincos_f64_x)" }
; ARMPL: attributes #[[SINCOSF]] = { "vector-function-abi-variant"=
-; ARMPL-SAME: "_ZGV_LLVM_N4vl4l4_sincosf(armpl_vsincosq_f32)" }
+; ARMPL-SAME: "_ZGV_LLVM_N4vl4l4_sincosf(armpl_vsincosq_f32),
+; ARMPL-SAME: _ZGVsMxvl4l4_sincosf(armpl_svsincos_f32_x)" }
; ARMPL: attributes #[[SINCOSPI]] = { "vector-function-abi-variant"=
-; ARMPL-SAME: "_ZGV_LLVM_N2vl8l8_sincospi(armpl_vsincospiq_f64)" }
+; ARMPL-SAME: "_ZGV_LLVM_N2vl8l8_sincospi(armpl_vsincospiq_f64),
+; ARMPL-SAME: _ZGVsMxvl8l8_sincospi(armpl_svsincospi_f64_x)" }
; ARMPL: attributes #[[SINCOSPIF]] = { "vector-function-abi-variant"=
-; ARMPL-SAME: "_ZGV_LLVM_N4vl4l4_sincospif(armpl_vsincospiq_f32)" }
+; ARMPL-SAME: "_ZGV_LLVM_N4vl4l4_sincospif(armpl_vsincospiq_f32),
+; ARMPL-SAME: _ZGVsMxvl4l4_sincospif(armpl_svsincospi_f32_x)" }
; ARMPL: attributes #[[LOG10]] = { "vector-function-abi-variant"=
; ARMPL-SAME: "_ZGV_LLVM_N4v_llvm.log10.f32(armpl_vlog10q_f32),
; ARMPL-SAME _ZGVsMxv_llvm.log10.f32(armpl_svlog10_f32_x)" }
>From b0564fa66d474fd45326dae6429e5e592ec22a85 Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Tue, 16 Jan 2024 10:53:09 +0000
Subject: [PATCH 2/7] LAA cannot vectorize lib calls like modf/modff
Math library functions such as modf/modff carry the memory write-only
attribute. When the target provides vectorized mappings for them, LAA
should allow vectorization.
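For illustration, here is the shape of loop in question, sketched in C/C++
to mirror the modf_f64 test added below (the wrapper name is ours). The
modf call writes only through its pointer argument:

    #include <math.h>

    // modf stores the integral part through its second (pointer)
    // argument and returns the fractional part, so the call's only
    // memory effect is a write through 'out2 + i'.
    void modf_f64_loop(double *in, double *out1, double *out2, int N) {
      for (int i = 0; i < N; ++i)
        out1[i] = modf(in[i], out2 + i);
    }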
---
...arch64-veclib-function-calls-linear-ptrs.c | 57 +++++++++++++++++++
1 file changed, 57 insertions(+)
create mode 100644 clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
diff --git a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
new file mode 100644
index 00000000000000..a449fac147058a
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
@@ -0,0 +1,57 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --filter "call.*(frexp|modf)" --version 4
+// RUN: %clang --target=aarch64-linux-gnu -march=armv8-a+sve -O3 -mllvm -vector-library=ArmPL -mllvm -force-vector-interleave=1 -mllvm -prefer-predicate-over-epilogue=predicate-dont-vectorize -emit-llvm -S -o - %s | FileCheck %s
+
+// REQUIRES: aarch64-registered-target
+
+/*
+Test vectorization of math functions that have the write-only memory
+attribute set. Given that they have vectorized counterparts, they should
+vectorize.
+*/
+
+// The following define is required to access some math functions.
+#define _GNU_SOURCE
+#include <math.h>
+
+// frexp/frexpf have no TLI mappings yet.
+
+// CHECK-LABEL: define dso_local void @frexp_f64(
+// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// CHECK: [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2:[0-9]+]]
+//
+void frexp_f64(double *in, double *out1, int *out2, int N) {
+ for (int i = 0; i < N; ++i)
+ *out1 = frexp(in[i], out2+i);
+}
+
+// CHECK-LABEL: define dso_local void @frexp_f32(
+// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK: [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2]]
+//
+void frexp_f32(float *in, float *out1, int *out2, int N) {
+ for (int i = 0; i < N; ++i)
+ *out1 = frexpf(in[i], out2+i);
+}
+
+
+// TODO: LAA must allow vectorization.
+
+// CHECK-LABEL: define dso_local void @modf_f64(
+// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK: [[CALL:%.*]] = tail call double @modf(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR3:[0-9]+]]
+//
+void modf_f64(double *in, double *out1, double *out2, int N) {
+ for (int i = 0; i < N; ++i)
+ out1[i] = modf(in[i], out2+i);
+}
+
+// TODO: LAA must allow vectorization.
+
+// CHECK-LABEL: define dso_local void @modf_f32(
+// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK: [[CALL:%.*]] = tail call float @modff(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR4:[0-9]+]]
+//
+void modf_f32(float *in, float *out1, float *out2, int N) {
+ for (int i = 0; i < N; ++i)
+ out1[i] = modff(in[i], out2+i);
+}
>From a0c54a3aabd00011f28fdb40661f4c002fbfb5d9 Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Wed, 17 Jan 2024 09:44:45 +0000
Subject: [PATCH 3/7] [LV][LAA] Vectorize math lib calls with mem write-only
attribute
Teach LAA to treat as safe specific math library calls that are known to
have the memory write-only attribute set. These attributes are set on
calls by inferNonMandatoryLibFuncAttrs in BuildLibCalls.cpp; the current
candidates are modf/modff and frexp/frexpf.
This applies only when TLI reports that the calls have vectorized
counterparts.
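A minimal sketch of that gate, assuming LLVM's TargetLibraryInfo API (the
actual diff below is authoritative; the helper name here is illustrative):

    #include "llvm/Analysis/TargetLibraryInfo.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Recognise libm calls whose only memory effect is the write
    // through their pointer argument.
    static bool isWriteOnlyMathLibCall(const TargetLibraryInfo *TLI,
                                       const Instruction &I) {
      auto *Call = dyn_cast<CallInst>(&I);
      if (!Call)
        return false;
      LibFunc Func;
      if (!TLI->getLibFunc(*Call, Func))
        return false;
      return Func == LibFunc_modf || Func == LibFunc_modff ||
             Func == LibFunc_frexp || Func == LibFunc_frexpf;
    }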
---
...arch64-veclib-function-calls-linear-ptrs.c | 15 ++++++---------
llvm/lib/Analysis/LoopAccessAnalysis.cpp | 19 +++++++++++++++++++
2 files changed, 25 insertions(+), 9 deletions(-)
diff --git a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
index a449fac147058a..957b3f5cb235d3 100644
--- a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
+++ b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
@@ -17,7 +17,7 @@ vectorize.
// CHECK-LABEL: define dso_local void @frexp_f64(
// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
-// CHECK: [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2:[0-9]+]]
+// CHECK: [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR5:[0-9]+]]
//
void frexp_f64(double *in, double *out1, int *out2, int N) {
for (int i = 0; i < N; ++i)
@@ -26,30 +26,27 @@ void frexp_f64(double *in, double *out1, int *out2, int N) {
// CHECK-LABEL: define dso_local void @frexp_f32(
// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK: [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2]]
+// CHECK: [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR5]]
//
void frexp_f32(float *in, float *out1, int *out2, int N) {
for (int i = 0; i < N; ++i)
*out1 = frexpf(in[i], out2+i);
}
-
-// TODO: LAA must allow vectorization.
-
// CHECK-LABEL: define dso_local void @modf_f64(
// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK: [[CALL:%.*]] = tail call double @modf(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR3:[0-9]+]]
+// CHECK: [[TMP11:%.*]] = tail call <vscale x 2 x double> @armpl_svmodf_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP10:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+// CHECK: [[CALL:%.*]] = tail call double @modf(double noundef [[TMP14:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR6:[0-9]+]]
//
void modf_f64(double *in, double *out1, double *out2, int N) {
for (int i = 0; i < N; ++i)
out1[i] = modf(in[i], out2+i);
}
-// TODO: LAA must allow vectorization.
-
// CHECK-LABEL: define dso_local void @modf_f32(
// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK: [[CALL:%.*]] = tail call float @modff(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR4:[0-9]+]]
+// CHECK: [[TMP11:%.*]] = tail call <vscale x 4 x float> @armpl_svmodf_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP10:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+// CHECK: [[CALL:%.*]] = tail call float @modff(float noundef [[TMP14:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR7:[0-9]+]]
//
void modf_f32(float *in, float *out1, float *out2, int N) {
for (int i = 0; i < N; ++i)
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index dd6b88fee415a7..523d60b3ed15a2 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2309,6 +2309,20 @@ bool LoopAccessInfo::canAnalyzeLoop() {
return true;
}
+/// Returns whether \p I is a known math library call that has the memory
+/// write-only attribute set.
+static bool isMathLibCallMemWriteOnly(const TargetLibraryInfo *TLI,
+ const Instruction &I) {
+ auto *Call = dyn_cast<CallInst>(&I);
+ if (!Call)
+ return false;
+
+ LibFunc Func;
+ TLI->getLibFunc(*Call, Func);
+ return Func == LibFunc::LibFunc_modf || Func == LibFunc::LibFunc_modff ||
+ Func == LibFunc::LibFunc_frexp || Func == LibFunc::LibFunc_frexpf;
+}
+
void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
const TargetLibraryInfo *TLI,
DominatorTree *DT) {
@@ -2405,6 +2419,11 @@ void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
// Save 'store' instructions. Abort if other instructions write to memory.
if (I.mayWriteToMemory()) {
+ // We can safely handle math functions that have vectorized
+ // counterparts and have the memory write-only attribute set.
+ if (isMathLibCallMemWriteOnly(TLI, I))
+ continue;
+
auto *St = dyn_cast<StoreInst>(&I);
if (!St) {
recordAnalysis("CantVectorizeInstruction", St)
>From 53a165c35f66cd81ec65b45ea9cea7e55c0e4742 Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Thu, 18 Jan 2024 14:14:00 +0000
Subject: [PATCH 4/7] Add check for the 'memory(argmem: write)' attribute.
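In API terms the new guard queries the call-site memory effects; a hedged
sketch using LLVM's MemoryEffects API (the helper name is ours):

    #include "llvm/IR/InstrTypes.h"
    #include "llvm/Support/ModRef.h"
    using namespace llvm;

    // True iff every access the call may perform is a write and goes
    // through one of its pointer arguments, i.e. the call behaves as
    // 'memory(argmem: write)'.
    static bool onlyWritesArgMem(const CallBase &Call) {
      MemoryEffects ME = Call.getMemoryEffects();
      return ME.onlyWritesMemory() && ME.onlyAccessesArgPointees();
    }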
---
llvm/lib/Analysis/LoopAccessAnalysis.cpp | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 523d60b3ed15a2..0a57782921b687 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2309,18 +2309,24 @@ bool LoopAccessInfo::canAnalyzeLoop() {
return true;
}
-/// Returns whether \p I is a known math library call that has the memory
-/// write-only attribute set.
+/// Returns whether \p I is a known math library call that has the attribute
+/// 'memory(argmem: write)' set.
static bool isMathLibCallMemWriteOnly(const TargetLibraryInfo *TLI,
const Instruction &I) {
auto *Call = dyn_cast<CallInst>(&I);
if (!Call)
return false;
+ Function *F = Call->getCalledFunction();
+ if (!F->hasFnAttribute(Attribute::AttrKind::Memory))
+ return false;
+
+ auto ME = F->getFnAttribute(Attribute::AttrKind::Memory).getMemoryEffects();
LibFunc Func;
TLI->getLibFunc(*Call, Func);
- return Func == LibFunc::LibFunc_modf || Func == LibFunc::LibFunc_modff ||
- Func == LibFunc::LibFunc_frexp || Func == LibFunc::LibFunc_frexpf;
+ return ME.onlyWritesMemory() && ME.onlyAccessesArgPointees() &&
+ (Func == LibFunc::LibFunc_modf || Func == LibFunc::LibFunc_modff ||
+ Func == LibFunc::LibFunc_frexp || Func == LibFunc::LibFunc_frexpf);
}
void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
>From 91d67cc44559d0eeb3318b72d697d70064d045dd Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Fri, 19 Jan 2024 10:20:50 +0000
Subject: [PATCH 5/7] Address reviewers' comments
---
.../CodeGen/aarch64-veclib-function-calls-linear-ptrs.c | 2 +-
llvm/lib/Analysis/LoopAccessAnalysis.cpp | 6 +-----
2 files changed, 2 insertions(+), 6 deletions(-)
diff --git a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
index 957b3f5cb235d3..98085a183f46c4 100644
--- a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
+++ b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
@@ -1,5 +1,5 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --filter "call.*(frexp|modf)" --version 4
-// RUN: %clang --target=aarch64-linux-gnu -march=armv8-a+sve -O3 -mllvm -vector-library=ArmPL -mllvm -force-vector-interleave=1 -mllvm -prefer-predicate-over-epilogue=predicate-dont-vectorize -emit-llvm -S -o - %s | FileCheck %s
+// RUN: %clang --target=aarch64-linux-gnu -march=armv8-a+sve -O3 -isystem %S/../Headers/Inputs/include -mllvm -vector-library=ArmPL -mllvm -force-vector-interleave=1 -mllvm -prefer-predicate-over-epilogue=predicate-dont-vectorize -emit-llvm -S -o - %s | FileCheck %s
// REQUIRES: aarch64-registered-target
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 0a57782921b687..03e096298a7bee 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2317,11 +2317,7 @@ static bool isMathLibCallMemWriteOnly(const TargetLibraryInfo *TLI,
if (!Call)
return false;
- Function *F = Call->getCalledFunction();
- if (!F->hasFnAttribute(Attribute::AttrKind::Memory))
- return false;
-
- auto ME = F->getFnAttribute(Attribute::AttrKind::Memory).getMemoryEffects();
+ auto ME = Call->getMemoryEffects();
LibFunc Func;
TLI->getLibFunc(*Call, Func);
return ME.onlyWritesMemory() && ME.onlyAccessesArgPointees() &&
>From 2e3d0d6d643f4eab1882ea93f78bc9fe1c3c14af Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Tue, 6 Feb 2024 09:14:50 +0000
Subject: [PATCH 6/7] Rebased and updated test after PR #80296
---
.../aarch64-veclib-function-calls-linear-ptrs.c | 10 ++++------
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
index 98085a183f46c4..4a26d3ce9460d6 100644
--- a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
+++ b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
@@ -17,7 +17,7 @@ vectorize.
// CHECK-LABEL: define dso_local void @frexp_f64(
// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
-// CHECK: [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK: [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2:[0-9]+]]
//
void frexp_f64(double *in, double *out1, int *out2, int N) {
for (int i = 0; i < N; ++i)
@@ -26,7 +26,7 @@ void frexp_f64(double *in, double *out1, int *out2, int N) {
// CHECK-LABEL: define dso_local void @frexp_f32(
// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK: [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR5]]
+// CHECK: [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2]]
//
void frexp_f32(float *in, float *out1, int *out2, int N) {
for (int i = 0; i < N; ++i)
@@ -35,8 +35,7 @@ void frexp_f32(float *in, float *out1, int *out2, int N) {
// CHECK-LABEL: define dso_local void @modf_f64(
// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK: [[TMP11:%.*]] = tail call <vscale x 2 x double> @armpl_svmodf_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP10:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-// CHECK: [[CALL:%.*]] = tail call double @modf(double noundef [[TMP14:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR6:[0-9]+]]
+// CHECK: [[CALL:%.*]] = tail call double @modf(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR3:[0-9]+]]
//
void modf_f64(double *in, double *out1, double *out2, int N) {
for (int i = 0; i < N; ++i)
@@ -45,8 +44,7 @@ void modf_f64(double *in, double *out1, double *out2, int N) {
// CHECK-LABEL: define dso_local void @modf_f32(
// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK: [[TMP11:%.*]] = tail call <vscale x 4 x float> @armpl_svmodf_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP10:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-// CHECK: [[CALL:%.*]] = tail call float @modff(float noundef [[TMP14:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR7:[0-9]+]]
+// CHECK: [[CALL:%.*]] = tail call float @modff(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR4:[0-9]+]]
//
void modf_f32(float *in, float *out1, float *out2, int N) {
for (int i = 0; i < N; ++i)
>From 513ba7ad5ee6b61ddac14187f3376342e343c0d7 Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Tue, 27 Feb 2024 15:59:14 +0000
Subject: [PATCH 7/7] Added LAA and LV tests
Removed the C test.
Rebased on top of the patch that enables mappings for modf/modff
(among others).
---
...arch64-veclib-function-calls-linear-ptrs.c | 52 -------
llvm/lib/Analysis/LoopAccessAnalysis.cpp | 6 +-
.../LoopAccessAnalysis/attr-mem-write-only.ll | 117 ++++++++++++++++
.../veclib-function-calls-linear-ptrs.ll | 132 ++++++++++++++++++
4 files changed, 254 insertions(+), 53 deletions(-)
delete mode 100644 clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
create mode 100644 llvm/test/Analysis/LoopAccessAnalysis/attr-mem-write-only.ll
create mode 100644 llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls-linear-ptrs.ll
diff --git a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
deleted file mode 100644
index 4a26d3ce9460d6..00000000000000
--- a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
+++ /dev/null
@@ -1,52 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --filter "call.*(frexp|modf)" --version 4
-// RUN: %clang --target=aarch64-linux-gnu -march=armv8-a+sve -O3 -isystem %S/../Headers/Inputs/include -mllvm -vector-library=ArmPL -mllvm -force-vector-interleave=1 -mllvm -prefer-predicate-over-epilogue=predicate-dont-vectorize -emit-llvm -S -o - %s | FileCheck %s
-
-// REQUIRES: aarch64-registered-target
-
-/*
-Test vectorization of math functions that have the write-only memory
-attribute set. Given that they have vectorized counterparts, they should
-vectorize.
-*/
-
-// The following define is required to access some math functions.
-#define _GNU_SOURCE
-#include <math.h>
-
-// frexp/frexpf have no TLI mappings yet.
-
-// CHECK-LABEL: define dso_local void @frexp_f64(
-// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
-// CHECK: [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2:[0-9]+]]
-//
-void frexp_f64(double *in, double *out1, int *out2, int N) {
- for (int i = 0; i < N; ++i)
- *out1 = frexp(in[i], out2+i);
-}
-
-// CHECK-LABEL: define dso_local void @frexp_f32(
-// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK: [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2]]
-//
-void frexp_f32(float *in, float *out1, int *out2, int N) {
- for (int i = 0; i < N; ++i)
- *out1 = frexpf(in[i], out2+i);
-}
-
-// CHECK-LABEL: define dso_local void @modf_f64(
-// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK: [[CALL:%.*]] = tail call double @modf(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR3:[0-9]+]]
-//
-void modf_f64(double *in, double *out1, double *out2, int N) {
- for (int i = 0; i < N; ++i)
- out1[i] = modf(in[i], out2+i);
-}
-
-// CHECK-LABEL: define dso_local void @modf_f32(
-// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK: [[CALL:%.*]] = tail call float @modff(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR4:[0-9]+]]
-//
-void modf_f32(float *in, float *out1, float *out2, int N) {
- for (int i = 0; i < N; ++i)
- out1[i] = modff(in[i], out2+i);
-}
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 03e096298a7bee..f22827c27ae2ab 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2423,8 +2423,12 @@ void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
if (I.mayWriteToMemory()) {
// We can safely handle math functions that have vectorized
// counterparts and have the memory write-only attribute set.
- if (isMathLibCallMemWriteOnly(TLI, I))
+ if (isMathLibCallMemWriteOnly(TLI, I)) {
+ LLVM_DEBUG(dbgs()
+ << "LAA: allow math function with write-only attribute:"
+ << I << "\n");
continue;
+ }
auto *St = dyn_cast<StoreInst>(&I);
if (!St) {
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/attr-mem-write-only.ll b/llvm/test/Analysis/LoopAccessAnalysis/attr-mem-write-only.ll
new file mode 100644
index 00000000000000..aca4e4ad389216
--- /dev/null
+++ b/llvm/test/Analysis/LoopAccessAnalysis/attr-mem-write-only.ll
@@ -0,0 +1,117 @@
+; RUN: opt < %s -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize -debug-only=loop-accesses -disable-output 2>&1 | FileCheck %s
+
+; REQUIRES: asserts
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; TODO: add mappings for frexp/frexpf
+
+define void @frexp_f64(ptr %in, ptr %out1, ptr %out2, i32 %N) {
+entry:
+ %cmp4 = icmp sgt i32 %N, 0
+ br i1 %cmp4, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %wide.trip.count = zext nneg i32 %N to i64
+ br label %for.body
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %in, i64 %indvars.iv
+ %0 = load double, ptr %arrayidx, align 8
+ %add.ptr = getelementptr inbounds i32, ptr %out2, i64 %indvars.iv
+ %call = tail call double @frexp(double noundef %0, ptr noundef %add.ptr)
+ store double %call, ptr %out1, align 8
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare double @frexp(double, ptr) #1
+
+define void @frexp_f32(ptr readonly %in, ptr %out1, ptr %out2, i32 %N) {
+entry:
+ %cmp4 = icmp sgt i32 %N, 0
+ br i1 %cmp4, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %wide.trip.count = zext nneg i32 %N to i64
+ br label %for.body
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %in, i64 %indvars.iv
+ %0 = load float, ptr %arrayidx, align 4
+ %add.ptr = getelementptr inbounds i32, ptr %out2, i64 %indvars.iv
+ %call = tail call float @frexpf(float noundef %0, ptr noundef %add.ptr)
+ store float %call, ptr %out1, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare float @frexpf(float , ptr) #1
+
+define void @modf_f64(ptr %in, ptr %out1, ptr %out2, i32 %N) {
+; CHECK: LAA: allow math function with write-only attribute: %call = tail call double @modf
+entry:
+ %cmp7 = icmp sgt i32 %N, 0
+ br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %wide.trip.count = zext nneg i32 %N to i64
+ br label %for.body
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %in, i64 %indvars.iv
+ %0 = load double, ptr %arrayidx, align 8
+ %add.ptr = getelementptr inbounds double, ptr %out2, i64 %indvars.iv
+ %call = tail call double @modf(double noundef %0, ptr noundef %add.ptr)
+ %arrayidx2 = getelementptr inbounds double, ptr %out1, i64 %indvars.iv
+ store double %call, ptr %arrayidx2, align 8
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare double @modf(double , ptr ) #1
+
+define void @modf_f32(ptr %in, ptr %out1, ptr %out2, i32 %N) {
+; CHECK: LAA: allow math function with write-only attribute: %call = tail call float @modff
+entry:
+ %cmp7 = icmp sgt i32 %N, 0
+ br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %wide.trip.count = zext nneg i32 %N to i64
+ br label %for.body
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %in, i64 %indvars.iv
+ %0 = load float, ptr %arrayidx, align 4
+ %add.ptr = getelementptr inbounds float, ptr %out2, i64 %indvars.iv
+ %call = tail call float @modff(float noundef %0, ptr noundef %add.ptr)
+ %arrayidx2 = getelementptr inbounds float, ptr %out1, i64 %indvars.iv
+ store float %call, ptr %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare float @modff(float noundef, ptr nocapture noundef) #1
+
+attributes #1 = { memory(argmem: write) }
\ No newline at end of file
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls-linear-ptrs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls-linear-ptrs.ll
new file mode 100644
index 00000000000000..0a502f52de9ccc
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls-linear-ptrs.ll
@@ -0,0 +1,132 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "call.*(frexp|modf)" --version 4
+; RUN: opt < %s -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; TODO: add mappings for frexp/frexpf
+
+define void @frexp_f64(ptr %in, ptr %out1, ptr %out2, i32 %N) {
+; CHECK-LABEL: define void @frexp_f64(
+; CHECK-SAME: ptr [[IN:%.*]], ptr [[OUT1:%.*]], ptr [[OUT2:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK: [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]])
+;
+entry:
+ %cmp4 = icmp sgt i32 %N, 0
+ br i1 %cmp4, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %wide.trip.count = zext nneg i32 %N to i64
+ br label %for.body
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %in, i64 %indvars.iv
+ %0 = load double, ptr %arrayidx, align 8
+ %add.ptr = getelementptr inbounds i32, ptr %out2, i64 %indvars.iv
+ %call = tail call double @frexp(double noundef %0, ptr noundef %add.ptr)
+ store double %call, ptr %out1, align 8
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare double @frexp(double, ptr) #1
+
+define void @frexp_f32(ptr readonly %in, ptr %out1, ptr %out2, i32 %N) {
+; CHECK-LABEL: define void @frexp_f32(
+; CHECK-SAME: ptr readonly [[IN:%.*]], ptr [[OUT1:%.*]], ptr [[OUT2:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; CHECK: [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]])
+;
+entry:
+ %cmp4 = icmp sgt i32 %N, 0
+ br i1 %cmp4, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %wide.trip.count = zext nneg i32 %N to i64
+ br label %for.body
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %in, i64 %indvars.iv
+ %0 = load float, ptr %arrayidx, align 4
+ %add.ptr = getelementptr inbounds i32, ptr %out2, i64 %indvars.iv
+ %call = tail call float @frexpf(float noundef %0, ptr noundef %add.ptr)
+ store float %call, ptr %out1, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare float @frexpf(float , ptr) #1
+
+define void @modf_f64(ptr %in, ptr %out1, ptr %out2, i32 %N) {
+; CHECK-LABEL: define void @modf_f64(
+; CHECK-SAME: ptr [[IN:%.*]], ptr [[OUT1:%.*]], ptr [[OUT2:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; CHECK: [[TMP27:%.*]] = call <vscale x 2 x double> @armpl_svmodf_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP26:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; CHECK: [[CALL:%.*]] = tail call double @modf(double noundef [[TMP32:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR5:[0-9]+]]
+;
+entry:
+ %cmp7 = icmp sgt i32 %N, 0
+ br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %wide.trip.count = zext nneg i32 %N to i64
+ br label %for.body
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %in, i64 %indvars.iv
+ %0 = load double, ptr %arrayidx, align 8
+ %add.ptr = getelementptr inbounds double, ptr %out2, i64 %indvars.iv
+ %call = tail call double @modf(double noundef %0, ptr noundef %add.ptr)
+ %arrayidx2 = getelementptr inbounds double, ptr %out1, i64 %indvars.iv
+ store double %call, ptr %arrayidx2, align 8
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare double @modf(double , ptr ) #1
+
+define void @modf_f32(ptr %in, ptr %out1, ptr %out2, i32 %N) {
+; CHECK-LABEL: define void @modf_f32(
+; CHECK-SAME: ptr [[IN:%.*]], ptr [[OUT1:%.*]], ptr [[OUT2:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; CHECK: [[TMP27:%.*]] = call <vscale x 4 x float> @armpl_svmodf_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP26:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; CHECK: [[CALL:%.*]] = tail call float @modff(float noundef [[TMP32:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR6:[0-9]+]]
+;
+entry:
+ %cmp7 = icmp sgt i32 %N, 0
+ br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %wide.trip.count = zext nneg i32 %N to i64
+ br label %for.body
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %in, i64 %indvars.iv
+ %0 = load float, ptr %arrayidx, align 4
+ %add.ptr = getelementptr inbounds float, ptr %out2, i64 %indvars.iv
+ %call = tail call float @modff(float noundef %0, ptr noundef %add.ptr)
+ %arrayidx2 = getelementptr inbounds float, ptr %out1, i64 %indvars.iv
+ store float %call, ptr %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare float @modff(float noundef, ptr nocapture noundef) #1
+
+attributes #1 = { memory(argmem: write) }