[clang] [llvm] [LV][LAA] Vectorize math lib calls with mem write-only attribute (PR #78432)

Paschalis Mpeis via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 30 08:21:34 PDT 2024


https://github.com/paschalis-mpeis updated https://github.com/llvm/llvm-project/pull/78432

>From d3ca209a7690d9ecbe188d8a2145177f43ed1951 Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Tue, 16 Jan 2024 10:53:09 +0000
Subject: [PATCH 1/7] LAA cannot vectorize lib calls like modf/modff

Functions like modf/modff are math library calls that have the memory
write-only attribute set. Given that a target provides vectorized mappings
for them, LAA should allow vectorization.
---
 ...arch64-veclib-function-calls-linear-ptrs.c | 57 +++++++++++++++++++
 1 file changed, 57 insertions(+)
 create mode 100644 clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
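
The "vectorized mappings" referred to above live in TLI and are populated by a
vector library (ArmPL in these tests, e.g. modf maps to armpl_svmodf_f64_x).
Below is a minimal C++ sketch of such a query, separate from the patch itself;
the helper name is illustrative only:

  #include "llvm/Analysis/TargetLibraryInfo.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Illustrative only (not part of this patch): true if TLI records a vector
  // variant for the scalar callee, as happens for modf/modff when
  // -vector-library=ArmPL is in effect.
  static bool hasVectorCounterpart(const TargetLibraryInfo &TLI,
                                   const CallInst &Call) {
    const Function *Callee = Call.getCalledFunction();
    if (!Callee)
      return false; // Indirect calls have no library mapping.
    return TLI.isFunctionVectorizable(Callee->getName());
  }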

diff --git a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
new file mode 100644
index 00000000000000..a449fac147058a
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
@@ -0,0 +1,57 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --filter "call.*(frexp|modf)" --version 4
+// RUN: %clang --target=aarch64-linux-gnu -march=armv8-a+sve -O3 -mllvm -vector-library=ArmPL -mllvm -force-vector-interleave=1 -mllvm -prefer-predicate-over-epilogue=predicate-dont-vectorize -emit-llvm -S -o - %s | FileCheck %s
+
+// REQUIRES: aarch64-registered-target
+
+/*
+Testing vectorization of math functions that have the write-only memory
+attribute set. Given that they have vectorized counterparts, they should
+vectorize.
+*/
+
+// The following define is required to access some math functions.
+#define _GNU_SOURCE
+#include <math.h>
+
+// frexp/frexpf have no TLI mappings yet.
+
+// CHECK-LABEL: define dso_local void @frexp_f64(
+// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// CHECK:    [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2:[0-9]+]]
+//
+void frexp_f64(double *in, double *out1, int *out2, int N) {
+  for (int i = 0; i < N; ++i)
+    *out1 = frexp(in[i], out2+i);
+}
+
+// CHECK-LABEL: define dso_local void @frexp_f32(
+// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK:    [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2]]
+//
+void frexp_f32(float *in, float *out1, int *out2, int N) {
+  for (int i = 0; i < N; ++i)
+    *out1 = frexpf(in[i], out2+i);
+}
+
+
+// TODO: LAA must allow vectorization.
+
+// CHECK-LABEL: define dso_local void @modf_f64(
+// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK:    [[CALL:%.*]] = tail call double @modf(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR3:[0-9]+]]
+//
+void modf_f64(double *in, double *out1, double *out2, int N) {
+  for (int i = 0; i < N; ++i)
+      out1[i] = modf(in[i], out2+i);
+}
+
+// TODO: LAA must allow vectorization.
+
+// CHECK-LABEL: define dso_local void @modf_f32(
+// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK:    [[CALL:%.*]] = tail call float @modff(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR4:[0-9]+]]
+//
+void modf_f32(float *in, float *out1, float *out2, int N) {
+  for (int i = 0; i < N; ++i)
+      out1[i] = modff(in[i], out2+i);
+}

>From 1dc1b5cb8e52761027d85edcd057ef57710d2699 Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Wed, 17 Jan 2024 09:44:45 +0000
Subject: [PATCH 2/7] [LV][LAA] Vectorize math lib calls with mem write-only
 attribute

Teach LAA to consider as safe specific math library calls that are known
to have the memory write-only attribute set. These attributes are set on
calls by inferNonMandatoryLibFuncAttrs, in BuildLibCalls.cpp; the current
ones are modf/modff and frexp/frexpf.

This happens only when the calls are found through TLI to have
vectorized counterparts.
---
 ...arch64-veclib-function-calls-linear-ptrs.c | 15 ++++++---------
 llvm/lib/Analysis/LoopAccessAnalysis.cpp      | 19 +++++++++++++++++++
 2 files changed, 25 insertions(+), 9 deletions(-)
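
For reference, a consolidated C++ sketch of roughly where this helper ends up
after the later revisions in this series (the authoritative code is in the
diffs; small differences such as checking the getLibFunc return value are only
illustrative):

  #include "llvm/Analysis/TargetLibraryInfo.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Sketch: treat the call as a handled write if it is one of the known math
  // lib calls (modf/modff, frexp/frexpf) and its memory effects say it only
  // writes memory reachable through its pointer arguments.
  static bool isMathLibCallMemWriteOnlySketch(const TargetLibraryInfo *TLI,
                                              const Instruction &I) {
    auto *Call = dyn_cast<CallInst>(&I);
    if (!Call)
      return false;

    MemoryEffects ME = Call->getMemoryEffects();
    if (!ME.onlyWritesMemory() || !ME.onlyAccessesArgPointees())
      return false;

    LibFunc Func;
    if (!TLI->getLibFunc(*Call, Func))
      return false;
    return Func == LibFunc_modf || Func == LibFunc_modff ||
           Func == LibFunc_frexp || Func == LibFunc_frexpf;
  }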

diff --git a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
index a449fac147058a..957b3f5cb235d3 100644
--- a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
+++ b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
@@ -17,7 +17,7 @@ vectorize.
 
 // CHECK-LABEL: define dso_local void @frexp_f64(
 // CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
-// CHECK:    [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2:[0-9]+]]
+// CHECK:    [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR5:[0-9]+]]
 //
 void frexp_f64(double *in, double *out1, int *out2, int N) {
   for (int i = 0; i < N; ++i)
@@ -26,30 +26,27 @@ void frexp_f64(double *in, double *out1, int *out2, int N) {
 
 // CHECK-LABEL: define dso_local void @frexp_f32(
 // CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK:    [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2]]
+// CHECK:    [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR5]]
 //
 void frexp_f32(float *in, float *out1, int *out2, int N) {
   for (int i = 0; i < N; ++i)
     *out1 = frexpf(in[i], out2+i);
 }
 
-
-// TODO: LAA must allow vectorization.
-
 // CHECK-LABEL: define dso_local void @modf_f64(
 // CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK:    [[CALL:%.*]] = tail call double @modf(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR3:[0-9]+]]
+// CHECK:    [[TMP11:%.*]] = tail call <vscale x 2 x double> @armpl_svmodf_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP10:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+// CHECK:    [[CALL:%.*]] = tail call double @modf(double noundef [[TMP14:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR6:[0-9]+]]
 //
 void modf_f64(double *in, double *out1, double *out2, int N) {
   for (int i = 0; i < N; ++i)
       out1[i] = modf(in[i], out2+i);
 }
 
-// TODO: LAA must allow vectorization.
-
 // CHECK-LABEL: define dso_local void @modf_f32(
 // CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK:    [[CALL:%.*]] = tail call float @modff(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR4:[0-9]+]]
+// CHECK:    [[TMP11:%.*]] = tail call <vscale x 4 x float> @armpl_svmodf_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP10:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+// CHECK:    [[CALL:%.*]] = tail call float @modff(float noundef [[TMP14:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR7:[0-9]+]]
 //
 void modf_f32(float *in, float *out1, float *out2, int N) {
   for (int i = 0; i < N; ++i)
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index f65515ca387229..e13f617cce8821 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2380,6 +2380,20 @@ bool LoopAccessInfo::canAnalyzeLoop() {
   return true;
 }
 
+/// Returns whether \p I is a known math library call that has the memory
+/// write-only attribute set.
+static bool isMathLibCallMemWriteOnly(const TargetLibraryInfo *TLI,
+                                      const Instruction &I) {
+  auto *Call = dyn_cast<CallInst>(&I);
+  if (!Call)
+    return false;
+
+  LibFunc Func;
+  TLI->getLibFunc(*Call, Func);
+  return Func == LibFunc::LibFunc_modf || Func == LibFunc::LibFunc_modff ||
+         Func == LibFunc::LibFunc_frexp || Func == LibFunc::LibFunc_frexpf;
+}
+
 void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
                                  const TargetLibraryInfo *TLI,
                                  DominatorTree *DT) {
@@ -2476,6 +2490,11 @@ void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
 
       // Save 'store' instructions. Abort if other instructions write to memory.
       if (I.mayWriteToMemory()) {
+        // We can safely handle math functions that have vectorized
+        // counterparts and have the memory write-only attribute set.
+        if (isMathLibCallMemWriteOnly(TLI, I))
+          continue;
+
         auto *St = dyn_cast<StoreInst>(&I);
         if (!St) {
           recordAnalysis("CantVectorizeInstruction", St)

>From d7245a22ca9afe4c1e7ff52f6dfe8041689e014e Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Thu, 18 Jan 2024 14:14:00 +0000
Subject: [PATCH 3/7] Add check for the 'memory(argmem: write)' attribute.

---
 llvm/lib/Analysis/LoopAccessAnalysis.cpp | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
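
In MemoryEffects terms, 'memory(argmem: write)' means the callee may at most
write memory, and only through its pointer arguments. A small standalone
illustration of the two predicates the new check relies on (the example
function name is made up):

  #include "llvm/Support/ModRef.h"
  #include <cassert>
  using namespace llvm;

  void argMemWriteOnlyExample() {
    // Effects equivalent to the 'memory(argmem: write)' function attribute.
    MemoryEffects ME = MemoryEffects::argMemOnly(ModRefInfo::Mod);
    assert(ME.onlyWritesMemory());        // never reads memory
    assert(ME.onlyAccessesArgPointees()); // touches only argument pointees
  }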

diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index e13f617cce8821..ac2f648bd1838f 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2380,18 +2380,24 @@ bool LoopAccessInfo::canAnalyzeLoop() {
   return true;
 }
 
-/// Returns whether \p I is a known math library call that has the memory
-/// write-only attribute set.
+/// Returns whether \p I is a known math library call that has the attribute
+/// 'memory(argmem: write)' set.
 static bool isMathLibCallMemWriteOnly(const TargetLibraryInfo *TLI,
                                       const Instruction &I) {
   auto *Call = dyn_cast<CallInst>(&I);
   if (!Call)
     return false;
 
+  Function *F = Call->getCalledFunction();
+  if (!F->hasFnAttribute(Attribute::AttrKind::Memory))
+    return false;
+
+  auto ME = F->getFnAttribute(Attribute::AttrKind::Memory).getMemoryEffects();
   LibFunc Func;
   TLI->getLibFunc(*Call, Func);
-  return Func == LibFunc::LibFunc_modf || Func == LibFunc::LibFunc_modff ||
-         Func == LibFunc::LibFunc_frexp || Func == LibFunc::LibFunc_frexpf;
+  return ME.onlyWritesMemory() && ME.onlyAccessesArgPointees() &&
+         (Func == LibFunc::LibFunc_modf || Func == LibFunc::LibFunc_modff ||
+          Func == LibFunc::LibFunc_frexp || Func == LibFunc::LibFunc_frexpf);
 }
 
 void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,

>From 13a30ff4e84464cc215bede791ab968c41ef1b9e Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Fri, 19 Jan 2024 10:20:50 +0000
Subject: [PATCH 4/7] Addressing reviewers

---
 .../CodeGen/aarch64-veclib-function-calls-linear-ptrs.c     | 2 +-
 llvm/lib/Analysis/LoopAccessAnalysis.cpp                    | 6 +-----
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
index 957b3f5cb235d3..98085a183f46c4 100644
--- a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
+++ b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
@@ -1,5 +1,5 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --filter "call.*(frexp|modf)" --version 4
-// RUN: %clang --target=aarch64-linux-gnu -march=armv8-a+sve -O3 -mllvm -vector-library=ArmPL -mllvm -force-vector-interleave=1 -mllvm -prefer-predicate-over-epilogue=predicate-dont-vectorize -emit-llvm -S -o - %s | FileCheck %s
+// RUN: %clang --target=aarch64-linux-gnu -march=armv8-a+sve -O3 -isystem %S/../Headers/Inputs/include -mllvm -vector-library=ArmPL -mllvm -force-vector-interleave=1 -mllvm -prefer-predicate-over-epilogue=predicate-dont-vectorize -emit-llvm -S -o - %s | FileCheck %s
 
 // REQUIRES: aarch64-registered-target
 
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index ac2f648bd1838f..4d86162aad4f9f 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2388,11 +2388,7 @@ static bool isMathLibCallMemWriteOnly(const TargetLibraryInfo *TLI,
   if (!Call)
     return false;
 
-  Function *F = Call->getCalledFunction();
-  if (!F->hasFnAttribute(Attribute::AttrKind::Memory))
-    return false;
-
-  auto ME = F->getFnAttribute(Attribute::AttrKind::Memory).getMemoryEffects();
+  auto ME = Call->getMemoryEffects();
   LibFunc Func;
   TLI->getLibFunc(*Call, Func);
   return ME.onlyWritesMemory() && ME.onlyAccessesArgPointees() &&

>From f1a2f835524d474729dfc86e502e52d3a411b7d0 Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Tue, 6 Feb 2024 09:14:50 +0000
Subject: [PATCH 5/7] Rebased and updated test after PR #80296

---
 .../aarch64-veclib-function-calls-linear-ptrs.c        | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
index 98085a183f46c4..4a26d3ce9460d6 100644
--- a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
+++ b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
@@ -17,7 +17,7 @@ vectorize.
 
 // CHECK-LABEL: define dso_local void @frexp_f64(
 // CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
-// CHECK:    [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK:    [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2:[0-9]+]]
 //
 void frexp_f64(double *in, double *out1, int *out2, int N) {
   for (int i = 0; i < N; ++i)
@@ -26,7 +26,7 @@ void frexp_f64(double *in, double *out1, int *out2, int N) {
 
 // CHECK-LABEL: define dso_local void @frexp_f32(
 // CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK:    [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR5]]
+// CHECK:    [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2]]
 //
 void frexp_f32(float *in, float *out1, int *out2, int N) {
   for (int i = 0; i < N; ++i)
@@ -35,8 +35,7 @@ void frexp_f32(float *in, float *out1, int *out2, int N) {
 
 // CHECK-LABEL: define dso_local void @modf_f64(
 // CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK:    [[TMP11:%.*]] = tail call <vscale x 2 x double> @armpl_svmodf_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP10:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-// CHECK:    [[CALL:%.*]] = tail call double @modf(double noundef [[TMP14:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR6:[0-9]+]]
+// CHECK:    [[CALL:%.*]] = tail call double @modf(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR3:[0-9]+]]
 //
 void modf_f64(double *in, double *out1, double *out2, int N) {
   for (int i = 0; i < N; ++i)
@@ -45,8 +44,7 @@ void modf_f64(double *in, double *out1, double *out2, int N) {
 
 // CHECK-LABEL: define dso_local void @modf_f32(
 // CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK:    [[TMP11:%.*]] = tail call <vscale x 4 x float> @armpl_svmodf_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP10:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-// CHECK:    [[CALL:%.*]] = tail call float @modff(float noundef [[TMP14:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR7:[0-9]+]]
+// CHECK:    [[CALL:%.*]] = tail call float @modff(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR4:[0-9]+]]
 //
 void modf_f32(float *in, float *out1, float *out2, int N) {
   for (int i = 0; i < N; ++i)

>From 7e20927dfde045d777a60f468536761e99970ae0 Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Tue, 27 Feb 2024 15:59:14 +0000
Subject: [PATCH 6/7] Added LAA and LV tests

Removed the C test.
Code rebased on top of the patch that enables mappings for modf/modff
(among others).
---
 ...arch64-veclib-function-calls-linear-ptrs.c |  52 -------
 llvm/lib/Analysis/LoopAccessAnalysis.cpp      |   6 +-
 .../LoopAccessAnalysis/attr-mem-write-only.ll | 117 ++++++++++++++++
 .../veclib-function-calls-linear-ptrs.ll      | 132 ++++++++++++++++++
 4 files changed, 254 insertions(+), 53 deletions(-)
 delete mode 100644 clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
 create mode 100644 llvm/test/Analysis/LoopAccessAnalysis/attr-mem-write-only.ll
 create mode 100644 llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls-linear-ptrs.ll

diff --git a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
deleted file mode 100644
index 4a26d3ce9460d6..00000000000000
--- a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
+++ /dev/null
@@ -1,52 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --filter "call.*(frexp|modf)" --version 4
-// RUN: %clang --target=aarch64-linux-gnu -march=armv8-a+sve -O3 -isystem %S/../Headers/Inputs/include -mllvm -vector-library=ArmPL -mllvm -force-vector-interleave=1 -mllvm -prefer-predicate-over-epilogue=predicate-dont-vectorize -emit-llvm -S -o - %s | FileCheck %s
-
-// REQUIRES: aarch64-registered-target
-
-/*
-Testing vectorization of math functions that have the write-only memory
-attribute set. Given that they have vectorized counterparts, they should
-vectorize.
-*/
-
-// The following define is required to access some math functions.
-#define _GNU_SOURCE
-#include <math.h>
-
-// frexp/frexpf have no TLI mappings yet.
-
-// CHECK-LABEL: define dso_local void @frexp_f64(
-// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
-// CHECK:    [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2:[0-9]+]]
-//
-void frexp_f64(double *in, double *out1, int *out2, int N) {
-  for (int i = 0; i < N; ++i)
-    *out1 = frexp(in[i], out2+i);
-}
-
-// CHECK-LABEL: define dso_local void @frexp_f32(
-// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK:    [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR2]]
-//
-void frexp_f32(float *in, float *out1, int *out2, int N) {
-  for (int i = 0; i < N; ++i)
-    *out1 = frexpf(in[i], out2+i);
-}
-
-// CHECK-LABEL: define dso_local void @modf_f64(
-// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK:    [[CALL:%.*]] = tail call double @modf(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR3:[0-9]+]]
-//
-void modf_f64(double *in, double *out1, double *out2, int N) {
-  for (int i = 0; i < N; ++i)
-      out1[i] = modf(in[i], out2+i);
-}
-
-// CHECK-LABEL: define dso_local void @modf_f32(
-// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
-// CHECK:    [[CALL:%.*]] = tail call float @modff(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR4:[0-9]+]]
-//
-void modf_f32(float *in, float *out1, float *out2, int N) {
-  for (int i = 0; i < N; ++i)
-      out1[i] = modff(in[i], out2+i);
-}
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 4d86162aad4f9f..a945da13cb0dbb 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2494,8 +2494,12 @@ void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
       if (I.mayWriteToMemory()) {
         // We can safely handle math functions that have vectorized
         // counterparts and have the memory write-only attribute set.
-        if (isMathLibCallMemWriteOnly(TLI, I))
+        if (isMathLibCallMemWriteOnly(TLI, I)) {
+          LLVM_DEBUG(dbgs()
+                     << "LAA: allow math function with write-only attribute:"
+                     << I << "\n");
           continue;
+        }
 
         auto *St = dyn_cast<StoreInst>(&I);
         if (!St) {
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/attr-mem-write-only.ll b/llvm/test/Analysis/LoopAccessAnalysis/attr-mem-write-only.ll
new file mode 100644
index 00000000000000..aca4e4ad389216
--- /dev/null
+++ b/llvm/test/Analysis/LoopAccessAnalysis/attr-mem-write-only.ll
@@ -0,0 +1,117 @@
+; RUN: opt < %s -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize -debug-only=loop-accesses -disable-output 2>&1 | FileCheck %s
+
+; REQUIRES: asserts
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; TODO: add mappings for frexp/frexpf
+
+define void @frexp_f64(ptr %in, ptr %out1, ptr %out2, i32 %N) {
+entry:
+  %cmp4 = icmp sgt i32 %N, 0
+  br i1 %cmp4, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %wide.trip.count = zext nneg i32 %N to i64
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds double, ptr %in, i64 %indvars.iv
+  %0 = load double, ptr %arrayidx, align 8
+  %add.ptr = getelementptr inbounds i32, ptr %out2, i64 %indvars.iv
+  %call = tail call double @frexp(double noundef %0, ptr noundef %add.ptr)
+  store double %call, ptr %out1, align 8
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare double @frexp(double, ptr) #1
+
+define void @frexp_f32(ptr readonly %in, ptr %out1, ptr %out2, i32 %N) {
+entry:
+  %cmp4 = icmp sgt i32 %N, 0
+  br i1 %cmp4, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %wide.trip.count = zext nneg i32 %N to i64
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds float, ptr %in, i64 %indvars.iv
+  %0 = load float, ptr %arrayidx, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %out2, i64 %indvars.iv
+  %call = tail call float @frexpf(float noundef %0, ptr noundef %add.ptr)
+  store float %call, ptr %out1, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare float @frexpf(float , ptr) #1
+
+define void @modf_f64(ptr %in, ptr %out1, ptr %out2, i32 %N) {
+; CHECK: LAA: allow math function with write-only attribute:  %call = tail call double @modf
+entry:
+  %cmp7 = icmp sgt i32 %N, 0
+  br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %wide.trip.count = zext nneg i32 %N to i64
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds double, ptr %in, i64 %indvars.iv
+  %0 = load double, ptr %arrayidx, align 8
+  %add.ptr = getelementptr inbounds double, ptr %out2, i64 %indvars.iv
+  %call = tail call double @modf(double noundef %0, ptr noundef %add.ptr)
+  %arrayidx2 = getelementptr inbounds double, ptr %out1, i64 %indvars.iv
+  store double %call, ptr %arrayidx2, align 8
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare double @modf(double , ptr ) #1
+
+define void @modf_f32(ptr %in, ptr %out1, ptr %out2, i32 %N) {
+; CHECK: LAA: allow math function with write-only attribute:  %call = tail call float @modff
+entry:
+  %cmp7 = icmp sgt i32 %N, 0
+  br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %wide.trip.count = zext nneg i32 %N to i64
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds float, ptr %in, i64 %indvars.iv
+  %0 = load float, ptr %arrayidx, align 4
+  %add.ptr = getelementptr inbounds float, ptr %out2, i64 %indvars.iv
+  %call = tail call float @modff(float noundef %0, ptr noundef %add.ptr)
+  %arrayidx2 = getelementptr inbounds float, ptr %out1, i64 %indvars.iv
+  store float %call, ptr %arrayidx2, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare float @modff(float noundef, ptr nocapture noundef) #1
+
+attributes #1 = { memory(argmem: write) }
\ No newline at end of file
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls-linear-ptrs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls-linear-ptrs.ll
new file mode 100644
index 00000000000000..0a502f52de9ccc
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls-linear-ptrs.ll
@@ -0,0 +1,132 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "call.*(frexp|modf)" --version 4
+; RUN: opt < %s -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; TODO: add mappings for frexp/frexpf
+
+define void @frexp_f64(ptr %in, ptr %out1, ptr %out2, i32 %N) {
+; CHECK-LABEL: define void @frexp_f64(
+; CHECK-SAME: ptr [[IN:%.*]], ptr [[OUT1:%.*]], ptr [[OUT2:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK:    [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]])
+;
+entry:
+  %cmp4 = icmp sgt i32 %N, 0
+  br i1 %cmp4, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %wide.trip.count = zext nneg i32 %N to i64
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds double, ptr %in, i64 %indvars.iv
+  %0 = load double, ptr %arrayidx, align 8
+  %add.ptr = getelementptr inbounds i32, ptr %out2, i64 %indvars.iv
+  %call = tail call double @frexp(double noundef %0, ptr noundef %add.ptr)
+  store double %call, ptr %out1, align 8
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare double @frexp(double, ptr) #1
+
+define void @frexp_f32(ptr readonly %in, ptr %out1, ptr %out2, i32 %N) {
+; CHECK-LABEL: define void @frexp_f32(
+; CHECK-SAME: ptr readonly [[IN:%.*]], ptr [[OUT1:%.*]], ptr [[OUT2:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; CHECK:    [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]])
+;
+entry:
+  %cmp4 = icmp sgt i32 %N, 0
+  br i1 %cmp4, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %wide.trip.count = zext nneg i32 %N to i64
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds float, ptr %in, i64 %indvars.iv
+  %0 = load float, ptr %arrayidx, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %out2, i64 %indvars.iv
+  %call = tail call float @frexpf(float noundef %0, ptr noundef %add.ptr)
+  store float %call, ptr %out1, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare float @frexpf(float , ptr) #1
+
+define void @modf_f64(ptr %in, ptr %out1, ptr %out2, i32 %N) {
+; CHECK-LABEL: define void @modf_f64(
+; CHECK-SAME: ptr [[IN:%.*]], ptr [[OUT1:%.*]], ptr [[OUT2:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; CHECK:    [[TMP27:%.*]] = call <vscale x 2 x double> @armpl_svmodf_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP26:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; CHECK:    [[CALL:%.*]] = tail call double @modf(double noundef [[TMP32:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR5:[0-9]+]]
+;
+entry:
+  %cmp7 = icmp sgt i32 %N, 0
+  br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %wide.trip.count = zext nneg i32 %N to i64
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds double, ptr %in, i64 %indvars.iv
+  %0 = load double, ptr %arrayidx, align 8
+  %add.ptr = getelementptr inbounds double, ptr %out2, i64 %indvars.iv
+  %call = tail call double @modf(double noundef %0, ptr noundef %add.ptr)
+  %arrayidx2 = getelementptr inbounds double, ptr %out1, i64 %indvars.iv
+  store double %call, ptr %arrayidx2, align 8
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare double @modf(double , ptr ) #1
+
+define void @modf_f32(ptr %in, ptr %out1, ptr %out2, i32 %N) {
+; CHECK-LABEL: define void @modf_f32(
+; CHECK-SAME: ptr [[IN:%.*]], ptr [[OUT1:%.*]], ptr [[OUT2:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; CHECK:    [[TMP27:%.*]] = call <vscale x 4 x float> @armpl_svmodf_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP26:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; CHECK:    [[CALL:%.*]] = tail call float @modff(float noundef [[TMP32:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR6:[0-9]+]]
+;
+entry:
+  %cmp7 = icmp sgt i32 %N, 0
+  br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %wide.trip.count = zext nneg i32 %N to i64
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds float, ptr %in, i64 %indvars.iv
+  %0 = load float, ptr %arrayidx, align 4
+  %add.ptr = getelementptr inbounds float, ptr %out2, i64 %indvars.iv
+  %call = tail call float @modff(float noundef %0, ptr noundef %add.ptr)
+  %arrayidx2 = getelementptr inbounds float, ptr %out1, i64 %indvars.iv
+  store float %call, ptr %arrayidx2, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare float @modff(float noundef, ptr nocapture noundef) #1
+
+attributes #1 = { memory(argmem: write) }

>From c3aa63c98627c2edf16420c5a4aa53ba233d544c Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Tue, 30 Apr 2024 15:31:18 +0100
Subject: [PATCH 7/7] Addressing reviewers (2)

---
 llvm/lib/Analysis/LoopAccessAnalysis.cpp              |  6 +++---
 .../LoopAccessAnalysis/attr-mem-write-only.ll         | 11 +++++------
 .../AArch64/veclib-function-calls-linear-ptrs.ll      |  4 +++-
 3 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index a945da13cb0dbb..a15309873cad90 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2495,9 +2495,9 @@ void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
         // We can safely handle math functions that have vectorized
         // counterparts and have the memory write-only attribute set.
         if (isMathLibCallMemWriteOnly(TLI, I)) {
-          LLVM_DEBUG(dbgs()
-                     << "LAA: allow math function with write-only attribute:"
-                     << I << "\n");
+          LLVM_DEBUG(dbgs() << "LAA: Allow to vectorize math function with "
+                               "write-only attribute:"
+                            << I << "\n");
           continue;
         }
 
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/attr-mem-write-only.ll b/llvm/test/Analysis/LoopAccessAnalysis/attr-mem-write-only.ll
index aca4e4ad389216..8a2e95239b9533 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/attr-mem-write-only.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/attr-mem-write-only.ll
@@ -1,12 +1,10 @@
-; RUN: opt < %s -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize -debug-only=loop-accesses -disable-output 2>&1 | FileCheck %s
+; RUN: opt < %s -passes='print<access-info>' -debug-only=loop-accesses -disable-output 2>&1 | FileCheck %s
 
 ; REQUIRES: asserts
 
-target triple = "aarch64-unknown-linux-gnu"
-
-; TODO: add mappings for frexp/frexpf
 
 define void @frexp_f64(ptr %in, ptr %out1, ptr %out2, i32 %N) {
+; CHECK: LAA: Allow to vectorize math function with write-only attribute: %call = tail call double @frexp
 entry:
   %cmp4 = icmp sgt i32 %N, 0
   br i1 %cmp4, label %for.body.preheader, label %for.cond.cleanup
@@ -33,6 +31,7 @@ for.body:
 declare double @frexp(double, ptr) #1
 
 define void @frexp_f32(ptr readonly %in, ptr %out1, ptr %out2, i32 %N) {
+; CHECK: LAA: Allow to vectorize math function with write-only attribute: %call = tail call float @frexpf
 entry:
   %cmp4 = icmp sgt i32 %N, 0
   br i1 %cmp4, label %for.body.preheader, label %for.cond.cleanup
@@ -59,7 +58,7 @@ for.body:
 declare float @frexpf(float , ptr) #1
 
 define void @modf_f64(ptr %in, ptr %out1, ptr %out2, i32 %N) {
-; CHECK: LAA: allow math function with write-only attribute:  %call = tail call double @modf
+; CHECK: LAA: Allow to vectorize math function with write-only attribute: %call = tail call double @modf
 entry:
   %cmp7 = icmp sgt i32 %N, 0
   br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup
@@ -87,7 +86,7 @@ for.body:
 declare double @modf(double , ptr ) #1
 
 define void @modf_f32(ptr %in, ptr %out1, ptr %out2, i32 %N) {
-; CHECK: LAA: allow math function with write-only attribute:  %call = tail call float @modff
+; CHECK: LAA: Allow to vectorize math function with write-only attribute: %call = tail call float @modff
 entry:
   %cmp7 = icmp sgt i32 %N, 0
   br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls-linear-ptrs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls-linear-ptrs.ll
index 0a502f52de9ccc..f513360cf670aa 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls-linear-ptrs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls-linear-ptrs.ll
@@ -3,7 +3,9 @@
 
 target triple = "aarch64-unknown-linux-gnu"
 
-; TODO: add mappings for frexp/frexpf
+; Vectorization cannot happen because there is no scalar-to-vector mapping in
+; TLI for frexp/frexpf. The tests will need to be updated when such mappings
+; are added.
 
 define void @frexp_f64(ptr %in, ptr %out1, ptr %out2, i32 %N) {
 ; CHECK-LABEL: define void @frexp_f64(


