[llvm] [NFC][TLI] Improve tests for ArmPL and SLEEF Intrinsics. (PR #73352)

Paschalis Mpeis via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 27 09:49:03 PST 2023


https://github.com/paschalis-mpeis updated https://github.com/llvm/llvm-project/pull/73352

>From 21b30e18814016dc61b1a1ed87609e53454e3553 Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Fri, 24 Nov 2023 13:44:45 +0000
Subject: [PATCH 1/2] [NFC][TLI] Improve tests for ArmPL and SLEEF Intrinsics.

Auto-generate the `armpl-intrinsics.ll` test, and use an active lane mask so the
check lines no longer have to match the long `shufflevector` splat constants.

The update scripts now emit `@llvm.compiler.used` directly, instead of the regex:
`@[[LLVM_COMPILER_USED:[a-zA-Z0-9_$"\\.-]+]]`
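
For reference, the predicated shape the new SVE check lines capture looks roughly
like the hand-written sketch below (an illustration only, not the autogenerated
output; the function name @sketch and its arguments are made up):

; Before, the SVE check lines had to match the all-true splat constant
;   shufflevector (insertelement (<vscale x 2 x i1> poison, i1 true, i64 0),
;                  <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
; With tail folding (-prefer-predicate-over-epilogue=predicate-dont-vectorize),
; the predicate is the loop's active lane mask, captured as [[ACTIVE_LANE_MASK:%.*]].
declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64, i64)
declare <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
declare <vscale x 2 x double> @armpl_svcos_f64_x(<vscale x 2 x double>, <vscale x 2 x i1>)

define <vscale x 2 x double> @sketch(ptr %in.ptr, i64 %index, i64 %n) {
entry:
  ; Compute the predicate for the current iteration window.
  %active.lane.mask = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 %index, i64 %n)
  ; Masked load of the input lanes; the masked ArmPL routine reuses the same predicate.
  %wide.masked.load = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr %in.ptr, i32 8, <vscale x 2 x i1> %active.lane.mask, <vscale x 2 x double> poison)
  %vcos = call <vscale x 2 x double> @armpl_svcos_f64_x(<vscale x 2 x double> %wide.masked.load, <vscale x 2 x i1> %active.lane.mask)
  ret <vscale x 2 x double> %vcos
}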
---
 .../replace-intrinsics-with-veclib-armpl.ll   |   2 +-
 .../replace-intrinsics-with-veclib-sleef.ll   |   2 +-
 .../LoopVectorize/AArch64/armpl-intrinsics.ll | 275 +++++++++++++-----
 .../AArch64/sleef-intrinsic-calls-aarch64.ll  | 128 ++++----
 4 files changed, 263 insertions(+), 144 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-armpl.ll b/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-armpl.ll
index a38d4a53407c5d2..18431ae021f9766 100644
--- a/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-armpl.ll
+++ b/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-armpl.ll
@@ -15,7 +15,7 @@ declare <vscale x 2 x double> @llvm.cos.nxv2f64(<vscale x 2 x double>)
 declare <vscale x 4 x float> @llvm.cos.nxv4f32(<vscale x 4 x float>)
 
 ;.
-; CHECK: @[[LLVM_COMPILER_USED:[a-zA-Z0-9_$"\\.-]+]] = appending global [16 x ptr] [ptr @armpl_vcosq_f64, ptr @armpl_vcosq_f32, ptr @armpl_vsinq_f64, ptr @armpl_vsinq_f32, ptr @armpl_vexpq_f64, ptr @armpl_vexpq_f32, ptr @armpl_vexp2q_f64, ptr @armpl_vexp2q_f32, ptr @armpl_vexp10q_f64, ptr @armpl_vexp10q_f32, ptr @armpl_vlogq_f64, ptr @armpl_vlogq_f32, ptr @armpl_vlog2q_f64, ptr @armpl_vlog2q_f32, ptr @armpl_vlog10q_f64, ptr @armpl_vlog10q_f32], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [16 x ptr] [ptr @armpl_vcosq_f64, ptr @armpl_vcosq_f32, ptr @armpl_vsinq_f64, ptr @armpl_vsinq_f32, ptr @armpl_vexpq_f64, ptr @armpl_vexpq_f32, ptr @armpl_vexp2q_f64, ptr @armpl_vexp2q_f32, ptr @armpl_vexp10q_f64, ptr @armpl_vexp10q_f32, ptr @armpl_vlogq_f64, ptr @armpl_vlogq_f32, ptr @armpl_vlog2q_f64, ptr @armpl_vlog2q_f32, ptr @armpl_vlog10q_f64, ptr @armpl_vlog10q_f32], section "llvm.metadata"
 ;.
 define <2 x double> @llvm_cos_f64(<2 x double> %in) {
 ; CHECK-LABEL: define <2 x double> @llvm_cos_f64
diff --git a/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef.ll b/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef.ll
index cedb7dd85149d00..be247de368056e7 100644
--- a/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef.ll
+++ b/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef.ll
@@ -4,7 +4,7 @@
 target triple = "aarch64-unknown-linux-gnu"
 
 ;.
-; CHECK: @[[LLVM_COMPILER_USED:[a-zA-Z0-9_$"\\.-]+]] = appending global [16 x ptr] [ptr @_ZGVnN2v_cos, ptr @_ZGVnN4v_cosf, ptr @_ZGVnN2v_exp, ptr @_ZGVnN4v_expf, ptr @_ZGVnN2v_exp2, ptr @_ZGVnN4v_exp2f, ptr @_ZGVnN2v_exp10, ptr @_ZGVnN4v_exp10f, ptr @_ZGVnN2v_log, ptr @_ZGVnN4v_logf, ptr @_ZGVnN2v_log10, ptr @_ZGVnN4v_log10f, ptr @_ZGVnN2v_log2, ptr @_ZGVnN4v_log2f, ptr @_ZGVnN2v_sin, ptr @_ZGVnN4v_sinf], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [16 x ptr] [ptr @_ZGVnN2v_cos, ptr @_ZGVnN4v_cosf, ptr @_ZGVnN2v_exp, ptr @_ZGVnN4v_expf, ptr @_ZGVnN2v_exp2, ptr @_ZGVnN4v_exp2f, ptr @_ZGVnN2v_exp10, ptr @_ZGVnN4v_exp10f, ptr @_ZGVnN2v_log, ptr @_ZGVnN4v_logf, ptr @_ZGVnN2v_log10, ptr @_ZGVnN4v_log10f, ptr @_ZGVnN2v_log2, ptr @_ZGVnN4v_log2f, ptr @_ZGVnN2v_sin, ptr @_ZGVnN4v_sinf], section "llvm.metadata"
 ;.
 define <2 x double> @llvm_ceil_f64(<2 x double> %in) {
 ; CHECK-LABEL: @llvm_ceil_f64(
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/armpl-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/AArch64/armpl-intrinsics.ll
index 03d959c928577d5..07b1402b4697fa2 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/armpl-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/armpl-intrinsics.ll
@@ -1,10 +1,9 @@
-; RUN: opt -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize -S < %s | FileCheck %s --check-prefixes=CHECK,NEON
-; RUN: opt -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize -S < %s | FileCheck %s --check-prefixes=CHECK,SVE
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(\.|_v|_sv)(ceil|copysign|cos|exp\.|expf?\(|exp2|exp10|fabs|floor|fma|log|m..num|pow|nearbyint|rint|round|sin|sqrt|trunc)|(ret)" --version 2
+; RUN: opt -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize -prefer-predicate-over-epilogue=predicate-dont-vectorize  -S < %s | FileCheck %s --check-prefixes=NEON
+; RUN: opt -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s --check-prefixes=SVE
 
-target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-linux-gnu"
 
-
 ; Tests are checking if LV can vectorize loops with llvm math intrinsics
 ; using mappings from TLI for scalable and fixed width vectorization.
 
@@ -12,10 +11,18 @@ declare double @llvm.cos.f64(double)
 declare float @llvm.cos.f32(float)
 
 define void @cos_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @cos_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vcosq_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svcos_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+;
+; NEON-LABEL: define void @cos_f64
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vcosq_f64(<2 x double> [[WIDE_LOAD:%.*]])
+; NEON:    [[CALL:%.*]] = tail call double @llvm.cos.f64(double [[IN:%.*]]) #[[ATTR1:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @cos_f64
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1:[0-9]+]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svcos_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.cos.f64(double [[IN:%.*]]) #[[ATTR5:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -36,10 +43,17 @@ define void @cos_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
 }
 
 define void @cos_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @cos_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vcosq_f32(<4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svcos_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+; NEON-LABEL: define void @cos_f32
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vcosq_f32(<4 x float> [[WIDE_LOAD:%.*]])
+; NEON:    [[CALL:%.*]] = tail call float @llvm.cos.f32(float [[IN:%.*]]) #[[ATTR2:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @cos_f32
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svcos_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.cos.f32(float [[IN:%.*]]) #[[ATTR6:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -63,10 +77,15 @@ declare double @llvm.exp.f64(double)
 declare float @llvm.exp.f32(float)
 
 define void @exp_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @exp_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vexpq_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svexp_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+; NEON-LABEL: define void @exp_f64
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[CALL:%.*]] = tail call double @llvm.exp.f64(double [[IN:%.*]]) #[[ATTR3:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @exp_f64
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[CALL:%.*]] = tail call double @llvm.exp.f64(double [[IN:%.*]]) #[[ATTR7:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -87,10 +106,15 @@ define void @exp_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
 }
 
 define void @exp_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @exp_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vexpq_f32(<4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svexp_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+; NEON-LABEL: define void @exp_f32
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[CALL:%.*]] = tail call float @llvm.exp.f32(float [[IN:%.*]]) #[[ATTR4:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @exp_f32
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[CALL:%.*]] = tail call float @llvm.exp.f32(float [[IN:%.*]]) #[[ATTR8:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -114,10 +138,17 @@ declare double @llvm.exp2.f64(double)
 declare float @llvm.exp2.f32(float)
 
 define void @exp2_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @exp2_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vexp2q_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svexp2_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+; NEON-LABEL: define void @exp2_f64
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vexp2q_f64(<2 x double> [[WIDE_LOAD:%.*]])
+; NEON:    [[CALL:%.*]] = tail call double @llvm.exp2.f64(double [[IN:%.*]]) #[[ATTR5:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @exp2_f64
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svexp2_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.exp2.f64(double [[IN:%.*]]) #[[ATTR9:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -138,10 +169,17 @@ define void @exp2_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
 }
 
 define void @exp2_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @exp2_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vexp2q_f32(<4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svexp2_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+; NEON-LABEL: define void @exp2_f32
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vexp2q_f32(<4 x float> [[WIDE_LOAD:%.*]])
+; NEON:    [[CALL:%.*]] = tail call float @llvm.exp2.f32(float [[IN:%.*]]) #[[ATTR6:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @exp2_f32
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svexp2_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.exp2.f32(float [[IN:%.*]]) #[[ATTR10:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -165,10 +203,17 @@ declare double @llvm.exp10.f64(double)
 declare float @llvm.exp10.f32(float)
 
 define void @exp10_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @exp10_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vexp10q_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svexp10_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+; NEON-LABEL: define void @exp10_f64
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vexp10q_f64(<2 x double> [[WIDE_LOAD:%.*]])
+; NEON:    [[CALL:%.*]] = tail call double @llvm.exp10.f64(double [[IN:%.*]]) #[[ATTR7:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @exp10_f64
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svexp10_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.exp10.f64(double [[IN:%.*]]) #[[ATTR11:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -189,10 +234,17 @@ define void @exp10_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
 }
 
 define void @exp10_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @exp10_f32(
-; NEON:     [[TMP5:%.*]] = call <4 x float> @armpl_vexp10q_f32(<4 x float> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svexp10_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK:    ret void
+; NEON-LABEL: define void @exp10_f32
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vexp10q_f32(<4 x float> [[WIDE_LOAD:%.*]])
+; NEON:    [[CALL:%.*]] = tail call float @llvm.exp10.f32(float [[IN:%.*]]) #[[ATTR8:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @exp10_f32
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svexp10_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.exp10.f32(float [[IN:%.*]]) #[[ATTR12:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -216,10 +268,17 @@ declare double @llvm.log.f64(double)
 declare float @llvm.log.f32(float)
 
 define void @log_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @log_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vlogq_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svlog_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+; NEON-LABEL: define void @log_f64
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vlogq_f64(<2 x double> [[WIDE_LOAD:%.*]])
+; NEON:    [[CALL:%.*]] = tail call double @llvm.log.f64(double [[IN:%.*]]) #[[ATTR9:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @log_f64
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svlog_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.log.f64(double [[IN:%.*]]) #[[ATTR13:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -240,10 +299,17 @@ define void @log_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
 }
 
 define void @log_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @log_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vlogq_f32(<4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svlog_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+; NEON-LABEL: define void @log_f32
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vlogq_f32(<4 x float> [[WIDE_LOAD:%.*]])
+; NEON:    [[CALL:%.*]] = tail call float @llvm.log.f32(float [[IN:%.*]]) #[[ATTR10:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @log_f32
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svlog_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.log.f32(float [[IN:%.*]]) #[[ATTR14:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -267,10 +333,17 @@ declare double @llvm.log2.f64(double)
 declare float @llvm.log2.f32(float)
 
 define void @log2_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @log2_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vlog2q_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svlog2_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+; NEON-LABEL: define void @log2_f64
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vlog2q_f64(<2 x double> [[WIDE_LOAD:%.*]])
+; NEON:    [[CALL:%.*]] = tail call double @llvm.log2.f64(double [[IN:%.*]]) #[[ATTR11:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @log2_f64
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svlog2_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.log2.f64(double [[IN:%.*]]) #[[ATTR15:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -291,10 +364,17 @@ define void @log2_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
 }
 
 define void @log2_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @log2_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vlog2q_f32(<4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svlog2_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+; NEON-LABEL: define void @log2_f32
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vlog2q_f32(<4 x float> [[WIDE_LOAD:%.*]])
+; NEON:    [[CALL:%.*]] = tail call float @llvm.log2.f32(float [[IN:%.*]]) #[[ATTR12:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @log2_f32
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svlog2_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.log2.f32(float [[IN:%.*]]) #[[ATTR16:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -318,10 +398,17 @@ declare double @llvm.log10.f64(double)
 declare float @llvm.log10.f32(float)
 
 define void @log10_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @log10_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vlog10q_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svlog10_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+; NEON-LABEL: define void @log10_f64
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vlog10q_f64(<2 x double> [[WIDE_LOAD:%.*]])
+; NEON:    [[CALL:%.*]] = tail call double @llvm.log10.f64(double [[IN:%.*]]) #[[ATTR13:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @log10_f64
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svlog10_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.log10.f64(double [[IN:%.*]]) #[[ATTR17:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -342,10 +429,17 @@ define void @log10_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
 }
 
 define void @log10_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @log10_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vlog10q_f32(<4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svlog10_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+; NEON-LABEL: define void @log10_f32
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vlog10q_f32(<4 x float> [[WIDE_LOAD:%.*]])
+; NEON:    [[CALL:%.*]] = tail call float @llvm.log10.f32(float [[IN:%.*]]) #[[ATTR14:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @log10_f32
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svlog10_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.log10.f32(float [[IN:%.*]]) #[[ATTR18:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -369,10 +463,17 @@ declare double @llvm.sin.f64(double)
 declare float @llvm.sin.f32(float)
 
 define void @sin_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @sin_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vsinq_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svsin_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+; NEON-LABEL: define void @sin_f64
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vsinq_f64(<2 x double> [[WIDE_LOAD:%.*]])
+; NEON:    [[CALL:%.*]] = tail call double @llvm.sin.f64(double [[IN:%.*]]) #[[ATTR15:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @sin_f64
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svsin_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.sin.f64(double [[IN:%.*]]) #[[ATTR19:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -393,10 +494,17 @@ define void @sin_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
 }
 
 define void @sin_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @sin_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vsinq_f32(<4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svsin_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+; NEON-LABEL: define void @sin_f32
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vsinq_f32(<4 x float> [[WIDE_LOAD:%.*]])
+; NEON:    [[CALL:%.*]] = tail call float @llvm.sin.f32(float [[IN:%.*]]) #[[ATTR16:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @sin_f32
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svsin_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.sin.f32(float [[IN:%.*]]) #[[ATTR20:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -420,10 +528,17 @@ declare double @llvm.pow.f64(double, double)
 declare float @llvm.pow.f32(float, float)
 
 define void @pow_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @pow_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vpowq_f64(<2 x double> [[TMP4:%.*]], <2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svpow_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+; NEON-LABEL: define void @pow_f64
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vpowq_f64(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
+; NEON:    [[CALL:%.*]] = tail call double @llvm.pow.f64(double [[IN:%.*]], double [[IN]]) #[[ATTR17:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @pow_f64
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svpow_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.pow.f64(double [[IN:%.*]], double [[IN]]) #[[ATTR21:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -444,10 +559,17 @@ define void @pow_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
 }
 
 define void @pow_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @pow_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vpowq_f32(<4 x float> [[TMP4:%.*]], <4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svpow_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+; NEON-LABEL: define void @pow_f32
+; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vpowq_f32(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
+; NEON:    [[CALL:%.*]] = tail call float @llvm.pow.f32(float [[IN:%.*]], float [[IN]]) #[[ATTR18:[0-9]+]]
+; NEON:    ret void
+;
+; SVE-LABEL: define void @pow_f32
+; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svpow_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.pow.f32(float [[IN:%.*]], float [[IN]]) #[[ATTR22:[0-9]+]]
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -466,4 +588,3 @@ define void @pow_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
   for.end:
   ret void
 }
-
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sleef-intrinsic-calls-aarch64.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sleef-intrinsic-calls-aarch64.ll
index 01ce3657835a46e..2b95a861c358baf 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sleef-intrinsic-calls-aarch64.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sleef-intrinsic-calls-aarch64.ll
@@ -1,11 +1,9 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(\.|_)(ceil|copysign|cos|exp\.|expf?\(|exp2|exp10|fabs|floor|fma|log|m..num|pow|.*int|round|sin|sqrt|trunc)|(ret)" --version 2
-; RUN: opt -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize -force-vector-interleave=1 -S < %s | FileCheck %s --check-prefix=NEON
-; RUN: opt -mattr=+sve -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize -force-vector-interleave=1 -S < %s | FileCheck %s --check-prefix=SVE
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(\.|_)(ceil|copysign|cos|exp\.|expf?\(|exp2|exp10|fabs|floor|fma|log|m..num|pow|nearbyint|rint|round|sin|sqrt|trunc)|(ret)" --version 2
+; RUN: opt -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s --check-prefix=NEON
+; RUN: opt -mattr=+sve -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s --check-prefix=SVE
 
-target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-linux-gnu"
 
-
 ; Tests are checking if LV can vectorize loops with llvm math intrinsics using mappings
 ; from TLI (if such mappings exist) for scalable and fixed width vectors.
 
@@ -21,7 +19,7 @@ define void @llvm_ceil_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_ceil_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1:[0-9]+]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.ceil.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.ceil.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call double @llvm.ceil.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -52,7 +50,7 @@ define void @llvm_ceil_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_ceil_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call float @llvm.ceil.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -86,7 +84,7 @@ define void @llvm_copysign_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_copysign_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x double> [[TMP11]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]])
 ; SVE:    [[CALL:%.*]] = tail call double @llvm.copysign.f64(double [[CONV:%.*]], double [[CONV]])
 ; SVE:    ret void
 ;
@@ -117,7 +115,7 @@ define void @llvm_copysign_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_copysign_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x float> [[TMP11]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]])
 ; SVE:    [[CALL:%.*]] = tail call float @llvm.copysign.f32(float [[CONV:%.*]], float [[CONV]])
 ; SVE:    ret void
 ;
@@ -151,8 +149,8 @@ define void @llvm_cos_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_cos_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_cos(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.cos.f64(double [[CONV:%.*]]) #[[ATTR4:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_cos(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.cos.f64(double [[CONV:%.*]]) #[[ATTR5:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -182,8 +180,8 @@ define void @llvm_cos_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_cos_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_cosf(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.cos.f32(float [[CONV:%.*]]) #[[ATTR5:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_cosf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.cos.f32(float [[CONV:%.*]]) #[[ATTR6:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -216,8 +214,8 @@ define void @llvm_exp_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_exp_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.exp.f64(double [[CONV:%.*]]) #[[ATTR6:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.exp.f64(double [[CONV:%.*]]) #[[ATTR7:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -247,8 +245,8 @@ define void @llvm_exp_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_exp_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_expf(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.exp.f32(float [[CONV:%.*]]) #[[ATTR7:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_expf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.exp.f32(float [[CONV:%.*]]) #[[ATTR8:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -281,8 +279,8 @@ define void @llvm_exp2_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_exp2_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp2(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.exp2.f64(double [[CONV:%.*]]) #[[ATTR8:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp2(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.exp2.f64(double [[CONV:%.*]]) #[[ATTR9:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -312,8 +310,8 @@ define void @llvm_exp2_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_exp2_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_exp2f(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.exp2.f32(float [[CONV:%.*]]) #[[ATTR9:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_exp2f(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.exp2.f32(float [[CONV:%.*]]) #[[ATTR10:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -346,8 +344,8 @@ define void @llvm_exp10_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_exp10_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp10(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.exp10.f64(double [[CONV:%.*]]) #[[ATTR10:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp10(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.exp10.f64(double [[CONV:%.*]]) #[[ATTR11:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -377,8 +375,8 @@ define void @llvm_exp10_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_exp10_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_exp10f(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.exp10.f32(float [[CONV:%.*]]) #[[ATTR11:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_exp10f(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.exp10.f32(float [[CONV:%.*]]) #[[ATTR12:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -411,7 +409,7 @@ define void @llvm_fabs_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_fabs_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call double @llvm.fabs.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -443,7 +441,7 @@ define void @llvm_fabs_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_fabs_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call float @llvm.fabs.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -477,7 +475,7 @@ define void @llvm_floor_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_floor_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.floor.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.floor.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call double @llvm.floor.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -508,7 +506,7 @@ define void @llvm_floor_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_floor_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.floor.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.floor.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call float @llvm.floor.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -542,7 +540,7 @@ define void @llvm_fma_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_fma_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x double> [[TMP11]], <vscale x 2 x double> [[TMP11]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]], <vscale x 2 x double> [[TMP17]])
 ; SVE:    [[CALL:%.*]] = tail call double @llvm.fma.f64(double [[CONV:%.*]], double [[CONV]], double [[CONV]])
 ; SVE:    ret void
 ;
@@ -573,7 +571,7 @@ define void @llvm_fma_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_fma_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x float> [[TMP11]], <vscale x 4 x float> [[TMP11]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]], <vscale x 4 x float> [[TMP17]])
 ; SVE:    [[CALL:%.*]] = tail call float @llvm.fma.f32(float [[CONV:%.*]], float [[CONV]], float [[CONV]])
 ; SVE:    ret void
 ;
@@ -607,8 +605,8 @@ define void @llvm_log_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_log_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.log.f64(double [[CONV:%.*]]) #[[ATTR12:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.log.f64(double [[CONV:%.*]]) #[[ATTR13:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -638,8 +636,8 @@ define void @llvm_log_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_log_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_logf(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.log.f32(float [[CONV:%.*]]) #[[ATTR13:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_logf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.log.f32(float [[CONV:%.*]]) #[[ATTR14:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -672,8 +670,8 @@ define void @llvm_log10_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_log10_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log10(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.log10.f64(double [[CONV:%.*]]) #[[ATTR14:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log10(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.log10.f64(double [[CONV:%.*]]) #[[ATTR15:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -703,8 +701,8 @@ define void @llvm_log10_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_log10_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_log10f(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.log10.f32(float [[CONV:%.*]]) #[[ATTR15:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_log10f(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.log10.f32(float [[CONV:%.*]]) #[[ATTR16:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -737,8 +735,8 @@ define void @llvm_log2_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_log2_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log2(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.log2.f64(double [[CONV:%.*]]) #[[ATTR16:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log2(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.log2.f64(double [[CONV:%.*]]) #[[ATTR17:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -768,8 +766,8 @@ define void @llvm_log2_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_log2_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_log2f(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.log2.f32(float [[CONV:%.*]]) #[[ATTR17:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_log2f(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.log2.f32(float [[CONV:%.*]]) #[[ATTR18:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -802,7 +800,7 @@ define void @llvm_maxnum_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_maxnum_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x double> [[TMP11]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]])
 ; SVE:    [[CALL:%.*]] = tail call double @llvm.maxnum.f64(double [[CONV:%.*]], double [[CONV]])
 ; SVE:    ret void
 ;
@@ -833,7 +831,7 @@ define void @llvm_maxnum_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_maxnum_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x float> [[TMP11]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]])
 ; SVE:    [[CALL:%.*]] = tail call float @llvm.maxnum.f32(float [[CONV:%.*]], float [[CONV]])
 ; SVE:    ret void
 ;
@@ -867,7 +865,7 @@ define void @llvm_minnum_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_minnum_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x double> [[TMP11]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]])
 ; SVE:    [[CALL:%.*]] = tail call double @llvm.minnum.f64(double [[CONV:%.*]], double [[CONV]])
 ; SVE:    ret void
 ;
@@ -898,7 +896,7 @@ define void @llvm_minnum_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_minnum_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x float> [[TMP11]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]])
 ; SVE:    [[CALL:%.*]] = tail call float @llvm.minnum.f32(float [[CONV:%.*]], float [[CONV]])
 ; SVE:    ret void
 ;
@@ -932,7 +930,7 @@ define void @llvm_nearbyint_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_nearbyint_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.nearbyint.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.nearbyint.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call double @llvm.nearbyint.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -963,7 +961,7 @@ define void @llvm_nearbyint_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_nearbyint_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.nearbyint.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.nearbyint.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call float @llvm.nearbyint.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -997,8 +995,8 @@ define void @llvm_pow_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_pow_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxvv_pow(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x double> [[TMP11]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.pow.f64(double [[CONV:%.*]], double [[CONV]]) #[[ATTR18:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxvv_pow(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.pow.f64(double [[CONV:%.*]], double [[CONV]]) #[[ATTR19:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -1028,8 +1026,8 @@ define void @llvm_pow_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_pow_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxvv_powf(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x float> [[TMP11]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.pow.f32(float [[CONV:%.*]], float [[CONV]]) #[[ATTR19:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxvv_powf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.pow.f32(float [[CONV:%.*]], float [[CONV]]) #[[ATTR20:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -1062,7 +1060,7 @@ define void @llvm_rint_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_rint_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.rint.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.rint.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call double @llvm.rint.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -1093,7 +1091,7 @@ define void @llvm_rint_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_rint_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.rint.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.rint.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call float @llvm.rint.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -1127,7 +1125,7 @@ define void @llvm_round_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_round_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call double @llvm.round.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -1158,7 +1156,7 @@ define void @llvm_round_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_round_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.round.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.round.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call float @llvm.round.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -1192,8 +1190,8 @@ define void @llvm_sin_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_sin_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_sin(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.sin.f64(double [[CONV:%.*]]) #[[ATTR20:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_sin(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call double @llvm.sin.f64(double [[CONV:%.*]]) #[[ATTR21:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -1223,8 +1221,8 @@ define void @llvm_sin_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_sin_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_sinf(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.sin.f32(float [[CONV:%.*]]) #[[ATTR21:[0-9]+]]
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_sinf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    [[CALL:%.*]] = tail call float @llvm.sin.f32(float [[CONV:%.*]]) #[[ATTR22:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -1257,7 +1255,7 @@ define void @llvm_sqrt_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_sqrt_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call double @llvm.sqrt.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -1288,7 +1286,7 @@ define void @llvm_sqrt_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_sqrt_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call float @llvm.sqrt.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -1322,7 +1320,7 @@ define void @llvm_trunc_f64(double* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_trunc_f64
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call double @llvm.trunc.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
@@ -1353,7 +1351,7 @@ define void @llvm_trunc_f32(float* nocapture %varray) {
 ;
 ; SVE-LABEL: define void @llvm_trunc_f32
 ; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    [[CALL:%.*]] = tail call float @llvm.trunc.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;

>From cace1ec7346d3dfee9fcc5d67d79bce989b207d1 Mon Sep 17 00:00:00 2001
From: Paschalis Mpeis <Paschalis.Mpeis at arm.com>
Date: Mon, 27 Nov 2023 17:36:29 +0000
Subject: [PATCH 2/2] Add `simplifycfg` pass and `noalias` to ensure tail
 folding.

The `noalias` attribute was added only to the `%in.ptr` parameter of the ArmPL
intrinsics tests.
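
For context, the shape of the change for the ArmPL tests is roughly the following
hand-written sketch (assumptions, not the actual diff below: the scalar loop body
and the trip count of 1000 are made up; the presumed mechanism is that `noalias`
lets the vectorizer drop runtime aliasing checks against %out.ptr, so the
tail-folded vector body is emitted and `simplifycfg` then folds the leftover
trivial control flow):

; Pipeline after the change (see the RUN lines in the diff):
;   -passes=inject-tli-mappings,loop-vectorize,simplifycfg
;   -prefer-predicate-over-epilogue=predicate-dont-vectorize
declare double @llvm.cos.f64(double)

define void @cos_f64_sketch(ptr noalias %in.ptr, ptr %out.ptr) {
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @llvm.cos.f64(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}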
---
 .../LoopVectorize/AArch64/armpl-intrinsics.ll | 212 +++++------
 .../AArch64/sleef-intrinsic-calls-aarch64.ll  | 340 +++++++-----------
 2 files changed, 216 insertions(+), 336 deletions(-)

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/armpl-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/AArch64/armpl-intrinsics.ll
index 07b1402b4697fa2..96d94f72fabf06d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/armpl-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/armpl-intrinsics.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(\.|_v|_sv)(ceil|copysign|cos|exp\.|expf?\(|exp2|exp10|fabs|floor|fma|log|m..num|pow|nearbyint|rint|round|sin|sqrt|trunc)|(ret)" --version 2
-; RUN: opt -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize -prefer-predicate-over-epilogue=predicate-dont-vectorize  -S < %s | FileCheck %s --check-prefixes=NEON
-; RUN: opt -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s --check-prefixes=SVE
+; RUN: opt -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize,simplifycfg -prefer-predicate-over-epilogue=predicate-dont-vectorize  -S < %s | FileCheck %s --check-prefixes=NEON
+; RUN: opt -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize,simplifycfg -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s --check-prefixes=SVE
 
 target triple = "aarch64-unknown-linux-gnu"
 
@@ -10,18 +10,16 @@ target triple = "aarch64-unknown-linux-gnu"
 declare double @llvm.cos.f64(double)
 declare float @llvm.cos.f32(float)
 
-define void @cos_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @cos_f64(ptr noalias %in.ptr, ptr %out.ptr) {
 ;
 ; NEON-LABEL: define void @cos_f64
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vcosq_f64(<2 x double> [[WIDE_LOAD:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.cos.f64(double [[IN:%.*]]) #[[ATTR1:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vcosq_f64(<2 x double> [[WIDE_LOAD:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @cos_f64
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1:[0-9]+]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svcos_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.cos.f64(double [[IN:%.*]]) #[[ATTR5:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1:[0-9]+]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svcos_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -42,17 +40,15 @@ define void @cos_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @cos_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @cos_f32(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @cos_f32
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vcosq_f32(<4 x float> [[WIDE_LOAD:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.cos.f32(float [[IN:%.*]]) #[[ATTR2:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vcosq_f32(<4 x float> [[WIDE_LOAD:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @cos_f32
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svcos_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.cos.f32(float [[IN:%.*]]) #[[ATTR6:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svcos_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -76,15 +72,13 @@ define void @cos_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.exp.f64(double)
 declare float @llvm.exp.f32(float)
 
-define void @exp_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @exp_f64(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @exp_f64
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[CALL:%.*]] = tail call double @llvm.exp.f64(double [[IN:%.*]]) #[[ATTR3:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @exp_f64
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[CALL:%.*]] = tail call double @llvm.exp.f64(double [[IN:%.*]]) #[[ATTR7:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
 ; SVE:    ret void
 ;
   entry:
@@ -105,15 +99,13 @@ define void @exp_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @exp_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @exp_f32(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @exp_f32
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[CALL:%.*]] = tail call float @llvm.exp.f32(float [[IN:%.*]]) #[[ATTR4:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @exp_f32
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[CALL:%.*]] = tail call float @llvm.exp.f32(float [[IN:%.*]]) #[[ATTR8:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
 ; SVE:    ret void
 ;
   entry:
@@ -137,17 +129,15 @@ define void @exp_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.exp2.f64(double)
 declare float @llvm.exp2.f32(float)
 
-define void @exp2_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @exp2_f64(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @exp2_f64
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vexp2q_f64(<2 x double> [[WIDE_LOAD:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.exp2.f64(double [[IN:%.*]]) #[[ATTR5:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vexp2q_f64(<2 x double> [[WIDE_LOAD:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @exp2_f64
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svexp2_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.exp2.f64(double [[IN:%.*]]) #[[ATTR9:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svexp2_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -168,17 +158,15 @@ define void @exp2_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @exp2_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @exp2_f32(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @exp2_f32
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vexp2q_f32(<4 x float> [[WIDE_LOAD:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.exp2.f32(float [[IN:%.*]]) #[[ATTR6:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vexp2q_f32(<4 x float> [[WIDE_LOAD:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @exp2_f32
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svexp2_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.exp2.f32(float [[IN:%.*]]) #[[ATTR10:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svexp2_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -202,17 +190,15 @@ define void @exp2_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.exp10.f64(double)
 declare float @llvm.exp10.f32(float)
 
-define void @exp10_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @exp10_f64(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @exp10_f64
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vexp10q_f64(<2 x double> [[WIDE_LOAD:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.exp10.f64(double [[IN:%.*]]) #[[ATTR7:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vexp10q_f64(<2 x double> [[WIDE_LOAD:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @exp10_f64
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svexp10_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.exp10.f64(double [[IN:%.*]]) #[[ATTR11:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svexp10_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -233,17 +219,15 @@ define void @exp10_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @exp10_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @exp10_f32(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @exp10_f32
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vexp10q_f32(<4 x float> [[WIDE_LOAD:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.exp10.f32(float [[IN:%.*]]) #[[ATTR8:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vexp10q_f32(<4 x float> [[WIDE_LOAD:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @exp10_f32
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svexp10_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.exp10.f32(float [[IN:%.*]]) #[[ATTR12:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svexp10_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -267,17 +251,15 @@ define void @exp10_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.log.f64(double)
 declare float @llvm.log.f32(float)
 
-define void @log_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @log_f64(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @log_f64
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vlogq_f64(<2 x double> [[WIDE_LOAD:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.log.f64(double [[IN:%.*]]) #[[ATTR9:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vlogq_f64(<2 x double> [[WIDE_LOAD:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @log_f64
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svlog_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.log.f64(double [[IN:%.*]]) #[[ATTR13:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svlog_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -298,17 +280,15 @@ define void @log_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @log_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @log_f32(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @log_f32
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vlogq_f32(<4 x float> [[WIDE_LOAD:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.log.f32(float [[IN:%.*]]) #[[ATTR10:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vlogq_f32(<4 x float> [[WIDE_LOAD:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @log_f32
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svlog_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.log.f32(float [[IN:%.*]]) #[[ATTR14:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svlog_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -332,17 +312,15 @@ define void @log_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.log2.f64(double)
 declare float @llvm.log2.f32(float)
 
-define void @log2_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @log2_f64(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @log2_f64
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vlog2q_f64(<2 x double> [[WIDE_LOAD:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.log2.f64(double [[IN:%.*]]) #[[ATTR11:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vlog2q_f64(<2 x double> [[WIDE_LOAD:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @log2_f64
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svlog2_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.log2.f64(double [[IN:%.*]]) #[[ATTR15:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svlog2_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -363,17 +341,15 @@ define void @log2_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @log2_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @log2_f32(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @log2_f32
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vlog2q_f32(<4 x float> [[WIDE_LOAD:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.log2.f32(float [[IN:%.*]]) #[[ATTR12:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vlog2q_f32(<4 x float> [[WIDE_LOAD:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @log2_f32
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svlog2_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.log2.f32(float [[IN:%.*]]) #[[ATTR16:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svlog2_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -397,17 +373,15 @@ define void @log2_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.log10.f64(double)
 declare float @llvm.log10.f32(float)
 
-define void @log10_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @log10_f64(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @log10_f64
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vlog10q_f64(<2 x double> [[WIDE_LOAD:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.log10.f64(double [[IN:%.*]]) #[[ATTR13:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vlog10q_f64(<2 x double> [[WIDE_LOAD:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @log10_f64
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svlog10_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.log10.f64(double [[IN:%.*]]) #[[ATTR17:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svlog10_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -428,17 +402,15 @@ define void @log10_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @log10_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @log10_f32(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @log10_f32
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vlog10q_f32(<4 x float> [[WIDE_LOAD:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.log10.f32(float [[IN:%.*]]) #[[ATTR14:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vlog10q_f32(<4 x float> [[WIDE_LOAD:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @log10_f32
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svlog10_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.log10.f32(float [[IN:%.*]]) #[[ATTR18:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svlog10_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -462,17 +434,15 @@ define void @log10_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.sin.f64(double)
 declare float @llvm.sin.f32(float)
 
-define void @sin_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @sin_f64(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @sin_f64
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vsinq_f64(<2 x double> [[WIDE_LOAD:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.sin.f64(double [[IN:%.*]]) #[[ATTR15:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vsinq_f64(<2 x double> [[WIDE_LOAD:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @sin_f64
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svsin_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.sin.f64(double [[IN:%.*]]) #[[ATTR19:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svsin_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -493,17 +463,15 @@ define void @sin_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @sin_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @sin_f32(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @sin_f32
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vsinq_f32(<4 x float> [[WIDE_LOAD:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.sin.f32(float [[IN:%.*]]) #[[ATTR16:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vsinq_f32(<4 x float> [[WIDE_LOAD:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @sin_f32
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svsin_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.sin.f32(float [[IN:%.*]]) #[[ATTR20:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svsin_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -527,17 +495,15 @@ define void @sin_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.pow.f64(double, double)
 declare float @llvm.pow.f32(float, float)
 
-define void @pow_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @pow_f64(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @pow_f64
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <2 x double> @armpl_vpowq_f64(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.pow.f64(double [[IN:%.*]], double [[IN]]) #[[ATTR17:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vpowq_f64(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @pow_f64
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svpow_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.pow.f64(double [[IN:%.*]], double [[IN]]) #[[ATTR21:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svpow_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -558,17 +524,15 @@ define void @pow_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @pow_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
+define void @pow_f32(ptr noalias %in.ptr, ptr %out.ptr) {
 ; NEON-LABEL: define void @pow_f32
-; NEON-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
-; NEON:    [[TMP4:%.*]] = call <4 x float> @armpl_vpowq_f32(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.pow.f32(float [[IN:%.*]], float [[IN]]) #[[ATTR18:[0-9]+]]
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vpowq_f32(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @pow_f32
-; SVE-SAME: (ptr nocapture [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svpow_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.pow.f32(float [[IN:%.*]], float [[IN]]) #[[ATTR22:[0-9]+]]
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svpow_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sleef-intrinsic-calls-aarch64.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sleef-intrinsic-calls-aarch64.ll
index 2b95a861c358baf..2300ce74996e391 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sleef-intrinsic-calls-aarch64.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sleef-intrinsic-calls-aarch64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(\.|_)(ceil|copysign|cos|exp\.|expf?\(|exp2|exp10|fabs|floor|fma|log|m..num|pow|nearbyint|rint|round|sin|sqrt|trunc)|(ret)" --version 2
-; RUN: opt -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s --check-prefix=NEON
-; RUN: opt -mattr=+sve -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s --check-prefix=SVE
+; RUN: opt -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize,simplifycfg -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s --check-prefix=NEON
+; RUN: opt -mattr=+sve -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize,simplifycfg -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s --check-prefix=SVE
 
 target triple = "aarch64-unknown-linux-gnu"
 
@@ -10,17 +10,15 @@ target triple = "aarch64-unknown-linux-gnu"
 declare double @llvm.ceil.f64(double)
 declare float @llvm.ceil.f32(float)
 
-define void @llvm_ceil_f64(double* nocapture %varray) {
+define void @llvm_ceil_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_ceil_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.ceil.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_ceil_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1:[0-9]+]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1:[0-9]+]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.ceil.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.ceil.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -41,17 +39,15 @@ define void @llvm_ceil_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_ceil_f32(float* nocapture %varray) {
+define void @llvm_ceil_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_ceil_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.ceil.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_ceil_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.ceil.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -75,17 +71,15 @@ define void @llvm_ceil_f32(float* nocapture %varray) {
 declare double @llvm.copysign.f64(double, double)
 declare float @llvm.copysign.f32(float, float)
 
-define void @llvm_copysign_f64(double* nocapture %varray) {
+define void @llvm_copysign_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_copysign_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.copysign.v2f64(<2 x double> [[TMP1:%.*]], <2 x double> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.copysign.f64(double [[CONV:%.*]], double [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_copysign_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.copysign.f64(double [[CONV:%.*]], double [[CONV]])
 ; SVE:    ret void
 ;
   entry:
@@ -106,17 +100,15 @@ define void @llvm_copysign_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_copysign_f32(float* nocapture %varray) {
+define void @llvm_copysign_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_copysign_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.copysign.v4f32(<4 x float> [[TMP1:%.*]], <4 x float> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.copysign.f32(float [[CONV:%.*]], float [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_copysign_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.copysign.f32(float [[CONV:%.*]], float [[CONV]])
 ; SVE:    ret void
 ;
   entry:
@@ -140,17 +132,15 @@ define void @llvm_copysign_f32(float* nocapture %varray) {
 declare double @llvm.cos.f64(double)
 declare float @llvm.cos.f32(float)
 
-define void @llvm_cos_f64(double* nocapture %varray) {
+define void @llvm_cos_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_cos_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_cos(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.cos.f64(double [[CONV:%.*]]) #[[ATTR1:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_cos_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_cos(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.cos.f64(double [[CONV:%.*]]) #[[ATTR5:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -171,17 +161,15 @@ define void @llvm_cos_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_cos_f32(float* nocapture %varray) {
+define void @llvm_cos_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_cos_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_cosf(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.cos.f32(float [[CONV:%.*]]) #[[ATTR2:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_cos_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_cosf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.cos.f32(float [[CONV:%.*]]) #[[ATTR6:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -205,17 +193,15 @@ define void @llvm_cos_f32(float* nocapture %varray) {
 declare double @llvm.exp.f64(double)
 declare float @llvm.exp.f32(float)
 
-define void @llvm_exp_f64(double* nocapture %varray) {
+define void @llvm_exp_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_exp_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_exp(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.exp.f64(double [[CONV:%.*]]) #[[ATTR3:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_exp_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.exp.f64(double [[CONV:%.*]]) #[[ATTR7:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -236,17 +222,15 @@ define void @llvm_exp_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_exp_f32(float* nocapture %varray) {
+define void @llvm_exp_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_exp_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_expf(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.exp.f32(float [[CONV:%.*]]) #[[ATTR4:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_exp_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_expf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.exp.f32(float [[CONV:%.*]]) #[[ATTR8:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -270,17 +254,15 @@ define void @llvm_exp_f32(float* nocapture %varray) {
 declare double @llvm.exp2.f64(double)
 declare float @llvm.exp2.f32(float)
 
-define void @llvm_exp2_f64(double* nocapture %varray) {
+define void @llvm_exp2_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_exp2_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_exp2(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.exp2.f64(double [[CONV:%.*]]) #[[ATTR5:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_exp2_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp2(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.exp2.f64(double [[CONV:%.*]]) #[[ATTR9:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -301,17 +283,15 @@ define void @llvm_exp2_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_exp2_f32(float* nocapture %varray) {
+define void @llvm_exp2_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_exp2_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_exp2f(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.exp2.f32(float [[CONV:%.*]]) #[[ATTR6:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_exp2_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_exp2f(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.exp2.f32(float [[CONV:%.*]]) #[[ATTR10:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -335,17 +315,15 @@ define void @llvm_exp2_f32(float* nocapture %varray) {
 declare double @llvm.exp10.f64(double)
 declare float @llvm.exp10.f32(float)
 
-define void @llvm_exp10_f64(double* nocapture %varray) {
+define void @llvm_exp10_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_exp10_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_exp10(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.exp10.f64(double [[CONV:%.*]]) #[[ATTR7:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_exp10_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp10(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.exp10.f64(double [[CONV:%.*]]) #[[ATTR11:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -366,17 +344,15 @@ define void @llvm_exp10_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_exp10_f32(float* nocapture %varray) {
+define void @llvm_exp10_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_exp10_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_exp10f(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.exp10.f32(float [[CONV:%.*]]) #[[ATTR8:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_exp10_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_exp10f(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.exp10.f32(float [[CONV:%.*]]) #[[ATTR12:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -400,17 +376,15 @@ define void @llvm_exp10_f32(float* nocapture %varray) {
 declare double @llvm.fabs.f64(double)
 declare float @llvm.fabs.f32(float)
 
-define void @llvm_fabs_f64(double* nocapture %varray) {
+define void @llvm_fabs_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_fabs_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.fabs.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.fabs.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_fabs_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.fabs.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -432,17 +406,15 @@ define void @llvm_fabs_f64(double* nocapture %varray) {
 }
 
 
-define void @llvm_fabs_f32(float* nocapture %varray) {
+define void @llvm_fabs_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_fabs_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.fabs.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.fabs.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_fabs_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.fabs.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -466,17 +438,15 @@ define void @llvm_fabs_f32(float* nocapture %varray) {
 declare double @llvm.floor.f64(double)
 declare float @llvm.floor.f32(float)
 
-define void @llvm_floor_f64(double* nocapture %varray) {
+define void @llvm_floor_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_floor_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.floor.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_floor_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.floor.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.floor.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -497,17 +467,15 @@ define void @llvm_floor_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_floor_f32(float* nocapture %varray) {
+define void @llvm_floor_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_floor_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.floor.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_floor_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.floor.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.floor.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -531,17 +499,15 @@ define void @llvm_floor_f32(float* nocapture %varray) {
 declare double @llvm.fma.f64(double, double, double)
 declare float @llvm.fma.f32(float, float, float)
 
-define void @llvm_fma_f64(double* nocapture %varray) {
+define void @llvm_fma_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_fma_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.fma.v2f64(<2 x double> [[TMP1:%.*]], <2 x double> [[TMP1]], <2 x double> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.fma.f64(double [[CONV:%.*]], double [[CONV]], double [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_fma_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]], <vscale x 2 x double> [[TMP17]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.fma.f64(double [[CONV:%.*]], double [[CONV]], double [[CONV]])
 ; SVE:    ret void
 ;
   entry:
@@ -562,17 +528,15 @@ define void @llvm_fma_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_fma_f32(float* nocapture %varray) {
+define void @llvm_fma_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_fma_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> [[TMP1:%.*]], <4 x float> [[TMP1]], <4 x float> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.fma.f32(float [[CONV:%.*]], float [[CONV]], float [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_fma_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]], <vscale x 4 x float> [[TMP17]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.fma.f32(float [[CONV:%.*]], float [[CONV]], float [[CONV]])
 ; SVE:    ret void
 ;
   entry:
@@ -596,17 +560,15 @@ define void @llvm_fma_f32(float* nocapture %varray) {
 declare double @llvm.log.f64(double)
 declare float @llvm.log.f32(float)
 
-define void @llvm_log_f64(double* nocapture %varray) {
+define void @llvm_log_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_log_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_log(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.log.f64(double [[CONV:%.*]]) #[[ATTR9:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_log_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.log.f64(double [[CONV:%.*]]) #[[ATTR13:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -627,17 +589,15 @@ define void @llvm_log_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_log_f32(float* nocapture %varray) {
+define void @llvm_log_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_log_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_logf(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.log.f32(float [[CONV:%.*]]) #[[ATTR10:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_log_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_logf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.log.f32(float [[CONV:%.*]]) #[[ATTR14:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -661,17 +621,15 @@ define void @llvm_log_f32(float* nocapture %varray) {
 declare double @llvm.log10.f64(double)
 declare float @llvm.log10.f32(float)
 
-define void @llvm_log10_f64(double* nocapture %varray) {
+define void @llvm_log10_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_log10_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_log10(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.log10.f64(double [[CONV:%.*]]) #[[ATTR11:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_log10_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log10(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.log10.f64(double [[CONV:%.*]]) #[[ATTR15:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -692,17 +650,15 @@ define void @llvm_log10_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_log10_f32(float* nocapture %varray) {
+define void @llvm_log10_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_log10_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_log10f(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.log10.f32(float [[CONV:%.*]]) #[[ATTR12:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_log10_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_log10f(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.log10.f32(float [[CONV:%.*]]) #[[ATTR16:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -726,17 +682,15 @@ define void @llvm_log10_f32(float* nocapture %varray) {
 declare double @llvm.log2.f64(double)
 declare float @llvm.log2.f32(float)
 
-define void @llvm_log2_f64(double* nocapture %varray) {
+define void @llvm_log2_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_log2_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_log2(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.log2.f64(double [[CONV:%.*]]) #[[ATTR13:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_log2_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log2(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.log2.f64(double [[CONV:%.*]]) #[[ATTR17:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -757,17 +711,15 @@ define void @llvm_log2_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_log2_f32(float* nocapture %varray) {
+define void @llvm_log2_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_log2_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_log2f(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.log2.f32(float [[CONV:%.*]]) #[[ATTR14:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_log2_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_log2f(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.log2.f32(float [[CONV:%.*]]) #[[ATTR18:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -791,17 +743,15 @@ define void @llvm_log2_f32(float* nocapture %varray) {
 declare double @llvm.maxnum.f64(double, double)
 declare float @llvm.maxnum.f32(float, float)
 
-define void @llvm_maxnum_f64(double* nocapture %varray) {
+define void @llvm_maxnum_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_maxnum_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.maxnum.v2f64(<2 x double> [[TMP1:%.*]], <2 x double> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.maxnum.f64(double [[CONV:%.*]], double [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_maxnum_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.maxnum.f64(double [[CONV:%.*]], double [[CONV]])
 ; SVE:    ret void
 ;
   entry:
@@ -822,17 +772,15 @@ define void @llvm_maxnum_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_maxnum_f32(float* nocapture %varray) {
+define void @llvm_maxnum_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_maxnum_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP1:%.*]], <4 x float> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.maxnum.f32(float [[CONV:%.*]], float [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_maxnum_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.maxnum.f32(float [[CONV:%.*]], float [[CONV]])
 ; SVE:    ret void
 ;
   entry:
@@ -856,17 +804,15 @@ define void @llvm_maxnum_f32(float* nocapture %varray) {
 declare double @llvm.minnum.f64(double, double)
 declare float @llvm.minnum.f32(float, float)
 
-define void @llvm_minnum_f64(double* nocapture %varray) {
+define void @llvm_minnum_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_minnum_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.minnum.v2f64(<2 x double> [[TMP1:%.*]], <2 x double> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.minnum.f64(double [[CONV:%.*]], double [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_minnum_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.minnum.f64(double [[CONV:%.*]], double [[CONV]])
 ; SVE:    ret void
 ;
   entry:
@@ -887,17 +833,15 @@ define void @llvm_minnum_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_minnum_f32(float* nocapture %varray) {
+define void @llvm_minnum_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_minnum_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP1:%.*]], <4 x float> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.minnum.f32(float [[CONV:%.*]], float [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_minnum_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.minnum.f32(float [[CONV:%.*]], float [[CONV]])
 ; SVE:    ret void
 ;
   entry:
@@ -921,17 +865,15 @@ define void @llvm_minnum_f32(float* nocapture %varray) {
 declare double @llvm.nearbyint.f64(double)
 declare float @llvm.nearbyint.f32(float)
 
-define void @llvm_nearbyint_f64(double* nocapture %varray) {
+define void @llvm_nearbyint_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_nearbyint_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.nearbyint.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_nearbyint_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.nearbyint.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.nearbyint.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -952,17 +894,15 @@ define void @llvm_nearbyint_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_nearbyint_f32(float* nocapture %varray) {
+define void @llvm_nearbyint_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_nearbyint_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.nearbyint.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_nearbyint_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.nearbyint.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.nearbyint.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -986,17 +926,15 @@ define void @llvm_nearbyint_f32(float* nocapture %varray) {
 declare double @llvm.pow.f64(double, double)
 declare float @llvm.pow.f32(float, float)
 
-define void @llvm_pow_f64(double* nocapture %varray) {
+define void @llvm_pow_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_pow_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2vv_pow(<2 x double> [[TMP1:%.*]], <2 x double> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.pow.f64(double [[CONV:%.*]], double [[CONV]]) #[[ATTR15:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_pow_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxvv_pow(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.pow.f64(double [[CONV:%.*]], double [[CONV]]) #[[ATTR19:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -1017,17 +955,15 @@ define void @llvm_pow_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_pow_f32(float* nocapture %varray) {
+define void @llvm_pow_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_pow_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4vv_powf(<4 x float> [[TMP1:%.*]], <4 x float> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.pow.f32(float [[CONV:%.*]], float [[CONV]]) #[[ATTR16:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_pow_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxvv_powf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.pow.f32(float [[CONV:%.*]], float [[CONV]]) #[[ATTR20:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -1051,17 +987,15 @@ define void @llvm_pow_f32(float* nocapture %varray) {
 declare double @llvm.rint.f64(double)
 declare float @llvm.rint.f32(float)
 
-define void @llvm_rint_f64(double* nocapture %varray) {
+define void @llvm_rint_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_rint_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.rint.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_rint_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.rint.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.rint.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1082,17 +1016,15 @@ define void @llvm_rint_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_rint_f32(float* nocapture %varray) {
+define void @llvm_rint_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_rint_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.rint.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_rint_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.rint.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.rint.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1116,17 +1048,15 @@ define void @llvm_rint_f32(float* nocapture %varray) {
 declare double @llvm.round.f64(double)
 declare float @llvm.round.f32(float)
 
-define void @llvm_round_f64(double* nocapture %varray) {
+define void @llvm_round_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_round_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.round.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.round.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_round_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.round.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1147,17 +1077,15 @@ define void @llvm_round_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_round_f32(float* nocapture %varray) {
+define void @llvm_round_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_round_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.round.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.round.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_round_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.round.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.round.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1181,17 +1109,15 @@ define void @llvm_round_f32(float* nocapture %varray) {
 declare double @llvm.sin.f64(double)
 declare float @llvm.sin.f32(float)
 
-define void @llvm_sin_f64(double* nocapture %varray) {
+define void @llvm_sin_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_sin_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_sin(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.sin.f64(double [[CONV:%.*]]) #[[ATTR17:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_sin_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_sin(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.sin.f64(double [[CONV:%.*]]) #[[ATTR21:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -1212,17 +1138,15 @@ define void @llvm_sin_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_sin_f32(float* nocapture %varray) {
+define void @llvm_sin_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_sin_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_sinf(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.sin.f32(float [[CONV:%.*]]) #[[ATTR18:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_sin_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_sinf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.sin.f32(float [[CONV:%.*]]) #[[ATTR22:[0-9]+]]
 ; SVE:    ret void
 ;
   entry:
@@ -1246,17 +1170,15 @@ define void @llvm_sin_f32(float* nocapture %varray) {
 declare double @llvm.sqrt.f64(double)
 declare float @llvm.sqrt.f32(float)
 
-define void @llvm_sqrt_f64(double* nocapture %varray) {
+define void @llvm_sqrt_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_sqrt_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.sqrt.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_sqrt_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.sqrt.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1277,17 +1199,15 @@ define void @llvm_sqrt_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_sqrt_f32(float* nocapture %varray) {
+define void @llvm_sqrt_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_sqrt_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.sqrt.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.sqrt.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_sqrt_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.sqrt.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1311,17 +1231,15 @@ define void @llvm_sqrt_f32(float* nocapture %varray) {
 declare double @llvm.trunc.f64(double)
 declare float @llvm.trunc.f32(float)
 
-define void @llvm_trunc_f64(double* nocapture %varray) {
+define void @llvm_trunc_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_trunc_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.trunc.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_trunc_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.trunc.f64(double [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1342,17 +1260,15 @@ define void @llvm_trunc_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_trunc_f32(float* nocapture %varray) {
+define void @llvm_trunc_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_trunc_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.trunc.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_trunc_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
 ; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.trunc.f32(float [[CONV:%.*]])
 ; SVE:    ret void
 ;
   entry:


