[llvm] 1bfb84b - [NFC][TLI] Improve tests for ArmPL and SLEEF Intrinsics. (#73352)

via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 29 03:19:15 PST 2023


Author: Paschalis Mpeis
Date: 2023-11-29T11:19:10Z
New Revision: 1bfb84b47738155cc3c290ac7034075bec60920b

URL: https://github.com/llvm/llvm-project/commit/1bfb84b47738155cc3c290ac7034075bec60920b
DIFF: https://github.com/llvm/llvm-project/commit/1bfb84b47738155cc3c290ac7034075bec60920b.diff

LOG: [NFC][TLI] Improve tests for ArmPL and SLEEF Intrinsics. (#73352)

Auto-generate test `armpl-intrinsics.ll` and simplify tests:
- Eliminate scalar tail with no tail-folding flag.
- Use active lane mask for shorter check lines (no long `shufflevectors`).
- Eliminate scalar loops by providing `noalias` to relevant arguments and
run `simplifycfg` to drop them.
- Update script to now use `@llvm.compiler.used` instead of a longer regex.

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-armpl.ll
    llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef.ll
    llvm/test/Transforms/LoopVectorize/AArch64/armpl-intrinsics.ll
    llvm/test/Transforms/LoopVectorize/AArch64/sleef-intrinsic-calls-aarch64.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-armpl.ll b/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-armpl.ll
index a38d4a53407c5d2..18431ae021f9766 100644
--- a/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-armpl.ll
+++ b/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-armpl.ll
@@ -15,7 +15,7 @@ declare <vscale x 2 x double> @llvm.cos.nxv2f64(<vscale x 2 x double>)
 declare <vscale x 4 x float> @llvm.cos.nxv4f32(<vscale x 4 x float>)
 
 ;.
-; CHECK: @[[LLVM_COMPILER_USED:[a-zA-Z0-9_$"\\.-]+]] = appending global [16 x ptr] [ptr @armpl_vcosq_f64, ptr @armpl_vcosq_f32, ptr @armpl_vsinq_f64, ptr @armpl_vsinq_f32, ptr @armpl_vexpq_f64, ptr @armpl_vexpq_f32, ptr @armpl_vexp2q_f64, ptr @armpl_vexp2q_f32, ptr @armpl_vexp10q_f64, ptr @armpl_vexp10q_f32, ptr @armpl_vlogq_f64, ptr @armpl_vlogq_f32, ptr @armpl_vlog2q_f64, ptr @armpl_vlog2q_f32, ptr @armpl_vlog10q_f64, ptr @armpl_vlog10q_f32], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [16 x ptr] [ptr @armpl_vcosq_f64, ptr @armpl_vcosq_f32, ptr @armpl_vsinq_f64, ptr @armpl_vsinq_f32, ptr @armpl_vexpq_f64, ptr @armpl_vexpq_f32, ptr @armpl_vexp2q_f64, ptr @armpl_vexp2q_f32, ptr @armpl_vexp10q_f64, ptr @armpl_vexp10q_f32, ptr @armpl_vlogq_f64, ptr @armpl_vlogq_f32, ptr @armpl_vlog2q_f64, ptr @armpl_vlog2q_f32, ptr @armpl_vlog10q_f64, ptr @armpl_vlog10q_f32], section "llvm.metadata"
 ;.
 define <2 x double> @llvm_cos_f64(<2 x double> %in) {
 ; CHECK-LABEL: define <2 x double> @llvm_cos_f64

diff  --git a/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef.ll b/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef.ll
index cedb7dd85149d00..be247de368056e7 100644
--- a/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef.ll
+++ b/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef.ll
@@ -4,7 +4,7 @@
 target triple = "aarch64-unknown-linux-gnu"
 
 ;.
-; CHECK: @[[LLVM_COMPILER_USED:[a-zA-Z0-9_$"\\.-]+]] = appending global [16 x ptr] [ptr @_ZGVnN2v_cos, ptr @_ZGVnN4v_cosf, ptr @_ZGVnN2v_exp, ptr @_ZGVnN4v_expf, ptr @_ZGVnN2v_exp2, ptr @_ZGVnN4v_exp2f, ptr @_ZGVnN2v_exp10, ptr @_ZGVnN4v_exp10f, ptr @_ZGVnN2v_log, ptr @_ZGVnN4v_logf, ptr @_ZGVnN2v_log10, ptr @_ZGVnN4v_log10f, ptr @_ZGVnN2v_log2, ptr @_ZGVnN4v_log2f, ptr @_ZGVnN2v_sin, ptr @_ZGVnN4v_sinf], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [16 x ptr] [ptr @_ZGVnN2v_cos, ptr @_ZGVnN4v_cosf, ptr @_ZGVnN2v_exp, ptr @_ZGVnN4v_expf, ptr @_ZGVnN2v_exp2, ptr @_ZGVnN4v_exp2f, ptr @_ZGVnN2v_exp10, ptr @_ZGVnN4v_exp10f, ptr @_ZGVnN2v_log, ptr @_ZGVnN4v_logf, ptr @_ZGVnN2v_log10, ptr @_ZGVnN4v_log10f, ptr @_ZGVnN2v_log2, ptr @_ZGVnN4v_log2f, ptr @_ZGVnN2v_sin, ptr @_ZGVnN4v_sinf], section "llvm.metadata"
 ;.
 define <2 x double> @llvm_ceil_f64(<2 x double> %in) {
 ; CHECK-LABEL: @llvm_ceil_f64(

diff  --git a/llvm/test/Transforms/LoopVectorize/AArch64/armpl-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/AArch64/armpl-intrinsics.ll
index 03d959c928577d5..96d94f72fabf06d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/armpl-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/armpl-intrinsics.ll
@@ -1,21 +1,26 @@
-; RUN: opt -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize -S < %s | FileCheck %s --check-prefixes=CHECK,NEON
-; RUN: opt -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize -S < %s | FileCheck %s --check-prefixes=CHECK,SVE
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(\.|_v|_sv)(ceil|copysign|cos|exp\.|expf?\(|exp2|exp10|fabs|floor|fma|log|m..num|pow|nearbyint|rint|round|sin|sqrt|trunc)|(ret)" --version 2
+; RUN: opt -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize,simplifycfg -prefer-predicate-over-epilogue=predicate-dont-vectorize  -S < %s | FileCheck %s --check-prefixes=NEON
+; RUN: opt -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize,simplifycfg -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s --check-prefixes=SVE
 
-target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-linux-gnu"
 
-
 ; Tests are checking if LV can vectorize loops with llvm math intrinsics
 ; using mappings from TLI for scalable and fixed width vectorization.
 
 declare double @llvm.cos.f64(double)
 declare float @llvm.cos.f32(float)
 
-define void @cos_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @cos_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vcosq_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svcos_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+define void @cos_f64(ptr noalias %in.ptr, ptr %out.ptr) {
+;
+; NEON-LABEL: define void @cos_f64
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vcosq_f64(<2 x double> [[WIDE_LOAD:%.*]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @cos_f64
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1:[0-9]+]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svcos_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -35,11 +40,16 @@ define void @cos_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @cos_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @cos_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vcosq_f32(<4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svcos_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+define void @cos_f32(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @cos_f32
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vcosq_f32(<4 x float> [[WIDE_LOAD:%.*]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @cos_f32
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svcos_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -62,11 +72,14 @@ define void @cos_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.exp.f64(double)
 declare float @llvm.exp.f32(float)
 
-define void @exp_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @exp_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vexpq_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svexp_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+define void @exp_f64(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @exp_f64
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    ret void
+;
+; SVE-LABEL: define void @exp_f64
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -86,11 +99,14 @@ define void @exp_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @exp_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @exp_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vexpq_f32(<4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svexp_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+define void @exp_f32(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @exp_f32
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    ret void
+;
+; SVE-LABEL: define void @exp_f32
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -113,11 +129,16 @@ define void @exp_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.exp2.f64(double)
 declare float @llvm.exp2.f32(float)
 
-define void @exp2_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @exp2_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vexp2q_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svexp2_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+define void @exp2_f64(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @exp2_f64
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vexp2q_f64(<2 x double> [[WIDE_LOAD:%.*]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @exp2_f64
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svexp2_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -137,11 +158,16 @@ define void @exp2_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @exp2_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @exp2_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vexp2q_f32(<4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svexp2_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+define void @exp2_f32(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @exp2_f32
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vexp2q_f32(<4 x float> [[WIDE_LOAD:%.*]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @exp2_f32
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svexp2_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -164,11 +190,16 @@ define void @exp2_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.exp10.f64(double)
 declare float @llvm.exp10.f32(float)
 
-define void @exp10_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @exp10_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vexp10q_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svexp10_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+define void @exp10_f64(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @exp10_f64
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vexp10q_f64(<2 x double> [[WIDE_LOAD:%.*]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @exp10_f64
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svexp10_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -188,11 +219,16 @@ define void @exp10_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @exp10_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @exp10_f32(
-; NEON:     [[TMP5:%.*]] = call <4 x float> @armpl_vexp10q_f32(<4 x float> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svexp10_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK:    ret void
+define void @exp10_f32(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @exp10_f32
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vexp10q_f32(<4 x float> [[WIDE_LOAD:%.*]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @exp10_f32
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svexp10_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -215,11 +251,16 @@ define void @exp10_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.log.f64(double)
 declare float @llvm.log.f32(float)
 
-define void @log_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @log_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vlogq_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svlog_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+define void @log_f64(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @log_f64
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vlogq_f64(<2 x double> [[WIDE_LOAD:%.*]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @log_f64
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svlog_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -239,11 +280,16 @@ define void @log_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @log_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @log_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vlogq_f32(<4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svlog_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+define void @log_f32(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @log_f32
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vlogq_f32(<4 x float> [[WIDE_LOAD:%.*]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @log_f32
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svlog_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -266,11 +312,16 @@ define void @log_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.log2.f64(double)
 declare float @llvm.log2.f32(float)
 
-define void @log2_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @log2_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vlog2q_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svlog2_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+define void @log2_f64(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @log2_f64
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vlog2q_f64(<2 x double> [[WIDE_LOAD:%.*]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @log2_f64
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svlog2_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -290,11 +341,16 @@ define void @log2_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @log2_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @log2_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vlog2q_f32(<4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svlog2_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+define void @log2_f32(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @log2_f32
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vlog2q_f32(<4 x float> [[WIDE_LOAD:%.*]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @log2_f32
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svlog2_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -317,11 +373,16 @@ define void @log2_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.log10.f64(double)
 declare float @llvm.log10.f32(float)
 
-define void @log10_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @log10_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vlog10q_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svlog10_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+define void @log10_f64(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @log10_f64
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vlog10q_f64(<2 x double> [[WIDE_LOAD:%.*]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @log10_f64
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svlog10_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -341,11 +402,16 @@ define void @log10_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @log10_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @log10_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vlog10q_f32(<4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svlog10_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+define void @log10_f32(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @log10_f32
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vlog10q_f32(<4 x float> [[WIDE_LOAD:%.*]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @log10_f32
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svlog10_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -368,11 +434,16 @@ define void @log10_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.sin.f64(double)
 declare float @llvm.sin.f32(float)
 
-define void @sin_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @sin_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vsinq_f64(<2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svsin_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+define void @sin_f64(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @sin_f64
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vsinq_f64(<2 x double> [[WIDE_LOAD:%.*]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @sin_f64
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svsin_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -392,11 +463,16 @@ define void @sin_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @sin_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @sin_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vsinq_f32(<4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svsin_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+define void @sin_f32(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @sin_f32
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vsinq_f32(<4 x float> [[WIDE_LOAD:%.*]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @sin_f32
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svsin_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -419,11 +495,16 @@ define void @sin_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
 declare double @llvm.pow.f64(double, double)
 declare float @llvm.pow.f32(float, float)
 
-define void @pow_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @pow_f64(
-; NEON:     [[TMP5:%.*]] = call <2 x double> @armpl_vpowq_f64(<2 x double> [[TMP4:%.*]], <2 x double> [[TMP4:%.*]])
-; SVE:      [[TMP5:%.*]] = call <vscale x 2 x double> @armpl_svpow_f64_x(<vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x double> [[TMP4:%.*]], <vscale x 2 x i1> {{.*}})
-; CHECK:    ret void
+define void @pow_f64(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @pow_f64
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <2 x double> @armpl_vpowq_f64(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @pow_f64
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 2 x double> @armpl_svpow_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -443,11 +524,16 @@ define void @pow_f64(ptr nocapture %in.ptr, ptr %out.ptr) {
   ret void
 }
 
-define void @pow_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
-; CHECK-LABEL: @pow_f32(
-; NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vpowq_f32(<4 x float> [[TMP4:%.*]], <4 x float> [[TMP4:%.*]])
-; SVE: [[TMP5:%.*]] = call <vscale x 4 x float> @armpl_svpow_f32_x(<vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x float> [[TMP4:%.*]], <vscale x 4 x i1> {{.*}})
-; CHECK: ret void
+define void @pow_f32(ptr noalias %in.ptr, ptr %out.ptr) {
+; NEON-LABEL: define void @pow_f32
+; NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) {
+; NEON:    [[TMP3:%.*]] = call <4 x float> @armpl_vpowq_f32(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
+; NEON:    ret void
+;
+; SVE-LABEL: define void @pow_f32
+; SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr [[OUT_PTR:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP13:%.*]] = call <vscale x 4 x float> @armpl_svpow_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; SVE:    ret void
 ;
   entry:
   br label %for.body
@@ -466,4 +552,3 @@ define void @pow_f32(ptr nocapture %in.ptr, ptr %out.ptr) {
   for.end:
   ret void
 }
-

diff  --git a/llvm/test/Transforms/LoopVectorize/AArch64/sleef-intrinsic-calls-aarch64.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sleef-intrinsic-calls-aarch64.ll
index 01ce3657835a46e..2300ce74996e391 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sleef-intrinsic-calls-aarch64.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sleef-intrinsic-calls-aarch64.ll
@@ -1,28 +1,24 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(\.|_)(ceil|copysign|cos|exp\.|expf?\(|exp2|exp10|fabs|floor|fma|log|m..num|pow|.*int|round|sin|sqrt|trunc)|(ret)" --version 2
-; RUN: opt -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize -force-vector-interleave=1 -S < %s | FileCheck %s --check-prefix=NEON
-; RUN: opt -mattr=+sve -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize -force-vector-interleave=1 -S < %s | FileCheck %s --check-prefix=SVE
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(\.|_)(ceil|copysign|cos|exp\.|expf?\(|exp2|exp10|fabs|floor|fma|log|m..num|pow|nearbyint|rint|round|sin|sqrt|trunc)|(ret)" --version 2
+; RUN: opt -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize,simplifycfg -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s --check-prefix=NEON
+; RUN: opt -mattr=+sve -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize,simplifycfg -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s --check-prefix=SVE
 
-target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-linux-gnu"
 
-
 ; Tests are checking if LV can vectorize loops with llvm math intrinsics using mappings
 ; from TLI (if such mappings exist) for scalable and fixed width vectors.
 
 declare double @llvm.ceil.f64(double)
 declare float @llvm.ceil.f32(float)
 
-define void @llvm_ceil_f64(double* nocapture %varray) {
+define void @llvm_ceil_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_ceil_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.ceil.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_ceil_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1:[0-9]+]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.ceil.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.ceil.f64(double [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1:[0-9]+]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.ceil.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -43,17 +39,15 @@ define void @llvm_ceil_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_ceil_f32(float* nocapture %varray) {
+define void @llvm_ceil_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_ceil_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.ceil.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_ceil_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.ceil.f32(float [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -77,17 +71,15 @@ define void @llvm_ceil_f32(float* nocapture %varray) {
 declare double @llvm.copysign.f64(double, double)
 declare float @llvm.copysign.f32(float, float)
 
-define void @llvm_copysign_f64(double* nocapture %varray) {
+define void @llvm_copysign_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_copysign_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.copysign.v2f64(<2 x double> [[TMP1:%.*]], <2 x double> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.copysign.f64(double [[CONV:%.*]], double [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_copysign_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x double> [[TMP11]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.copysign.f64(double [[CONV:%.*]], double [[CONV]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]])
 ; SVE:    ret void
 ;
   entry:
@@ -108,17 +100,15 @@ define void @llvm_copysign_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_copysign_f32(float* nocapture %varray) {
+define void @llvm_copysign_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_copysign_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.copysign.v4f32(<4 x float> [[TMP1:%.*]], <4 x float> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.copysign.f32(float [[CONV:%.*]], float [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_copysign_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x float> [[TMP11]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.copysign.f32(float [[CONV:%.*]], float [[CONV]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]])
 ; SVE:    ret void
 ;
   entry:
@@ -142,17 +132,15 @@ define void @llvm_copysign_f32(float* nocapture %varray) {
 declare double @llvm.cos.f64(double)
 declare float @llvm.cos.f32(float)
 
-define void @llvm_cos_f64(double* nocapture %varray) {
+define void @llvm_cos_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_cos_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_cos(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.cos.f64(double [[CONV:%.*]]) #[[ATTR1:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_cos_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_cos(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.cos.f64(double [[CONV:%.*]]) #[[ATTR4:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_cos(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -173,17 +161,15 @@ define void @llvm_cos_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_cos_f32(float* nocapture %varray) {
+define void @llvm_cos_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_cos_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_cosf(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.cos.f32(float [[CONV:%.*]]) #[[ATTR2:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_cos_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_cosf(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.cos.f32(float [[CONV:%.*]]) #[[ATTR5:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_cosf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -207,17 +193,15 @@ define void @llvm_cos_f32(float* nocapture %varray) {
 declare double @llvm.exp.f64(double)
 declare float @llvm.exp.f32(float)
 
-define void @llvm_exp_f64(double* nocapture %varray) {
+define void @llvm_exp_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_exp_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_exp(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.exp.f64(double [[CONV:%.*]]) #[[ATTR3:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_exp_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.exp.f64(double [[CONV:%.*]]) #[[ATTR6:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -238,17 +222,15 @@ define void @llvm_exp_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_exp_f32(float* nocapture %varray) {
+define void @llvm_exp_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_exp_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_expf(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.exp.f32(float [[CONV:%.*]]) #[[ATTR4:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_exp_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_expf(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.exp.f32(float [[CONV:%.*]]) #[[ATTR7:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_expf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -272,17 +254,15 @@ define void @llvm_exp_f32(float* nocapture %varray) {
 declare double @llvm.exp2.f64(double)
 declare float @llvm.exp2.f32(float)
 
-define void @llvm_exp2_f64(double* nocapture %varray) {
+define void @llvm_exp2_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_exp2_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_exp2(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.exp2.f64(double [[CONV:%.*]]) #[[ATTR5:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_exp2_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp2(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.exp2.f64(double [[CONV:%.*]]) #[[ATTR8:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp2(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -303,17 +283,15 @@ define void @llvm_exp2_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_exp2_f32(float* nocapture %varray) {
+define void @llvm_exp2_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_exp2_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_exp2f(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.exp2.f32(float [[CONV:%.*]]) #[[ATTR6:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_exp2_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_exp2f(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.exp2.f32(float [[CONV:%.*]]) #[[ATTR9:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_exp2f(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -337,17 +315,15 @@ define void @llvm_exp2_f32(float* nocapture %varray) {
 declare double @llvm.exp10.f64(double)
 declare float @llvm.exp10.f32(float)
 
-define void @llvm_exp10_f64(double* nocapture %varray) {
+define void @llvm_exp10_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_exp10_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_exp10(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.exp10.f64(double [[CONV:%.*]]) #[[ATTR7:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_exp10_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp10(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.exp10.f64(double [[CONV:%.*]]) #[[ATTR10:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp10(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -368,17 +344,15 @@ define void @llvm_exp10_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_exp10_f32(float* nocapture %varray) {
+define void @llvm_exp10_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_exp10_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_exp10f(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.exp10.f32(float [[CONV:%.*]]) #[[ATTR8:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_exp10_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_exp10f(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.exp10.f32(float [[CONV:%.*]]) #[[ATTR11:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_exp10f(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -402,17 +376,15 @@ define void @llvm_exp10_f32(float* nocapture %varray) {
 declare double @llvm.fabs.f64(double)
 declare float @llvm.fabs.f32(float)
 
-define void @llvm_fabs_f64(double* nocapture %varray) {
+define void @llvm_fabs_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_fabs_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.fabs.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.fabs.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_fabs_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.fabs.f64(double [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -434,17 +406,15 @@ define void @llvm_fabs_f64(double* nocapture %varray) {
 }
 
 
-define void @llvm_fabs_f32(float* nocapture %varray) {
+define void @llvm_fabs_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_fabs_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.fabs.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.fabs.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_fabs_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.fabs.f32(float [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -468,17 +438,15 @@ define void @llvm_fabs_f32(float* nocapture %varray) {
 declare double @llvm.floor.f64(double)
 declare float @llvm.floor.f32(float)
 
-define void @llvm_floor_f64(double* nocapture %varray) {
+define void @llvm_floor_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_floor_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.floor.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_floor_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.floor.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.floor.f64(double [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.floor.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -499,17 +467,15 @@ define void @llvm_floor_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_floor_f32(float* nocapture %varray) {
+define void @llvm_floor_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_floor_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.floor.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_floor_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.floor.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.floor.f32(float [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.floor.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -533,17 +499,15 @@ define void @llvm_floor_f32(float* nocapture %varray) {
 declare double @llvm.fma.f64(double, double, double)
 declare float @llvm.fma.f32(float, float, float)
 
-define void @llvm_fma_f64(double* nocapture %varray) {
+define void @llvm_fma_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_fma_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.fma.v2f64(<2 x double> [[TMP1:%.*]], <2 x double> [[TMP1]], <2 x double> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.fma.f64(double [[CONV:%.*]], double [[CONV]], double [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_fma_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x double> [[TMP11]], <vscale x 2 x double> [[TMP11]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.fma.f64(double [[CONV:%.*]], double [[CONV]], double [[CONV]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]], <vscale x 2 x double> [[TMP17]])
 ; SVE:    ret void
 ;
   entry:
@@ -564,17 +528,15 @@ define void @llvm_fma_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_fma_f32(float* nocapture %varray) {
+define void @llvm_fma_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_fma_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> [[TMP1:%.*]], <4 x float> [[TMP1]], <4 x float> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.fma.f32(float [[CONV:%.*]], float [[CONV]], float [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_fma_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x float> [[TMP11]], <vscale x 4 x float> [[TMP11]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.fma.f32(float [[CONV:%.*]], float [[CONV]], float [[CONV]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]], <vscale x 4 x float> [[TMP17]])
 ; SVE:    ret void
 ;
   entry:
@@ -598,17 +560,15 @@ define void @llvm_fma_f32(float* nocapture %varray) {
 declare double @llvm.log.f64(double)
 declare float @llvm.log.f32(float)
 
-define void @llvm_log_f64(double* nocapture %varray) {
+define void @llvm_log_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_log_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_log(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.log.f64(double [[CONV:%.*]]) #[[ATTR9:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_log_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.log.f64(double [[CONV:%.*]]) #[[ATTR12:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -629,17 +589,15 @@ define void @llvm_log_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_log_f32(float* nocapture %varray) {
+define void @llvm_log_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_log_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_logf(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.log.f32(float [[CONV:%.*]]) #[[ATTR10:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_log_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_logf(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.log.f32(float [[CONV:%.*]]) #[[ATTR13:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_logf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -663,17 +621,15 @@ define void @llvm_log_f32(float* nocapture %varray) {
 declare double @llvm.log10.f64(double)
 declare float @llvm.log10.f32(float)
 
-define void @llvm_log10_f64(double* nocapture %varray) {
+define void @llvm_log10_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_log10_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_log10(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.log10.f64(double [[CONV:%.*]]) #[[ATTR11:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_log10_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log10(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.log10.f64(double [[CONV:%.*]]) #[[ATTR14:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log10(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -694,17 +650,15 @@ define void @llvm_log10_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_log10_f32(float* nocapture %varray) {
+define void @llvm_log10_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_log10_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_log10f(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.log10.f32(float [[CONV:%.*]]) #[[ATTR12:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_log10_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_log10f(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.log10.f32(float [[CONV:%.*]]) #[[ATTR15:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_log10f(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -728,17 +682,15 @@ define void @llvm_log10_f32(float* nocapture %varray) {
 declare double @llvm.log2.f64(double)
 declare float @llvm.log2.f32(float)
 
-define void @llvm_log2_f64(double* nocapture %varray) {
+define void @llvm_log2_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_log2_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_log2(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.log2.f64(double [[CONV:%.*]]) #[[ATTR13:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_log2_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log2(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.log2.f64(double [[CONV:%.*]]) #[[ATTR16:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log2(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -759,17 +711,15 @@ define void @llvm_log2_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_log2_f32(float* nocapture %varray) {
+define void @llvm_log2_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_log2_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_log2f(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.log2.f32(float [[CONV:%.*]]) #[[ATTR14:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_log2_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_log2f(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.log2.f32(float [[CONV:%.*]]) #[[ATTR17:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_log2f(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -793,17 +743,15 @@ define void @llvm_log2_f32(float* nocapture %varray) {
 declare double @llvm.maxnum.f64(double, double)
 declare float @llvm.maxnum.f32(float, float)
 
-define void @llvm_maxnum_f64(double* nocapture %varray) {
+define void @llvm_maxnum_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_maxnum_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.maxnum.v2f64(<2 x double> [[TMP1:%.*]], <2 x double> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.maxnum.f64(double [[CONV:%.*]], double [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_maxnum_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x double> [[TMP11]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.maxnum.f64(double [[CONV:%.*]], double [[CONV]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]])
 ; SVE:    ret void
 ;
   entry:
@@ -824,17 +772,15 @@ define void @llvm_maxnum_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_maxnum_f32(float* nocapture %varray) {
+define void @llvm_maxnum_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_maxnum_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP1:%.*]], <4 x float> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.maxnum.f32(float [[CONV:%.*]], float [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_maxnum_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x float> [[TMP11]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.maxnum.f32(float [[CONV:%.*]], float [[CONV]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]])
 ; SVE:    ret void
 ;
   entry:
@@ -858,17 +804,15 @@ define void @llvm_maxnum_f32(float* nocapture %varray) {
 declare double @llvm.minnum.f64(double, double)
 declare float @llvm.minnum.f32(float, float)
 
-define void @llvm_minnum_f64(double* nocapture %varray) {
+define void @llvm_minnum_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_minnum_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.minnum.v2f64(<2 x double> [[TMP1:%.*]], <2 x double> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.minnum.f64(double [[CONV:%.*]], double [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_minnum_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x double> [[TMP11]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.minnum.f64(double [[CONV:%.*]], double [[CONV]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]])
 ; SVE:    ret void
 ;
   entry:
@@ -889,17 +833,15 @@ define void @llvm_minnum_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_minnum_f32(float* nocapture %varray) {
+define void @llvm_minnum_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_minnum_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP1:%.*]], <4 x float> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.minnum.f32(float [[CONV:%.*]], float [[CONV]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_minnum_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x float> [[TMP11]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.minnum.f32(float [[CONV:%.*]], float [[CONV]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]])
 ; SVE:    ret void
 ;
   entry:
@@ -923,17 +865,15 @@ define void @llvm_minnum_f32(float* nocapture %varray) {
 declare double @llvm.nearbyint.f64(double)
 declare float @llvm.nearbyint.f32(float)
 
-define void @llvm_nearbyint_f64(double* nocapture %varray) {
+define void @llvm_nearbyint_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_nearbyint_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.nearbyint.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_nearbyint_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.nearbyint.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.nearbyint.f64(double [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.nearbyint.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -954,17 +894,15 @@ define void @llvm_nearbyint_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_nearbyint_f32(float* nocapture %varray) {
+define void @llvm_nearbyint_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_nearbyint_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.nearbyint.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_nearbyint_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.nearbyint.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.nearbyint.f32(float [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.nearbyint.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -988,17 +926,15 @@ define void @llvm_nearbyint_f32(float* nocapture %varray) {
 declare double @llvm.pow.f64(double, double)
 declare float @llvm.pow.f32(float, float)
 
-define void @llvm_pow_f64(double* nocapture %varray) {
+define void @llvm_pow_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_pow_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2vv_pow(<2 x double> [[TMP1:%.*]], <2 x double> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.pow.f64(double [[CONV:%.*]], double [[CONV]]) #[[ATTR15:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_pow_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxvv_pow(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x double> [[TMP11]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.pow.f64(double [[CONV:%.*]], double [[CONV]]) #[[ATTR18:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxvv_pow(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x double> [[TMP17]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1019,17 +955,15 @@ define void @llvm_pow_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_pow_f32(float* nocapture %varray) {
+define void @llvm_pow_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_pow_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4vv_powf(<4 x float> [[TMP1:%.*]], <4 x float> [[TMP1]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.pow.f32(float [[CONV:%.*]], float [[CONV]]) #[[ATTR16:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_pow_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxvv_powf(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x float> [[TMP11]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.pow.f32(float [[CONV:%.*]], float [[CONV]]) #[[ATTR19:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxvv_powf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x float> [[TMP17]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1053,17 +987,15 @@ define void @llvm_pow_f32(float* nocapture %varray) {
 declare double @llvm.rint.f64(double)
 declare float @llvm.rint.f32(float)
 
-define void @llvm_rint_f64(double* nocapture %varray) {
+define void @llvm_rint_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_rint_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.rint.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_rint_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.rint.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.rint.f64(double [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.rint.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1084,17 +1016,15 @@ define void @llvm_rint_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_rint_f32(float* nocapture %varray) {
+define void @llvm_rint_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_rint_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.rint.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_rint_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.rint.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.rint.f32(float [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.rint.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1118,17 +1048,15 @@ define void @llvm_rint_f32(float* nocapture %varray) {
 declare double @llvm.round.f64(double)
 declare float @llvm.round.f32(float)
 
-define void @llvm_round_f64(double* nocapture %varray) {
+define void @llvm_round_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_round_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.round.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.round.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_round_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.round.f64(double [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1149,17 +1077,15 @@ define void @llvm_round_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_round_f32(float* nocapture %varray) {
+define void @llvm_round_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_round_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.round.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.round.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_round_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.round.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.round.f32(float [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.round.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1183,17 +1109,15 @@ define void @llvm_round_f32(float* nocapture %varray) {
 declare double @llvm.sin.f64(double)
 declare float @llvm.sin.f32(float)
 
-define void @llvm_sin_f64(double* nocapture %varray) {
+define void @llvm_sin_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_sin_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @_ZGVnN2v_sin(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.sin.f64(double [[CONV:%.*]]) #[[ATTR17:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_sin_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_sin(<vscale x 2 x double> [[TMP11:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call double @llvm.sin.f64(double [[CONV:%.*]]) #[[ATTR20:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_sin(<vscale x 2 x double> [[TMP17:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1214,17 +1138,15 @@ define void @llvm_sin_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_sin_f32(float* nocapture %varray) {
+define void @llvm_sin_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_sin_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @_ZGVnN4v_sinf(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.sin.f32(float [[CONV:%.*]]) #[[ATTR18:[0-9]+]]
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_sin_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_sinf(<vscale x 4 x float> [[TMP11:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; SVE:    [[CALL:%.*]] = tail call float @llvm.sin.f32(float [[CONV:%.*]]) #[[ATTR21:[0-9]+]]
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_sinf(<vscale x 4 x float> [[TMP17:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1248,17 +1170,15 @@ define void @llvm_sin_f32(float* nocapture %varray) {
 declare double @llvm.sqrt.f64(double)
 declare float @llvm.sqrt.f32(float)
 
-define void @llvm_sqrt_f64(double* nocapture %varray) {
+define void @llvm_sqrt_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_sqrt_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.sqrt.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_sqrt_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.sqrt.f64(double [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1279,17 +1199,15 @@ define void @llvm_sqrt_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_sqrt_f32(float* nocapture %varray) {
+define void @llvm_sqrt_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_sqrt_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.sqrt.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.sqrt.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_sqrt_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.sqrt.f32(float [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1313,17 +1231,15 @@ define void @llvm_sqrt_f32(float* nocapture %varray) {
 declare double @llvm.trunc.f64(double)
 declare float @llvm.trunc.f32(float)
 
-define void @llvm_trunc_f64(double* nocapture %varray) {
+define void @llvm_trunc_f64(double* %varray) {
 ; NEON-LABEL: define void @llvm_trunc_f64
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call double @llvm.trunc.f64(double [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_trunc_f64
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call double @llvm.trunc.f64(double [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:
@@ -1344,17 +1260,15 @@ define void @llvm_trunc_f64(double* nocapture %varray) {
   ret void
 }
 
-define void @llvm_trunc_f32(float* nocapture %varray) {
+define void @llvm_trunc_f32(float* %varray) {
 ; NEON-LABEL: define void @llvm_trunc_f32
-; NEON-SAME: (ptr nocapture [[VARRAY:%.*]]) {
+; NEON-SAME: (ptr [[VARRAY:%.*]]) {
 ; NEON:    [[TMP2:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[TMP1:%.*]])
-; NEON:    [[CALL:%.*]] = tail call float @llvm.trunc.f32(float [[CONV:%.*]])
 ; NEON:    ret void
 ;
 ; SVE-LABEL: define void @llvm_trunc_f32
-; SVE-SAME: (ptr nocapture [[VARRAY:%.*]]) #[[ATTR1]] {
-; SVE:    [[TMP12:%.*]] = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> [[TMP11:%.*]])
-; SVE:    [[CALL:%.*]] = tail call float @llvm.trunc.f32(float [[CONV:%.*]])
+; SVE-SAME: (ptr [[VARRAY:%.*]]) #[[ATTR1]] {
+; SVE:    [[TMP18:%.*]] = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> [[TMP17:%.*]])
 ; SVE:    ret void
 ;
   entry:


        


More information about the llvm-commits mailing list