[llvm] e89a08a - [SVE] MOVPRFX zero merging test renaming

Cameron McInally via llvm-commits <llvm-commits at lists.llvm.org>
Tue May 19 15:33:48 PDT 2020


Author: Cameron McInally
Date: 2020-05-19T17:33:19-05:00
New Revision: e89a08aefdaca716ee0168b18d79818076178910

URL: https://github.com/llvm/llvm-project/commit/e89a08aefdaca716ee0168b18d79818076178910
DIFF: https://github.com/llvm/llvm-project/commit/e89a08aefdaca716ee0168b18d79818076178910.diff

LOG: [SVE] MOVPRFX zero merging test renaming

Differential Revision: https://reviews.llvm.org/D80244
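
For context, the "_zero" suffix marks tests where the inactive lanes of the
first operand are zeroed before the predicated operation, so codegen can use
a zeroing MOVPRFX (p0/z) rather than plain merging. A minimal sketch of the
pattern, reconstructed from the hunks below (the intrinsic declaration is
spelled out here for completeness):

    declare <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)

    define <vscale x 8 x half> @fadd_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
      ; Zero the inactive lanes of %a under %pg ...
      %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
      ; ... then feed the zeroed value to the merging intrinsic.
      %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a_z, <vscale x 8 x half> %b)
      ret <vscale x 8 x half> %out
    }

Expected AArch64 codegen, per the CHECK lines below:

    movprfx z0.h, p0/z, z0.h
    fadd z0.h, p0/m, z0.h, z1.h
    ret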

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-merging.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-merging.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-merging.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-merging.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-merging.ll
index bc3173f146a5..b359a63aba4d 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-merging.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-merging.ll
@@ -4,8 +4,8 @@
 ; FADD
 ;
 
-define <vscale x 8 x half> @fadd_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
-; CHECK-LABEL: fadd_h:
+define <vscale x 8 x half> @fadd_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fadd_h_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
@@ -16,8 +16,8 @@ define <vscale x 8 x half> @fadd_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a
   ret <vscale x 8 x half> %out
 }
 
-define <vscale x 4 x float> @fadd_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
-; CHECK-LABEL: fadd_s:
+define <vscale x 4 x float> @fadd_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fadd_s_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
@@ -28,8 +28,8 @@ define <vscale x 4 x float> @fadd_s(<vscale x 4 x i1> %pg, <vscale x 4 x float>
   ret <vscale x 4 x float> %out
 }
 
-define <vscale x 2 x double> @fadd_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; CHECK-LABEL: fadd_d:
+define <vscale x 2 x double> @fadd_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fadd_d_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret
@@ -44,8 +44,8 @@ define <vscale x 2 x double> @fadd_d(<vscale x 2 x i1> %pg, <vscale x 2 x double
 ; FMAX
 ;
 
-define <vscale x 8 x half> @fmax_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
-; CHECK-LABEL: fmax_h:
+define <vscale x 8 x half> @fmax_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fmax_h_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: fmax z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
@@ -56,8 +56,8 @@ define <vscale x 8 x half> @fmax_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a
   ret <vscale x 8 x half> %out
 }
 
-define <vscale x 4 x float> @fmax_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
-; CHECK-LABEL: fmax_s:
+define <vscale x 4 x float> @fmax_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fmax_s_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: fmax z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
@@ -68,8 +68,8 @@ define <vscale x 4 x float> @fmax_s(<vscale x 4 x i1> %pg, <vscale x 4 x float>
   ret <vscale x 4 x float> %out
 }
 
-define <vscale x 2 x double> @fmax_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; CHECK-LABEL: fmax_d:
+define <vscale x 2 x double> @fmax_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fmax_d_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: fmax z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret
@@ -84,8 +84,8 @@ define <vscale x 2 x double> @fmax_d(<vscale x 2 x i1> %pg, <vscale x 2 x double
 ; FMAXNM
 ;
 
-define <vscale x 8 x half> @fmaxnm_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
-; CHECK-LABEL: fmaxnm_h:
+define <vscale x 8 x half> @fmaxnm_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fmaxnm_h_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: fmaxnm z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
@@ -96,8 +96,8 @@ define <vscale x 8 x half> @fmaxnm_h(<vscale x 8 x i1> %pg, <vscale x 8 x half>
   ret <vscale x 8 x half> %out
 }
 
-define <vscale x 4 x float> @fmaxnm_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
-; CHECK-LABEL: fmaxnm_s:
+define <vscale x 4 x float> @fmaxnm_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fmaxnm_s_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: fmaxnm z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
@@ -108,8 +108,8 @@ define <vscale x 4 x float> @fmaxnm_s(<vscale x 4 x i1> %pg, <vscale x 4 x float
   ret <vscale x 4 x float> %out
 }
 
-define <vscale x 2 x double> @fmaxnm_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; CHECK-LABEL: fmaxnm_d:
+define <vscale x 2 x double> @fmaxnm_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fmaxnm_d_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: fmaxnm z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret
@@ -124,8 +124,8 @@ define <vscale x 2 x double> @fmaxnm_d(<vscale x 2 x i1> %pg, <vscale x 2 x doub
 ; FMIN
 ;
 
-define <vscale x 8 x half> @fmin_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
-; CHECK-LABEL: fmin_h:
+define <vscale x 8 x half> @fmin_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fmin_h_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: fmin z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
@@ -136,8 +136,8 @@ define <vscale x 8 x half> @fmin_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a
   ret <vscale x 8 x half> %out
 }
 
-define <vscale x 4 x float> @fmin_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
-; CHECK-LABEL: fmin_s:
+define <vscale x 4 x float> @fmin_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fmin_s_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: fmin z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
@@ -148,8 +148,8 @@ define <vscale x 4 x float> @fmin_s(<vscale x 4 x i1> %pg, <vscale x 4 x float>
   ret <vscale x 4 x float> %out
 }
 
-define <vscale x 2 x double> @fmin_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; CHECK-LABEL: fmin_d:
+define <vscale x 2 x double> @fmin_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fmin_d_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: fmin z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret
@@ -164,8 +164,8 @@ define <vscale x 2 x double> @fmin_d(<vscale x 2 x i1> %pg, <vscale x 2 x double
 ; FMINNM
 ;
 
-define <vscale x 8 x half> @fminnm_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
-; CHECK-LABEL: fminnm_h:
+define <vscale x 8 x half> @fminnm_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fminnm_h_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: fminnm z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
@@ -176,8 +176,8 @@ define <vscale x 8 x half> @fminnm_h(<vscale x 8 x i1> %pg, <vscale x 8 x half>
   ret <vscale x 8 x half> %out
 }
 
-define <vscale x 4 x float> @fminnm_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
-; CHECK-LABEL: fminnm_s:
+define <vscale x 4 x float> @fminnm_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fminnm_s_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: fminnm z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
@@ -188,8 +188,8 @@ define <vscale x 4 x float> @fminnm_s(<vscale x 4 x i1> %pg, <vscale x 4 x float
   ret <vscale x 4 x float> %out
 }
 
-define <vscale x 2 x double> @fminnm_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; CHECK-LABEL: fminnm_d:
+define <vscale x 2 x double> @fminnm_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fminnm_d_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: fminnm z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret
@@ -204,8 +204,8 @@ define <vscale x 2 x double> @fminnm_d(<vscale x 2 x i1> %pg, <vscale x 2 x doub
 ; FMUL
 ;
 
-define <vscale x 8 x half> @fmul_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
-; CHECK-LABEL: fmul_h:
+define <vscale x 8 x half> @fmul_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fmul_h_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: fmul z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
@@ -216,8 +216,8 @@ define <vscale x 8 x half> @fmul_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a
   ret <vscale x 8 x half> %out
 }
 
-define <vscale x 4 x float> @fmul_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
-; CHECK-LABEL: fmul_s:
+define <vscale x 4 x float> @fmul_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fmul_s_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: fmul z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
@@ -228,8 +228,8 @@ define <vscale x 4 x float> @fmul_s(<vscale x 4 x i1> %pg, <vscale x 4 x float>
   ret <vscale x 4 x float> %out
 }
 
-define <vscale x 2 x double> @fmul_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; CHECK-LABEL: fmul_d:
+define <vscale x 2 x double> @fmul_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fmul_d_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: fmul z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret
@@ -244,8 +244,8 @@ define <vscale x 2 x double> @fmul_d(<vscale x 2 x i1> %pg, <vscale x 2 x double
 ; FSUB
 ;
 
-define <vscale x 8 x half> @fsub_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
-; CHECK-LABEL: fsub_h:
+define <vscale x 8 x half> @fsub_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fsub_h_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: fsub z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
@@ -256,8 +256,8 @@ define <vscale x 8 x half> @fsub_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a
   ret <vscale x 8 x half> %out
 }
 
-define <vscale x 4 x float> @fsub_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
-; CHECK-LABEL: fsub_s:
+define <vscale x 4 x float> @fsub_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fsub_s_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: fsub z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
@@ -268,8 +268,8 @@ define <vscale x 4 x float> @fsub_s(<vscale x 4 x i1> %pg, <vscale x 4 x float>
   ret <vscale x 4 x float> %out
 }
 
-define <vscale x 2 x double> @fsub_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; CHECK-LABEL: fsub_d:
+define <vscale x 2 x double> @fsub_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fsub_d_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: fsub z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret
@@ -284,8 +284,8 @@ define <vscale x 2 x double> @fsub_d(<vscale x 2 x i1> %pg, <vscale x 2 x double
 ; FSUBR
 ;
 
-define <vscale x 8 x half> @fsubr_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
-; CHECK-LABEL: fsubr_h:
+define <vscale x 8 x half> @fsubr_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fsubr_h_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: fsubr z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
@@ -296,8 +296,8 @@ define <vscale x 8 x half> @fsubr_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %
   ret <vscale x 8 x half> %out
 }
 
-define <vscale x 4 x float> @fsubr_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
-; CHECK-LABEL: fsubr_s:
+define <vscale x 4 x float> @fsubr_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fsubr_s_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: fsubr z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
@@ -308,8 +308,8 @@ define <vscale x 4 x float> @fsubr_s(<vscale x 4 x i1> %pg, <vscale x 4 x float>
   ret <vscale x 4 x float> %out
 }
 
-define <vscale x 2 x double> @fsubr_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; CHECK-LABEL: fsubr_d:
+define <vscale x 2 x double> @fsubr_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fsubr_d_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: fsubr z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-merging.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-merging.ll
index 984ebd17322f..5fd845a15f22 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-merging.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-merging.ll
@@ -4,8 +4,8 @@
 ; ADD
 ;
 
-define <vscale x 16 x i8> @add_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: add_i8:
+define <vscale x 16 x i8> @add_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: add_i8_zero:
 ; CHECK:      movprfx z0.b, p0/z, z0.b
 ; CHECK-NEXT: add z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT: ret
@@ -16,8 +16,8 @@ define <vscale x 16 x i8> @add_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a,
   ret <vscale x 16 x i8> %out
 }
 
-define <vscale x 8 x i16> @add_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: add_i16:
+define <vscale x 8 x i16> @add_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: add_i16_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: add z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
@@ -28,8 +28,8 @@ define <vscale x 8 x i16> @add_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a,
   ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 4 x i32> @add_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: add_i32:
+define <vscale x 4 x i32> @add_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: add_i32_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: add z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
@@ -40,8 +40,8 @@ define <vscale x 4 x i32> @add_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a,
   ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 2 x i64> @add_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: add_i64:
+define <vscale x 2 x i64> @add_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: add_i64_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: add z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret
@@ -56,8 +56,8 @@ define <vscale x 2 x i64> @add_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a,
 ; SUB
 ;
 
-define <vscale x 16 x i8> @sub_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: sub_i8:
+define <vscale x 16 x i8> @sub_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sub_i8_zero:
 ; CHECK:      movprfx z0.b, p0/z, z0.b
 ; CHECK-NEXT: sub z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT: ret
@@ -68,8 +68,8 @@ define <vscale x 16 x i8> @sub_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a,
   ret <vscale x 16 x i8> %out
 }
 
-define <vscale x 8 x i16> @sub_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: sub_i16:
+define <vscale x 8 x i16> @sub_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sub_i16_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: sub z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
@@ -80,8 +80,8 @@ define <vscale x 8 x i16> @sub_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a,
   ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 4 x i32> @sub_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: sub_i32:
+define <vscale x 4 x i32> @sub_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sub_i32_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: sub z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
@@ -92,8 +92,8 @@ define <vscale x 4 x i32> @sub_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a,
   ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 2 x i64> @sub_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: sub_i64:
+define <vscale x 2 x i64> @sub_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sub_i64_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: sub z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret
@@ -108,8 +108,8 @@ define <vscale x 2 x i64> @sub_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a,
 ; SUBR
 ;
 
-define <vscale x 16 x i8> @subr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: subr_i8:
+define <vscale x 16 x i8> @subr_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: subr_i8_zero:
 ; CHECK:      movprfx z0.b, p0/z, z0.b
 ; CHECK-NEXT: subr z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT: ret
@@ -120,8 +120,8 @@ define <vscale x 16 x i8> @subr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a
   ret <vscale x 16 x i8> %out
 }
 
-define <vscale x 8 x i16> @subr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: subr_i16:
+define <vscale x 8 x i16> @subr_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: subr_i16_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: subr z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
@@ -132,8 +132,8 @@ define <vscale x 8 x i16> @subr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a
   ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 4 x i32> @subr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: subr_i32:
+define <vscale x 4 x i32> @subr_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: subr_i32_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: subr z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
@@ -144,8 +144,8 @@ define <vscale x 4 x i32> @subr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a
   ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 2 x i64> @subr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: subr_i64:
+define <vscale x 2 x i64> @subr_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: subr_i64_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: subr z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-merging.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-merging.ll
index 7f5105da675e..15e1707a270a 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-merging.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-merging.ll
@@ -4,8 +4,8 @@
 ; ASR
 ;
 
-define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: asr_i8:
+define <vscale x 16 x i8> @asr_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: asr_i8_zero:
 ; CHECK:      movprfx z0.b, p0/z, z0.b
 ; CHECK-NEXT: asr z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT: ret
@@ -16,8 +16,8 @@ define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a,
   ret <vscale x 16 x i8> %out
 }
 
-define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: asr_i16:
+define <vscale x 8 x i16> @asr_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: asr_i16_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: asr z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
@@ -28,8 +28,8 @@ define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a,
   ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: asr_i32:
+define <vscale x 4 x i32> @asr_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: asr_i32_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: asr z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
@@ -40,8 +40,8 @@ define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a,
   ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: asr_i64:
+define <vscale x 2 x i64> @asr_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: asr_i64_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: asr z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret
@@ -52,8 +52,8 @@ define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a,
   ret <vscale x 2 x i64> %out
 }
 
-define <vscale x 16 x i8> @asr_wide_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: asr_wide_i8:
+define <vscale x 16 x i8> @asr_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: asr_wide_i8_zero:
 ; CHECK-NOT:  movprfx
 ; CHECK: asr z0.b, p0/m, z0.b, z1.d
   %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
@@ -63,8 +63,8 @@ define <vscale x 16 x i8> @asr_wide_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8
   ret <vscale x 16 x i8> %out
 }
 
-define <vscale x 8 x i16> @asr_wide_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: asr_wide_i16:
+define <vscale x 8 x i16> @asr_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: asr_wide_i16_zero:
 ; CHECK-NOT:  movprfx
 ; CHECK: asr z0.h, p0/m, z0.h, z1.d
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
@@ -74,8 +74,8 @@ define <vscale x 8 x i16> @asr_wide_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16
   ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 4 x i32> @asr_wide_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: asr_wide_i32:
+define <vscale x 4 x i32> @asr_wide_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: asr_wide_i32_zero:
 ; CHECK-NOT:  movprfx
 ; CHECK: asr z0.s, p0/m, z0.s, z1.d
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
@@ -89,8 +89,8 @@ define <vscale x 4 x i32> @asr_wide_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32
 ; ASRD
 ;
 
-define <vscale x 16 x i8> @asrd_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
-; CHECK-LABEL: asrd_i8:
+define <vscale x 16 x i8> @asrd_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: asrd_i8_zero:
 ; CHECK:      movprfx z0.b, p0/z, z0.b
 ; CHECK-NEXT: asrd z0.b, p0/m, z0.b, #1
 ; CHECK-NEXT: ret
@@ -101,8 +101,8 @@ define <vscale x 16 x i8> @asrd_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a
   ret <vscale x 16 x i8> %out
 }
 
-define <vscale x 8 x i16> @asrd_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
-; CHECK-LABEL: asrd_i16:
+define <vscale x 8 x i16> @asrd_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: asrd_i16_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: asrd z0.h, p0/m, z0.h, #2
 ; CHECK-NEXT: ret
@@ -113,8 +113,8 @@ define <vscale x 8 x i16> @asrd_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a
   ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 4 x i32> @asrd_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
-; CHECK-LABEL: asrd_i32:
+define <vscale x 4 x i32> @asrd_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: asrd_i32_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: asrd z0.s, p0/m, z0.s, #31
 ; CHECK-NEXT: ret
@@ -125,8 +125,8 @@ define <vscale x 4 x i32> @asrd_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a
   ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 2 x i64> @asrd_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
-; CHECK-LABEL: asrd_i64:
+define <vscale x 2 x i64> @asrd_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
+; CHECK-LABEL: asrd_i64_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: asrd z0.d, p0/m, z0.d, #64
 ; CHECK-NEXT: ret
@@ -141,8 +141,8 @@ define <vscale x 2 x i64> @asrd_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a
 ; LSL
 ;
 
-define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: lsl_i8:
+define <vscale x 16 x i8> @lsl_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: lsl_i8_zero:
 ; CHECK:      movprfx z0.b, p0/z, z0.b
 ; CHECK-NEXT: lsl z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT: ret
@@ -153,8 +153,8 @@ define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a,
   ret <vscale x 16 x i8> %out
 }
 
-define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: lsl_i16:
+define <vscale x 8 x i16> @lsl_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: lsl_i16_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
@@ -165,8 +165,8 @@ define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a,
   ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: lsl_i32:
+define <vscale x 4 x i32> @lsl_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: lsl_i32_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
@@ -177,8 +177,8 @@ define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a,
   ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: lsl_i64:
+define <vscale x 2 x i64> @lsl_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: lsl_i64_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret
@@ -189,8 +189,8 @@ define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a,
   ret <vscale x 2 x i64> %out
 }
 
-define <vscale x 16 x i8> @lsl_wide_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: lsl_wide_i8:
+define <vscale x 16 x i8> @lsl_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: lsl_wide_i8_zero:
 ; CHECK-NOT:  movprfx
 ; CHECK: lsl z0.b, p0/m, z0.b, z1.d
   %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
@@ -200,8 +200,8 @@ define <vscale x 16 x i8> @lsl_wide_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8
   ret <vscale x 16 x i8> %out
 }
 
-define <vscale x 8 x i16> @lsl_wide_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: lsl_wide_i16:
+define <vscale x 8 x i16> @lsl_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: lsl_wide_i16_zero:
 ; CHECK-NOT:  movprfx
 ; CHECK: lsl z0.h, p0/m, z0.h, z1.d
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
@@ -211,8 +211,8 @@ define <vscale x 8 x i16> @lsl_wide_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16
   ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 4 x i32> @lsl_wide_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: lsl_wide_i32:
+define <vscale x 4 x i32> @lsl_wide_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: lsl_wide_i32_zero:
 ; CHECK-NOT:  movprfx
 ; CHECK: lsl z0.s, p0/m, z0.s, z1.d
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
@@ -226,8 +226,8 @@ define <vscale x 4 x i32> @lsl_wide_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32
 ; LSR
 ;
 
-define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: lsr_i8:
+define <vscale x 16 x i8> @lsr_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: lsr_i8_zero:
 ; CHECK:      movprfx z0.b, p0/z, z0.b
 ; CHECK-NEXT: lsr z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT: ret
@@ -238,8 +238,8 @@ define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a,
   ret <vscale x 16 x i8> %out
 }
 
-define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: lsr_i16:
+define <vscale x 8 x i16> @lsr_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: lsr_i16_zero:
 ; CHECK:      movprfx z0.h, p0/z, z0.h
 ; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT: ret
@@ -250,8 +250,8 @@ define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a,
   ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: lsr_i32:
+define <vscale x 4 x i32> @lsr_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: lsr_i32_zero:
 ; CHECK:      movprfx z0.s, p0/z, z0.s
 ; CHECK-NEXT: lsr z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT: ret
@@ -262,8 +262,8 @@ define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a,
   ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: lsr_i64:
+define <vscale x 2 x i64> @lsr_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: lsr_i64_zero:
 ; CHECK:      movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT: ret
@@ -274,8 +274,8 @@ define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a,
   ret <vscale x 2 x i64> %out
 }
 
-define <vscale x 16 x i8> @lsr_wide_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: lsr_wide_i8:
+define <vscale x 16 x i8> @lsr_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: lsr_wide_i8_zero:
 ; CHECK-NOT:  movprfx
 ; CHECK: lsr z0.b, p0/m, z0.b, z1.d
   %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
@@ -285,8 +285,8 @@ define <vscale x 16 x i8> @lsr_wide_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8
   ret <vscale x 16 x i8> %out
 }
 
-define <vscale x 8 x i16> @lsr_wide_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: lsr_wide_i16:
+define <vscale x 8 x i16> @lsr_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: lsr_wide_i16_zero:
 ; CHECK-NOT:  movprfx
 ; CHECK: lsr z0.h, p0/m, z0.h, z1.d
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
@@ -296,8 +296,8 @@ define <vscale x 8 x i16> @lsr_wide_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16
   ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 4 x i32> @lsr_wide_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: lsr_wide_i32:
+define <vscale x 4 x i32> @lsr_wide_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: lsr_wide_i32_zero:
 ; CHECK-NOT:  movprfx
 ; CHECK: lsr z0.s, p0/m, z0.s, z1.d
   %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
