[llvm] e836292 - [AArch64] Regenerate test lines in sve-implicit-zero-filling.ll

David Green via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 21 03:47:10 PDT 2021


Author: David Green
Date: 2021-09-21T11:44:41+01:00
New Revision: e83629280f32102cd93a216490188922843af06c

URL: https://github.com/llvm/llvm-project/commit/e83629280f32102cd93a216490188922843af06c
DIFF: https://github.com/llvm/llvm-project/commit/e83629280f32102cd93a216490188922843af06c.diff

LOG: [AArch64] Regenerate test lines in sve-implicit-zero-filling.ll
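
This switches the file from hand-written CHECK lines to the full
autogenerated form. For reference, checks like these are normally
regenerated with utils/update_llc_test_checks.py; the invocation below
is only a sketch, and the build directory path is an assumption:

  # Sketch: regenerate the CHECK lines in place (adjust the llc path
  # to your own build directory).
  llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
      llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll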

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll b/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll
index 8f0356b37bfe..537587cef609 100644
--- a/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll
+++ b/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s | FileCheck %s
 
 target triple = "aarch64-unknown-linux-gnu"
@@ -5,8 +6,9 @@ target triple = "aarch64-unknown-linux-gnu"
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 16 x i8> @andv_zero_fill(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) #0 {
 ; CHECK-LABEL: andv_zero_fill:
-; CHECK: andv b0, p0, z0.b
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    andv b0, p0, z0.b
+; CHECK-NEXT:    ret
   %t1 = call i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
   %t2 = insertelement <vscale x 16 x i8> zeroinitializer, i8 %t1, i64 0
   ret <vscale x 16 x i8> %t2
@@ -15,8 +17,9 @@ define <vscale x 16 x i8> @andv_zero_fill(<vscale x 16 x i1> %pg, <vscale x 16 x
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 8 x i16> @eorv_zero_fill(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) #0 {
 ; CHECK-LABEL: eorv_zero_fill:
-; CHECK: eorv h0, p0, z0.h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eorv h0, p0, z0.h
+; CHECK-NEXT:    ret
   %t1 = call i16 @llvm.aarch64.sve.eorv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
   %t2 = insertelement <vscale x 8 x i16> zeroinitializer, i16 %t1, i64 0
   ret <vscale x 8 x i16> %t2
@@ -25,8 +28,10 @@ define <vscale x 8 x i16> @eorv_zero_fill(<vscale x 8 x i1> %pg, <vscale x 8 x i
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 2 x double> @fadda_zero_fill(<vscale x 2 x i1> %pg, double %init, <vscale x 2 x double> %a) #0 {
 ; CHECK-LABEL: fadda_zero_fill:
-; CHECK: fadda d0, p0, d0, z1.d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    fadda d0, p0, d0, z1.d
+; CHECK-NEXT:    ret
   %t1 = call double @llvm.aarch64.sve.fadda.nxv2f64(<vscale x 2 x i1> %pg, double %init, <vscale x 2 x double> %a)
   %t2 = insertelement <vscale x 2 x double> zeroinitializer, double %t1, i64 0
   ret <vscale x 2 x double> %t2
@@ -35,8 +40,9 @@ define <vscale x 2 x double> @fadda_zero_fill(<vscale x 2 x i1> %pg, double %ini
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 4 x float> @faddv_zero_fill(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
 ; CHECK-LABEL: faddv_zero_fill:
-; CHECK: faddv s0, p0, z0.s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    faddv s0, p0, z0.s
+; CHECK-NEXT:    ret
   %t1 = call float @llvm.aarch64.sve.faddv.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a)
   %t2 = insertelement <vscale x 4 x float> zeroinitializer, float %t1, i64 0
   ret <vscale x 4 x float> %t2
@@ -45,8 +51,9 @@ define <vscale x 4 x float> @faddv_zero_fill(<vscale x 4 x i1> %pg, <vscale x 4
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 8 x half> @fmaxv_zero_fill(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
 ; CHECK-LABEL: fmaxv_zero_fill:
-; CHECK: fmaxv h0, p0, z0.h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxv h0, p0, z0.h
+; CHECK-NEXT:    ret
   %t1 = call half @llvm.aarch64.sve.fmaxv.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a)
   %t2 = insertelement <vscale x 8 x half> zeroinitializer, half %t1, i64 0
   ret <vscale x 8 x half> %t2
@@ -55,8 +62,9 @@ define <vscale x 8 x half> @fmaxv_zero_fill(<vscale x 8 x i1> %pg, <vscale x 8 x
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 2 x float> @fmaxnmv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a) #0 {
 ; CHECK-LABEL: fmaxnmv_zero_fill:
-; CHECK: fmaxnmv s0, p0, z0.s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxnmv s0, p0, z0.s
+; CHECK-NEXT:    ret
   %t1 = call float @llvm.aarch64.sve.fmaxnmv.nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a)
   %t2 = insertelement <vscale x 2 x float> zeroinitializer, float %t1, i64 0
   ret <vscale x 2 x float> %t2
@@ -65,8 +73,9 @@ define <vscale x 2 x float> @fmaxnmv_zero_fill(<vscale x 2 x i1> %pg, <vscale x
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 2 x float> @fminnmv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a) #0 {
 ; CHECK-LABEL: fminnmv_zero_fill:
-; CHECK: fminnmv s0, p0, z0.s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminnmv s0, p0, z0.s
+; CHECK-NEXT:    ret
   %t1 = call float @llvm.aarch64.sve.fminnmv.nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a)
   %t2 = insertelement <vscale x 2 x float> zeroinitializer, float %t1, i64 0
   ret <vscale x 2 x float> %t2
@@ -75,8 +84,9 @@ define <vscale x 2 x float> @fminnmv_zero_fill(<vscale x 2 x i1> %pg, <vscale x
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 2 x float> @fminv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a) #0 {
 ; CHECK-LABEL: fminv_zero_fill:
-; CHECK: fminv s0, p0, z0.s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminv s0, p0, z0.s
+; CHECK-NEXT:    ret
   %t1 = call float @llvm.aarch64.sve.fminv.nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a)
   %t2 = insertelement <vscale x 2 x float> zeroinitializer, float %t1, i64 0
   ret <vscale x 2 x float> %t2
@@ -85,8 +95,9 @@ define <vscale x 2 x float> @fminv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 4 x i32> @orv_zero_fill(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) #0 {
 ; CHECK-LABEL: orv_zero_fill:
-; CHECK: orv s0, p0, z0.s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orv s0, p0, z0.s
+; CHECK-NEXT:    ret
   %t1 = call i32 @llvm.aarch64.sve.orv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
   %t2 = insertelement <vscale x 4 x i32> zeroinitializer, i32 %t1, i64 0
   ret <vscale x 4 x i32> %t2
@@ -95,8 +106,9 @@ define <vscale x 4 x i32> @orv_zero_fill(<vscale x 4 x i1> %pg, <vscale x 4 x i3
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 2 x i64> @saddv_zero_fill(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) #0 {
 ; CHECK-LABEL: saddv_zero_fill:
-; CHECK: saddv d0, p0, z0.b
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    saddv d0, p0, z0.b
+; CHECK-NEXT:    ret
   %t1 = call i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
   %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 0
   ret <vscale x 2 x i64> %t2
@@ -105,8 +117,9 @@ define <vscale x 2 x i64> @saddv_zero_fill(<vscale x 16 x i1> %pg, <vscale x 16
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 2 x i64> @smaxv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
 ; CHECK-LABEL: smaxv_zero_fill:
-; CHECK: smaxv d0, p0, z0.d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smaxv d0, p0, z0.d
+; CHECK-NEXT:    ret
   %t1 = call i64 @llvm.aarch64.sve.smaxv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
   %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 0
   ret <vscale x 2 x i64> %t2
@@ -115,8 +128,9 @@ define <vscale x 2 x i64> @smaxv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 4 x i32> @sminv_zero_fill(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) #0 {
 ; CHECK-LABEL: sminv_zero_fill:
-; CHECK: sminv s0, p0, z0.s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sminv s0, p0, z0.s
+; CHECK-NEXT:    ret
   %t1 = call i32 @llvm.aarch64.sve.sminv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
   %t2 = insertelement <vscale x 4 x i32> zeroinitializer, i32 %t1, i64 0
   ret <vscale x 4 x i32> %t2
@@ -125,8 +139,9 @@ define <vscale x 4 x i32> @sminv_zero_fill(<vscale x 4 x i1> %pg, <vscale x 4 x
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 2 x i64> @uaddv_zero_fill(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) #0 {
 ; CHECK-LABEL: uaddv_zero_fill:
-; CHECK: uaddv d0, p0, z0.h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uaddv d0, p0, z0.h
+; CHECK-NEXT:    ret
   %t1 = call i64 @llvm.aarch64.sve.uaddv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
   %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 0
   ret <vscale x 2 x i64> %t2
@@ -135,8 +150,9 @@ define <vscale x 2 x i64> @uaddv_zero_fill(<vscale x 8 x i1> %pg, <vscale x 8 x
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 16 x i8> @umaxv_zero_fill(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) #0 {
 ; CHECK-LABEL: umaxv_zero_fill:
-; CHECK: umaxv b0, p0, z0.b
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umaxv b0, p0, z0.b
+; CHECK-NEXT:    ret
   %t1 = call i8 @llvm.aarch64.sve.umaxv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
   %t2 = insertelement <vscale x 16 x i8> zeroinitializer, i8 %t1, i64 0
   ret <vscale x 16 x i8> %t2
@@ -145,8 +161,9 @@ define <vscale x 16 x i8> @umaxv_zero_fill(<vscale x 16 x i1> %pg, <vscale x 16
 ; Ensure we rely on the reduction's implicit zero filling.
 define <vscale x 2 x i64> @uminv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
 ; CHECK-LABEL: uminv_zero_fill:
-; CHECK: uminv d0, p0, z0.d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uminv d0, p0, z0.d
+; CHECK-NEXT:    ret
   %t1 = call i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
   %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 0
   ret <vscale x 2 x i64> %t2
@@ -157,9 +174,17 @@ define <vscale x 2 x i64> @uminv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x
 ; so only checks the presence of one instruction from the expected chain.
 define <vscale x 2 x i64> @zero_fill_non_zero_index(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
 ; CHECK-LABEL: zero_fill_non_zero_index:
-; CHECK: uminv d{{[0-9]+}}, p0, z0.d
-; CHECK: mov z{{[0-9]+}}.d, p{{[0-9]+}}/m, x{{[0-9]+}}
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uminv d0, p0, z0.d
+; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    index z1.d, #0, #1
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    mov z0.d, x8
+; CHECK-NEXT:    cmpeq p0.d, p0/z, z1.d, z0.d
+; CHECK-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, p0/m, x9
+; CHECK-NEXT:    ret
   %t1 = call i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
   %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 1
   ret <vscale x 2 x i64> %t2
@@ -169,9 +194,10 @@ define <vscale x 2 x i64> @zero_fill_non_zero_index(<vscale x 2 x i1> %pg, <vsca
 ; the reduction instruction.
 define <vscale x 4 x i64> @zero_fill_type_mismatch(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
 ; CHECK-LABEL: zero_fill_type_mismatch:
-; CHECK: uminv d0, p0, z0.d
-; CHECK-NEXT: mov z1.d, #0
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uminv d0, p0, z0.d
+; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    ret
   %t1 = call i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
   %t2 = insertelement <vscale x 4 x i64> zeroinitializer, i64 %t1, i64 0
   ret <vscale x 4 x i64> %t2
@@ -183,9 +209,14 @@ define <vscale x 4 x i64> @zero_fill_type_mismatch(<vscale x 2 x i1> %pg, <vscal
 ; so only checks the presence of one instruction from the expected chain.
 define <vscale x 2 x i64> @zero_fill_no_zero_upper_lanes(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
 ; CHECK-LABEL: zero_fill_no_zero_upper_lanes:
-; CHECK: umin z{{[0-9]+}}.d, p0/m, z0.d, z0.d
-; CHECK: mov z{{[0-9]+}}.d, p{{[0-9]+}}/m, x{{[0-9]+}}
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umin z0.d, p0/m, z0.d, z0.d
+; CHECK-NEXT:    ptrue p1.d, vl1
+; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    mov z1.d, p1/m, x8
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    ret
   %t1 = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %a)
   %t2 = extractelement <vscale x 2 x i64> %t1, i64 0
   %t3 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t2, i64 0

