[llvm] 0dab862 - [NFC] Autogenerate a couple of AArch64 tests.

Amaury Séchet via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jun 14 10:00:33 PDT 2023


Author: Amaury Séchet
Date: 2023-06-14T17:00:26Z
New Revision: 0dab86265035dd97ecf042dfad6571ff59867dad

URL: https://github.com/llvm/llvm-project/commit/0dab86265035dd97ecf042dfad6571ff59867dad
DIFF: https://github.com/llvm/llvm-project/commit/0dab86265035dd97ecf042dfad6571ff59867dad.diff

LOG: [NFC] Autogenerate a couple of AArch64 tests.
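
The check lines below were regenerated with llvm/utils/update_llc_test_checks.py
(see the NOTE header added to each test). The exact command is not recorded in the
log; a typical invocation, assuming an in-tree build with llc under ./build/bin,
would be:

    llvm/utils/update_llc_test_checks.py --llc-binary ./build/bin/llc \
        llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll \
        llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll

The script inserts the CHECK block directly after the first line of each define,
which is why the new assertions appear interleaved with the multi-line parameter
lists in the diff below.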

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
    llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll b/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
index 936f38e49939e..e8f97309149c7 100644
--- a/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
@@ -1,17 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
 
 ; Verify that DAG combine rules for LD1 + sext/zext don't apply when the
 ; result of LD1 has multiple uses
 
 define <vscale x 2 x i64> @no_dag_combine_zext_sext(<vscale x 2 x i1> %pg,
+; CHECK-LABEL: no_dag_combine_zext_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1b { z0.d }, p0/z, [z0.d, #16]
+; CHECK-NEXT:    st1b { z0.d }, p1, [x0]
+; CHECK-NEXT:    and z0.d, z0.d, #0xff
+; CHECK-NEXT:    ret
                                                     <vscale x 2 x i64> %base,
                                                     <vscale x 2 x i8>* %res_out,
                                                     <vscale x 2 x i1> %pred) {
-; CHECK-LABEL: no_dag_combine_zext_sext
-; CHECK:  	ld1b	{ z0.d }, p0/z, [z0.d, #16]
-; CHECK-NEXT:	st1b	{ z0.d }, p1, [x0]
-; CHECK-NEXT:	and	z0.d, z0.d, #0xff
-; CHECK-NEXT: ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                            <vscale x 2 x i64> %base,
                                                                                            i64 16)
@@ -26,16 +28,17 @@ define <vscale x 2 x i64> @no_dag_combine_zext_sext(<vscale x 2 x i1> %pg,
 }
 
 define <vscale x 2 x i64> @no_dag_combine_sext(<vscale x 2 x i1> %pg,
+; CHECK-LABEL: no_dag_combine_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1b { z1.d }, p0/z, [z0.d, #16]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movprfx z0, z1
+; CHECK-NEXT:    sxtb z0.d, p0/m, z1.d
+; CHECK-NEXT:    st1b { z1.d }, p1, [x0]
+; CHECK-NEXT:    ret
                                                <vscale x 2 x i64> %base,
                                                <vscale x 2 x i8>* %res_out,
                                                <vscale x 2 x i1> %pred) {
-; CHECK-LABEL: no_dag_combine_sext
-; CHECK:  	ld1b	{ z1.d }, p0/z, [z0.d, #16]
-; CHECK-NEXT:	ptrue	p0.d
-; CHECK-NEXT: movprfx z0, z1
-; CHECK-NEXT:	sxtb	z0.d, p0/m, z1.d
-; CHECK-NEXT:	st1b	{ z1.d }, p1, [x0]
-; CHECK-NEXT:	ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                            <vscale x 2 x i64> %base,
                                                                                            i64 16)
@@ -49,14 +52,15 @@ define <vscale x 2 x i64> @no_dag_combine_sext(<vscale x 2 x i1> %pg,
 }
 
 define <vscale x 2 x i64> @no_dag_combine_zext(<vscale x 2 x i1> %pg,
+; CHECK-LABEL: no_dag_combine_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1b { z0.d }, p0/z, [z0.d, #16]
+; CHECK-NEXT:    st1b { z0.d }, p1, [x0]
+; CHECK-NEXT:    and z0.d, z0.d, #0xff
+; CHECK-NEXT:    ret
                                                <vscale x 2 x i64> %base,
                                                <vscale x 2 x i8>* %res_out,
                                                <vscale x 2 x i1> %pred) {
-; CHECK-LABEL: no_dag_combine_zext
-; CHECK:  	ld1b	{ z0.d }, p0/z, [z0.d, #16]
-; CHECK-NEXT:	st1b	{ z0.d }, p1, [x0]
-; CHECK-NEXT:	and	z0.d, z0.d, #0xff
-; CHECK-NEXT:	ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                            <vscale x 2 x i64> %base,
                                                                                            i64 16)

diff --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
index 1ddcd16f1c121..aa26352e998bb 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
 
 ;
@@ -6,9 +7,9 @@
 
 define <vscale x 2 x i64> @masked_zload_nxv2i8(<vscale x 2 x i8>* %src, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv2i8:
-; CHECK-NOT: ld1sb
-; CHECK: ld1b { [[IN:z[0-9]+]].d }, [[PG:p[0-9]+]]/z, [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %ext = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %ext
@@ -16,9 +17,9 @@ define <vscale x 2 x i64> @masked_zload_nxv2i8(<vscale x 2 x i8>* %src, <vscale
 
 define <vscale x 2 x i64> @masked_zload_nxv2i16(<vscale x 2 x i16>* %src, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv2i16:
-; CHECK-NOT: ld1sh
-; CHECK: ld1h { [[IN:z[0-9]+]].d }, [[PG:p[0-9]+]]/z, [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %ext = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %ext
@@ -26,9 +27,9 @@ define <vscale x 2 x i64> @masked_zload_nxv2i16(<vscale x 2 x i16>* %src, <vscal
 
 define <vscale x 2 x i64> @masked_zload_nxv2i32(<vscale x 2 x i32>* %src, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv2i32:
-; CHECK-NOT: ld1sw
-; CHECK: ld1w { [[IN:z[0-9]+]].d }, [[PG:p[0-9]+]]/z, [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %ext = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %ext
@@ -36,9 +37,9 @@ define <vscale x 2 x i64> @masked_zload_nxv2i32(<vscale x 2 x i32>* %src, <vscal
 
 define <vscale x 4 x i32> @masked_zload_nxv4i8(<vscale x 4 x i8>* %src, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv4i8:
-; CHECK-NOT: ld1sb
-; CHECK: ld1b { [[IN:z[0-9]+]].s }, [[PG:p[0-9]+]]/z, [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %src, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
   %ext = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %ext
@@ -46,9 +47,9 @@ define <vscale x 4 x i32> @masked_zload_nxv4i8(<vscale x 4 x i8>* %src, <vscale
 
 define <vscale x 4 x i32> @masked_zload_nxv4i16(<vscale x 4 x i16>* %src, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv4i16:
-; CHECK-NOT: ld1sh
-; CHECK: ld1h { [[IN:z[0-9]+]].s }, [[PG:p[0-9]+]]/z, [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %src, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
   %ext = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %ext
@@ -56,9 +57,9 @@ define <vscale x 4 x i32> @masked_zload_nxv4i16(<vscale x 4 x i16>* %src, <vscal
 
 define <vscale x 8 x i16> @masked_zload_nxv8i8(<vscale x 8 x i8>* %src, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv8i8:
-; CHECK-NOT: ld1sb
-; CHECK: ld1b { [[IN:z[0-9]+]].h }, [[PG:p[0-9]+]]/z, [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ret
   %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %src, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
   %ext = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %ext
@@ -66,11 +67,11 @@ define <vscale x 8 x i16> @masked_zload_nxv8i8(<vscale x 8 x i8>* %src, <vscale
 
 define <vscale x 2 x i64> @masked_zload_passthru(<vscale x 2 x i32>* %src, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru) {
 ; CHECK-LABEL: masked_zload_passthru:
-; CHECK-NOT: ld1sw
-; CHECK: ld1w { [[IN:z[0-9]+]].d }, [[PG:p[0-9]+]]/z, [x0]
-; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
-; CHECK-NEXT: mov z0.d, [[PG]]/m, [[IN]].d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0]
+; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
+; CHECK-NEXT:    mov z0.d, p0/m, z1.d
+; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru)
   %ext = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %ext
@@ -79,7 +80,8 @@ define <vscale x 2 x i64> @masked_zload_passthru(<vscale x 2 x i32>* %src, <vsca
 ; Return type requires splitting
 define <vscale x 8 x i64> @masked_zload_nxv8i16(<vscale x 8 x i16>* %a, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv8i16:
-; CHECK:         ld1h { z0.h }, p0/z, [x0]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    uunpklo z1.s, z0.h
 ; CHECK-NEXT:    uunpkhi z3.s, z0.h
 ; CHECK-NEXT:    uunpklo z0.d, z1.s
@@ -95,10 +97,11 @@ define <vscale x 8 x i64> @masked_zload_nxv8i16(<vscale x 8 x i16>* %a, <vscale
 ; Masked load requires promotion
 define <vscale x 2 x double> @masked_zload_2i16_2f64(<vscale x 2 x i16>* noalias %in, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_2i16_2f64:
-; CHECK:       ld1h { z0.d }, p0/z, [x0]
-; CHECK-NEXT:  ptrue p0.d
-; CHECK-NEXT:  ucvtf z0.d, p0/m, z0.d
-; CHECK-NEXT:  ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ucvtf z0.d, p0/m, z0.d
+; CHECK-NEXT:    ret
   %wide.load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %in, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %zext = zext <vscale x 2 x i16> %wide.load to <vscale x 2 x i32>
   %res = uitofp <vscale x 2 x i32> %zext to <vscale x 2 x double>
