[llvm] f330a45 - [AArch64] NFC: Regenerate CHECK lines for sve-masked-gather/scatter-legalize.ll

Sander de Smalen via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 6 03:45:23 PDT 2021


Author: Sander de Smalen
Date: 2021-09-06T11:44:36+01:00
New Revision: f330a4575229532c2130040ab680645022c6cea6

URL: https://github.com/llvm/llvm-project/commit/f330a4575229532c2130040ab680645022c6cea6
DIFF: https://github.com/llvm/llvm-project/commit/f330a4575229532c2130040ab680645022c6cea6.diff

LOG: [AArch64] NFC: Regenerate CHECK lines for sve-masked-gather/scatter-legalize.ll

sve-masked-gather-legalize.ll claimed its CHECK lines were generated by
the update_llc_test_checks script, but that was not the case.
This patch regenerates the CHECK lines for both tests with the script.
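
For reference, a minimal sketch of how CHECK lines like these are
regenerated with the script, assuming an in-tree build in a directory
named "build" (the paths are illustrative, not part of this commit):

    python3 llvm/utils/update_llc_test_checks.py \
        --llc-binary build/bin/llc \
        llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll \
        llvm/test/CodeGen/AArch64/sve-masked-scatter-legalize.ll

The script runs each test's RUN-line llc invocation and rewrites the
CHECK lines in the file in place.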

Change-Id: If6f0331ef01ace84017497a484161d1724ac0744

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
    llvm/test/CodeGen/AArch64/sve-masked-scatter-legalize.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
index f97fa34392a2..16bac85e8091 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
@@ -1,10 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve -aarch64-enable-mgather-combine=0 < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve -aarch64-enable-mgather-combine=1 < %s | FileCheck %s
+; RUN: llc -aarch64-enable-mgather-combine=0 -enable-misched=false < %s | FileCheck %s
+; RUN: llc -aarch64-enable-mgather-combine=1 -enable-misched=false < %s | FileCheck %s
+
+target triple = "aarch64-linux-gnu"
 
 ; Test for multiple uses of the mgather where the s/zext should not be combined
 
-define <vscale x 2 x i64> @masked_sgather_sext(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %vals) {
+define <vscale x 2 x i64> @masked_sgather_sext(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %vals) #0 {
 ; CHECK-LABEL: masked_sgather_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, z0.d]
@@ -24,16 +26,16 @@ define <vscale x 2 x i64> @masked_sgather_sext(i8* %base, <vscale x 2 x i64> %of
   ret <vscale x 2 x i64> %mul
 }
 
-define <vscale x 2 x i64> @masked_sgather_zext(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %vals) {
+define <vscale x 2 x i64> @masked_sgather_zext(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %vals) #0 {
 ; CHECK-LABEL: masked_sgather_zext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, z0.d]
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: add z1.d, z0.d, z1.d
-; CHECK-NEXT: and z0.d, z0.d, #0xff
-; CHECK-NEXT: and z1.d, z1.d, #0xff
-; CHECK-NEXT: mul z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: ret
+; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, z0.d]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    add z1.d, z0.d, z1.d
+; CHECK-NEXT:    and z0.d, z0.d, #0xff
+; CHECK-NEXT:    and z1.d, z1.d, #0xff
+; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
   %ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
   %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %data.zext = zext <vscale x 2 x i8> %data to <vscale x 2 x i64>
@@ -46,33 +48,36 @@ define <vscale x 2 x i64> @masked_sgather_zext(i8* %base, <vscale x 2 x i64> %of
 ; Tests that exercise various type legalisation scenarios for ISD::MGATHER.
 
 ; Code generate load of an illegal datatype via promotion.
-define <vscale x 2 x i8> @masked_gather_nxv2i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @masked_gather_nxv2i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2i8:
-; CHECK: ld1sb { z0.d }, p0/z, [z0.d]
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT:    ret
   %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   ret <vscale x 2 x i8> %data
 }
 
 ; Code generate load of an illegal datatype via promotion.
-define <vscale x 2 x i16> @masked_gather_nxv2i16(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @masked_gather_nxv2i16(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2i16:
-; CHECK: ld1sh { z0.d }, p0/z, [z0.d]
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT:    ret
   %data = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   ret <vscale x 2 x i16> %data
 }
 
 ; Code generate load of an illegal datatype via promotion.
-define <vscale x 2 x i32> @masked_gather_nxv2i32(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @masked_gather_nxv2i32(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2i32:
-; CHECK: ld1sw { z0.d }, p0/z, [z0.d]
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT:    ret
   %data = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   ret <vscale x 2 x i32> %data
 }
 
-define <vscale x 4 x half> @masked_gather_nxv4f16(<vscale x 4 x half*> %ptrs, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @masked_gather_nxv4f16(<vscale x 4 x half*> %ptrs, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    pfalse p1.b
@@ -86,7 +91,7 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(<vscale x 4 x half*> %ptrs, <v
   ret <vscale x 4 x half> %data
 }
 
-define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i16> %indices, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i16> %indices, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p1.d
@@ -98,14 +103,14 @@ define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i
   ret <vscale x 2 x float> %data
 }
 
-define <vscale x 8 x half> @masked_gather_nxv8f16(<vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x half> @masked_gather_nxv8f16(<vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    pfalse p1.b
 ; CHECK-NEXT:    zip2 p2.h, p0.h, p1.h
-; CHECK-NEXT:    zip1 p0.h, p0.h, p1.h
 ; CHECK-NEXT:    zip2 p3.s, p2.s, p1.s
 ; CHECK-NEXT:    zip1 p2.s, p2.s, p1.s
+; CHECK-NEXT:    zip1 p0.h, p0.h, p1.h
 ; CHECK-NEXT:    ld1h { z3.d }, p3/z, [z3.d]
 ; CHECK-NEXT:    ld1h { z2.d }, p2/z, [z2.d]
 ; CHECK-NEXT:    zip2 p2.s, p0.s, p1.s
@@ -124,10 +129,10 @@ define <vscale x 8 x bfloat> @masked_gather_nxv8bf16(bfloat* %base, <vscale x 8
 ; CHECK-LABEL: masked_gather_nxv8bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    pfalse p1.b
-; CHECK-NEXT:    sunpkhi z1.s, z0.h
-; CHECK-NEXT:    sunpklo z0.s, z0.h
 ; CHECK-NEXT:    zip2 p2.h, p0.h, p1.h
+; CHECK-NEXT:    sunpkhi z1.s, z0.h
 ; CHECK-NEXT:    zip1 p0.h, p0.h, p1.h
+; CHECK-NEXT:    sunpklo z0.s, z0.h
 ; CHECK-NEXT:    ld1h { z1.s }, p2/z, [x0, z1.s, sxtw #1]
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
@@ -137,25 +142,26 @@ define <vscale x 8 x bfloat> @masked_gather_nxv8bf16(bfloat* %base, <vscale x 8
   ret <vscale x 8 x bfloat> %data
 }
 
-define <vscale x 4 x double> @masked_gather_nxv4f64(double* %base, <vscale x 4 x i16> %indices, <vscale x 4 x i1> %mask) {;
+define <vscale x 4 x double> @masked_gather_nxv4f64(double* %base, <vscale x 4 x i16> %indices, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p1.s
-; CHECK-NEXT:    pfalse p2.b
-; CHECK-NEXT:    sxth z0.s, p1/m, z0.s
-; CHECK-NEXT:    zip1 p1.s, p0.s, p2.s
-; CHECK-NEXT:    zip2 p0.s, p0.s, p2.s
-; CHECK-NEXT:    sunpklo z1.d, z0.s
-; CHECK-NEXT:    sunpkhi z2.d, z0.s
-; CHECK-NEXT:    ld1d { z0.d }, p1/z, [x0, z1.d, lsl #3]
-; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, z2.d, lsl #3]
+; CHECK-NEXT:    movprfx z1, z0
+; CHECK-NEXT:    sxth z1.s, p1/m, z0.s
+; CHECK-NEXT:    pfalse p1.b
+; CHECK-NEXT:    sunpklo z0.d, z1.s
+; CHECK-NEXT:    zip1 p2.s, p0.s, p1.s
+; CHECK-NEXT:    sunpkhi z1.d, z1.s
+; CHECK-NEXT:    zip2 p0.s, p0.s, p1.s
+; CHECK-NEXT:    ld1d { z0.d }, p2/z, [x0, z0.d, lsl #3]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, z1.d, lsl #3]
 ; CHECK-NEXT:    ret
   %ptrs = getelementptr double, double* %base, <vscale x 4 x i16> %indices
   %data = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64(<vscale x 4 x double*> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x double> undef)
   ret <vscale x 4 x double> %data
 }
 
-define <vscale x 8 x float> @masked_gather_nxv8f32(float* %base, <vscale x 8 x i32> %offsets, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x float> @masked_gather_nxv8f32(float* %base, <vscale x 8 x i32> %offsets, <vscale x 8 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    pfalse p1.b
@@ -171,31 +177,67 @@ define <vscale x 8 x float> @masked_gather_nxv8f32(float* %base, <vscale x 8 x i
 }
 
 ; Code generate the worst case scenario when all vector types are legal.
-define <vscale x 16 x i8> @masked_gather_nxv16i8(i8* %base, <vscale x 16 x i8> %indices, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i8> @masked_gather_nxv16i8(i8* %base, <vscale x 16 x i8> %indices, <vscale x 16 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv16i8:
-; CHECK-DAG: ld1sb { {{z[0-9]+}}.s }, {{p[0-9]+}}/z, [x0, {{z[0-9]+}}.s, sxtw]
-; CHECK-DAG: ld1sb { {{z[0-9]+}}.s }, {{p[0-9]+}}/z, [x0, {{z[0-9]+}}.s, sxtw]
-; CHECK-DAG: ld1sb { {{z[0-9]+}}.s }, {{p[0-9]+}}/z, [x0, {{z[0-9]+}}.s, sxtw]
-; CHECK-DAG: ld1sb { {{z[0-9]+}}.s }, {{p[0-9]+}}/z, [x0, {{z[0-9]+}}.s, sxtw]
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    pfalse p1.b
+; CHECK-NEXT:    zip2 p2.b, p0.b, p1.b
+; CHECK-NEXT:    sunpkhi z1.h, z0.b
+; CHECK-NEXT:    zip2 p3.h, p2.h, p1.h
+; CHECK-NEXT:    sunpkhi z2.s, z1.h
+; CHECK-NEXT:    zip1 p2.h, p2.h, p1.h
+; CHECK-NEXT:    sunpklo z1.s, z1.h
+; CHECK-NEXT:    ld1sb { z2.s }, p3/z, [x0, z2.s, sxtw]
+; CHECK-NEXT:    ld1sb { z1.s }, p2/z, [x0, z1.s, sxtw]
+; CHECK-NEXT:    zip1 p0.b, p0.b, p1.b
+; CHECK-NEXT:    sunpklo z0.h, z0.b
+; CHECK-NEXT:    zip2 p2.h, p0.h, p1.h
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z2.h
+; CHECK-NEXT:    sunpkhi z2.s, z0.h
+; CHECK-NEXT:    zip1 p0.h, p0.h, p1.h
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    ld1sb { z2.s }, p2/z, [x0, z2.s, sxtw]
+; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0, z0.s, sxtw]
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z1.b
+; CHECK-NEXT:    ret
   %ptrs = getelementptr i8, i8* %base, <vscale x 16 x i8> %indices
   %data = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8(<vscale x 16 x i8*> %ptrs, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
   ret <vscale x 16 x i8> %data
 }
 
 ; Code generate the worst case scenario when all vector types are illegal.
-define <vscale x 32 x i32> @masked_gather_nxv32i32(i32* %base, <vscale x 32 x i32> %indices, <vscale x 32 x i1> %mask) {
+define <vscale x 32 x i32> @masked_gather_nxv32i32(i32* %base, <vscale x 32 x i32> %indices, <vscale x 32 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv32i32:
-; CHECK-NOT: unpkhi
-; CHECK-DAG: ld1w { {{z[0-9]+}}.s }, {{p[0-9]+}}/z, [x0, z0.s, sxtw #2]
-; CHECK-DAG: ld1w { {{z[0-9]+}}.s }, {{p[0-9]+}}/z, [x0, z1.s, sxtw #2]
-; CHECK-DAG: ld1w { {{z[0-9]+}}.s }, {{p[0-9]+}}/z, [x0, z2.s, sxtw #2]
-; CHECK-DAG: ld1w { {{z[0-9]+}}.s }, {{p[0-9]+}}/z, [x0, z3.s, sxtw #2]
-; CHECK-DAG: ld1w { {{z[0-9]+}}.s }, {{p[0-9]+}}/z, [x0, z4.s, sxtw #2]
-; CHECK-DAG: ld1w { {{z[0-9]+}}.s }, {{p[0-9]+}}/z, [x0, z5.s, sxtw #2]
-; CHECK-DAG: ld1w { {{z[0-9]+}}.s }, {{p[0-9]+}}/z, [x0, z6.s, sxtw #2]
-; CHECK-DAG: ld1w { {{z[0-9]+}}.s }, {{p[0-9]+}}/z, [x0, z7.s, sxtw #2]
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    pfalse p2.b
+; CHECK-NEXT:    zip1 p3.b, p0.b, p2.b
+; CHECK-NEXT:    zip1 p4.h, p3.h, p2.h
+; CHECK-NEXT:    zip2 p3.h, p3.h, p2.h
+; CHECK-NEXT:    zip2 p0.b, p0.b, p2.b
+; CHECK-NEXT:    ld1w { z0.s }, p4/z, [x0, z0.s, sxtw #2]
+; CHECK-NEXT:    ld1w { z1.s }, p3/z, [x0, z1.s, sxtw #2]
+; CHECK-NEXT:    zip1 p3.h, p0.h, p2.h
+; CHECK-NEXT:    zip2 p0.h, p0.h, p2.h
+; CHECK-NEXT:    ld1w { z2.s }, p3/z, [x0, z2.s, sxtw #2]
+; CHECK-NEXT:    ld1w { z3.s }, p0/z, [x0, z3.s, sxtw #2]
+; CHECK-NEXT:    zip1 p0.b, p1.b, p2.b
+; CHECK-NEXT:    zip1 p3.h, p0.h, p2.h
+; CHECK-NEXT:    zip2 p0.h, p0.h, p2.h
+; CHECK-NEXT:    ld1w { z4.s }, p3/z, [x0, z4.s, sxtw #2]
+; CHECK-NEXT:    ld1w { z5.s }, p0/z, [x0, z5.s, sxtw #2]
+; CHECK-NEXT:    zip2 p0.b, p1.b, p2.b
+; CHECK-NEXT:    zip1 p1.h, p0.h, p2.h
+; CHECK-NEXT:    zip2 p0.h, p0.h, p2.h
+; CHECK-NEXT:    ld1w { z6.s }, p1/z, [x0, z6.s, sxtw #2]
+; CHECK-NEXT:    ld1w { z7.s }, p0/z, [x0, z7.s, sxtw #2]
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %ptrs = getelementptr i32, i32* %base, <vscale x 32 x i32> %indices
   %data = call <vscale x 32 x i32> @llvm.masked.gather.nxv32i32(<vscale x 32 x i32*> %ptrs, i32 4, <vscale x 32 x i1> %mask, <vscale x 32 x i32> undef)
   ret <vscale x 32 x i32> %data
@@ -205,9 +247,10 @@ define <vscale x 32 x i32> @masked_gather_nxv32i32(i32* %base, <vscale x 32 x i3
 ; registers, so it doesn't get folded away. Same for any other vector-of-pointers
 ; style gathers which don't fit in an <vscale x 2 x type*> single register. Better folding
 ; is required before we can check those off.
-define <vscale x 4 x i32> @masked_sgather_nxv4i8(<vscale x 4 x i8*> %ptrs, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_sgather_nxv4i8(<vscale x 4 x i8*> %ptrs, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_sgather_nxv4i8:
-; CHECK:         pfalse p1.b
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    pfalse p1.b
 ; CHECK-NEXT:    zip2 p2.s, p0.s, p1.s
 ; CHECK-NEXT:    zip1 p0.s, p0.s, p1.s
 ; CHECK-NEXT:    ld1sb { z1.d }, p2/z, [z1.d]
@@ -221,6 +264,8 @@ define <vscale x 4 x i32> @masked_sgather_nxv4i8(<vscale x 4 x i8*> %ptrs, <vsca
   ret <vscale x 4 x i32> %svals
 }
 
+attributes #0 = { nounwind "target-features"="+sve,+bf16" }
+
 declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
 declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
@@ -234,4 +279,3 @@ declare <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16(<vscale x 8 x bfloat*
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 8 x float> @llvm.masked.gather.nxv8f32(<vscale x 8 x float*>, i32, <vscale x 8 x i1>, <vscale x 8 x float>)
 declare <vscale x 4 x double> @llvm.masked.gather.nxv4f64(<vscale x 4 x double*>, i32, <vscale x 4 x i1>, <vscale x 4 x double>)
-attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalize.ll b/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalize.ll
index 9cb642fac8bf..e8705268dda1 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalize.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalize.ll
@@ -1,44 +1,88 @@
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -enable-misched=false < %s | FileCheck %s
+
+target triple = "aarch64-linux-gnu"
 
 ; Tests that exercise various type legalisation scenarios for ISD::MSCATTER.
 
 ; Code generate the scenario where the offset vector type is illegal.
-define void @masked_scatter_nxv16i8(<vscale x 16 x i8> %data, i8* %base, <vscale x 16 x i8> %offsets, <vscale x 16 x i1> %mask) {
+define void @masked_scatter_nxv16i8(<vscale x 16 x i8> %data, i8* %base, <vscale x 16 x i8> %offsets, <vscale x 16 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_scatter_nxv16i8:
-; CHECK-DAG: st1b { {{z[0-9]+}}.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw]
-; CHECK-DAG: st1b { {{z[0-9]+}}.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw]
-; CHECK-DAG: st1b { {{z[0-9]+}}.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw]
-; CHECK-DAG: st1b { {{z[0-9]+}}.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw]
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    pfalse p1.b
+; CHECK-NEXT:    zip1 p2.b, p0.b, p1.b
+; CHECK-NEXT:    sunpklo z2.h, z1.b
+; CHECK-NEXT:    uunpklo z4.h, z0.b
+; CHECK-NEXT:    zip1 p3.h, p2.h, p1.h
+; CHECK-NEXT:    sunpklo z3.s, z2.h
+; CHECK-NEXT:    uunpklo z5.s, z4.h
+; CHECK-NEXT:    st1b { z5.s }, p3, [x0, z3.s, sxtw]
+; CHECK-NEXT:    zip2 p2.h, p2.h, p1.h
+; CHECK-NEXT:    sunpkhi z2.s, z2.h
+; CHECK-NEXT:    uunpkhi z3.s, z4.h
+; CHECK-NEXT:    zip2 p0.b, p0.b, p1.b
+; CHECK-NEXT:    sunpkhi z1.h, z1.b
+; CHECK-NEXT:    uunpkhi z0.h, z0.b
+; CHECK-NEXT:    st1b { z3.s }, p2, [x0, z2.s, sxtw]
+; CHECK-NEXT:    zip1 p2.h, p0.h, p1.h
+; CHECK-NEXT:    sunpklo z2.s, z1.h
+; CHECK-NEXT:    uunpklo z3.s, z0.h
+; CHECK-NEXT:    zip2 p0.h, p0.h, p1.h
+; CHECK-NEXT:    sunpkhi z1.s, z1.h
+; CHECK-NEXT:    uunpkhi z0.s, z0.h
+; CHECK-NEXT:    st1b { z3.s }, p2, [x0, z2.s, sxtw]
+; CHECK-NEXT:    st1b { z0.s }, p0, [x0, z1.s, sxtw]
+; CHECK-NEXT:    ret
   %ptrs = getelementptr i8, i8* %base, <vscale x 16 x i8> %offsets
   call void @llvm.masked.scatter.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8*> %ptrs, i32 1, <vscale x 16 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv8i16(<vscale x 8 x i16> %data, i16* %base, <vscale x 8 x i16> %offsets, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: masked_scatter_nxv8i16
-; CHECK-DAG: st1h { {{z[0-9]+}}.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #1]
-; CHECK-DAG: st1h { {{z[0-9]+}}.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #1]
-; CHECK: ret
+define void @masked_scatter_nxv8i16(<vscale x 8 x i16> %data, i16* %base, <vscale x 8 x i16> %offsets, <vscale x 8 x i1> %mask) #0 {
+; CHECK-LABEL: masked_scatter_nxv8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    pfalse p1.b
+; CHECK-NEXT:    zip1 p2.h, p0.h, p1.h
+; CHECK-NEXT:    sunpklo z2.s, z1.h
+; CHECK-NEXT:    uunpklo z3.s, z0.h
+; CHECK-NEXT:    zip2 p0.h, p0.h, p1.h
+; CHECK-NEXT:    sunpkhi z1.s, z1.h
+; CHECK-NEXT:    uunpkhi z0.s, z0.h
+; CHECK-NEXT:    st1h { z3.s }, p2, [x0, z2.s, sxtw #1]
+; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw #1]
+; CHECK-NEXT:    ret
   %ptrs = getelementptr i16, i16* %base, <vscale x 8 x i16> %offsets
   call void @llvm.masked.scatter.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16*> %ptrs, i32 1, <vscale x 8 x i1> %mask)
   ret void
 }
 
 define void @masked_scatter_nxv8bf16(<vscale x 8 x bfloat> %data, bfloat* %base, <vscale x 8 x i16> %offsets, <vscale x 8 x i1> %mask) #0 {
-; CHECK-LABEL: masked_scatter_nxv8bf16
-; CHECK-DAG: st1h { {{z[0-9]+}}.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #1]
-; CHECK-DAG: st1h { {{z[0-9]+}}.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #1]
-; CHECK: ret
+; CHECK-LABEL: masked_scatter_nxv8bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    pfalse p1.b
+; CHECK-NEXT:    zip1 p2.h, p0.h, p1.h
+; CHECK-NEXT:    sunpklo z2.s, z1.h
+; CHECK-NEXT:    uunpklo z3.s, z0.h
+; CHECK-NEXT:    zip2 p0.h, p0.h, p1.h
+; CHECK-NEXT:    sunpkhi z1.s, z1.h
+; CHECK-NEXT:    uunpkhi z0.s, z0.h
+; CHECK-NEXT:    st1h { z3.s }, p2, [x0, z2.s, sxtw #1]
+; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw #1]
+; CHECK-NEXT:    ret
   %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 8 x i16> %offsets
   call void @llvm.masked.scatter.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x bfloat*> %ptrs, i32 1, <vscale x 8 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv8f32(<vscale x 8 x float> %data, float* %base, <vscale x 8 x i32> %indexes, <vscale x 8 x i1> %masks) {
-; CHECK-LABEL: masked_scatter_nxv8f32
-; CHECK-DAG: st1w { z0.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, uxtw #2]
-; CHECK-DAG: st1w { z1.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, uxtw #2]
+define void @masked_scatter_nxv8f32(<vscale x 8 x float> %data, float* %base, <vscale x 8 x i32> %indexes, <vscale x 8 x i1> %masks) #0 {
+; CHECK-LABEL: masked_scatter_nxv8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    pfalse p1.b
+; CHECK-NEXT:    zip1 p2.h, p0.h, p1.h
+; CHECK-NEXT:    zip2 p0.h, p0.h, p1.h
+; CHECK-NEXT:    st1w { z0.s }, p2, [x0, z2.s, uxtw #2]
+; CHECK-NEXT:    st1w { z1.s }, p0, [x0, z3.s, uxtw #2]
+; CHECK-NEXT:    ret
   %ext = zext <vscale x 8 x i32> %indexes to <vscale x 8 x i64>
   %ptrs = getelementptr float, float* %base, <vscale x 8 x i64> %ext
   call void @llvm.masked.scatter.nxv8f32(<vscale x 8 x float> %data, <vscale x 8 x float*> %ptrs, i32 0, <vscale x 8 x i1> %masks)
@@ -46,18 +90,46 @@ define void @masked_scatter_nxv8f32(<vscale x 8 x float> %data, float* %base, <v
 }
 
 ; Code generate the worst case scenario when all vector types are illegal.
-define void @masked_scatter_nxv32i32(<vscale x 32 x i32> %data, i32* %base, <vscale x 32 x i32> %offsets, <vscale x 32 x i1> %mask) {
+define void @masked_scatter_nxv32i32(<vscale x 32 x i32> %data, i32* %base, <vscale x 32 x i32> %offsets, <vscale x 32 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_scatter_nxv32i32:
-; CHECK-NOT: unpkhi
-; CHECK-DAG: st1w { z0.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #2]
-; CHECK-DAG: st1w { z1.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #2]
-; CHECK-DAG: st1w { z2.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #2]
-; CHECK-DAG: st1w { z3.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #2]
-; CHECK-DAG: st1w { z4.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #2]
-; CHECK-DAG: st1w { z5.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #2]
-; CHECK-DAG: st1w { z6.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #2]
-; CHECK-DAG: st1w { z7.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #2]
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    ptrue p2.s
+; CHECK-NEXT:    ld1w { z24.s }, p2/z, [x1, #7, mul vl]
+; CHECK-NEXT:    ld1w { z25.s }, p2/z, [x1, #6, mul vl]
+; CHECK-NEXT:    ld1w { z26.s }, p2/z, [x1, #5, mul vl]
+; CHECK-NEXT:    ld1w { z27.s }, p2/z, [x1, #4, mul vl]
+; CHECK-NEXT:    ld1w { z28.s }, p2/z, [x1, #3, mul vl]
+; CHECK-NEXT:    ld1w { z29.s }, p2/z, [x1, #2, mul vl]
+; CHECK-NEXT:    ld1w { z30.s }, p2/z, [x1, #1, mul vl]
+; CHECK-NEXT:    ld1w { z31.s }, p2/z, [x1]
+; CHECK-NEXT:    pfalse p2.b
+; CHECK-NEXT:    zip1 p3.b, p0.b, p2.b
+; CHECK-NEXT:    zip1 p4.h, p3.h, p2.h
+; CHECK-NEXT:    zip2 p3.h, p3.h, p2.h
+; CHECK-NEXT:    zip2 p0.b, p0.b, p2.b
+; CHECK-NEXT:    st1w { z0.s }, p4, [x0, z31.s, sxtw #2]
+; CHECK-NEXT:    st1w { z1.s }, p3, [x0, z30.s, sxtw #2]
+; CHECK-NEXT:    zip1 p3.h, p0.h, p2.h
+; CHECK-NEXT:    zip2 p0.h, p0.h, p2.h
+; CHECK-NEXT:    st1w { z2.s }, p3, [x0, z29.s, sxtw #2]
+; CHECK-NEXT:    st1w { z3.s }, p0, [x0, z28.s, sxtw #2]
+; CHECK-NEXT:    zip1 p0.b, p1.b, p2.b
+; CHECK-NEXT:    zip1 p3.h, p0.h, p2.h
+; CHECK-NEXT:    zip2 p0.h, p0.h, p2.h
+; CHECK-NEXT:    st1w { z4.s }, p3, [x0, z27.s, sxtw #2]
+; CHECK-NEXT:    st1w { z5.s }, p0, [x0, z26.s, sxtw #2]
+; CHECK-NEXT:    zip2 p0.b, p1.b, p2.b
+; CHECK-NEXT:    zip1 p1.h, p0.h, p2.h
+; CHECK-NEXT:    zip2 p0.h, p0.h, p2.h
+; CHECK-NEXT:    st1w { z6.s }, p1, [x0, z25.s, sxtw #2]
+; CHECK-NEXT:    st1w { z7.s }, p0, [x0, z24.s, sxtw #2]
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %ptrs = getelementptr i32, i32* %base, <vscale x 32 x i32> %offsets
   call void @llvm.masked.scatter.nxv32i32(<vscale x 32 x i32> %data, <vscale x 32 x i32*> %ptrs, i32 4, <vscale x 32 x i1> %mask)
   ret void
@@ -68,4 +140,4 @@ declare void @llvm.masked.scatter.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16*
 declare void @llvm.masked.scatter.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float*>, i32, <vscale x 8 x i1>)
 declare void @llvm.masked.scatter.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat*>, i32, <vscale x 8 x i1>)
 declare void @llvm.masked.scatter.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32*>,  i32, <vscale x 32 x i1>)
-attributes #0 = { "target-features"="+sve,+bf16" }
+attributes #0 = { nounwind "target-features"="+sve,+bf16" }
