[llvm] 11cea7e - [AArch64] NFC: Clarify and auto-generate some CodeGen tests.

Sander de Smalen via llvm-commits <llvm-commits at lists.llvm.org>
Mon Jan 24 09:43:29 PST 2022


Author: Sander de Smalen
Date: 2022-01-24T17:42:37Z
New Revision: 11cea7e5ce4d3f6a0d2fac016d503f99c52cdc96

URL: https://github.com/llvm/llvm-project/commit/11cea7e5ce4d3f6a0d2fac016d503f99c52cdc96
DIFF: https://github.com/llvm/llvm-project/commit/11cea7e5ce4d3f6a0d2fac016d503f99c52cdc96.diff

LOG: [AArch64] NFC: Clarify and auto-generate some CodeGen tests.

* For named-vector-shuffles-sve.ll, move the vscale_range attribute onto the
  function definitions themselves, rather than routing it through an indirect
  #<num> attribute group. This makes the test a bit easier to read (a small
  before/after sketch follows this list).
* Auto-generated CHECK lines for sve-cmp-select.ll and ext-narrow-index.ll.
* Re-generated CHECK lines for tests that claim to be auto-generated but
  whose CHECK lines were out of date.
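For illustration only (this sketch is not part of the patch; it condenses the
named-vector-shuffles-sve.ll hunks shown below, with the function body elided
and only one of the affected functions shown):

  Before, the vscale_range constraint was only visible via an attribute group
  at the bottom of the file, far from the function it applies to:

    define <vscale x 16 x i8> @splice_nxv16i8_last_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #1 {
      ...
    }
    attributes #1 = { nounwind "target-features"="+sve" vscale_range(16,16) }

  After, vscale_range is spelled directly on the function definition, and only
  the shared attributes remain in the common #0 group:

    define <vscale x 16 x i8> @splice_nxv16i8_last_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) vscale_range(16,16) #0 {
      ...
    }
    attributes #0 = { nounwind "target-features"="+sve" }

The regenerated CHECK lines come from the update script named in the NOTE
headers, typically invoked along the lines of (with a suitable llc in PATH):

    llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AArch64/<test>.ll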

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/concat-vector.ll
    llvm/test/CodeGen/AArch64/ext-narrow-index.ll
    llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
    llvm/test/CodeGen/AArch64/neon-stepvector.ll
    llvm/test/CodeGen/AArch64/shift_minsize.ll
    llvm/test/CodeGen/AArch64/sve-cmp-select.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-sdiv-pow2.ll
    llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/concat-vector.ll b/llvm/test/CodeGen/AArch64/concat-vector.ll
index 690fb716771af..1e5d2660a79eb 100644
--- a/llvm/test/CodeGen/AArch64/concat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/concat-vector.ll
@@ -88,7 +88,7 @@ define <8 x i32> @concat8(<4 x i32>* %A, <4 x i32>* %B) {
 define <4 x half> @concat9(<2 x half> %A, <2 x half> %B) {
 ; CHECK-LABEL: concat9:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1    v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    zip1 v0.2s, v0.2s, v1.2s
 ; CHECK-NEXT:    ret
    %v4half= shufflevector <2 x half> %A, <2 x half> %B, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
    ret <4 x half> %v4half

diff --git a/llvm/test/CodeGen/AArch64/ext-narrow-index.ll b/llvm/test/CodeGen/AArch64/ext-narrow-index.ll
index f7f143ff49e31..b296a79ce4f40 100644
--- a/llvm/test/CodeGen/AArch64/ext-narrow-index.ll
+++ b/llvm/test/CodeGen/AArch64/ext-narrow-index.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
 
 ; Tests of shufflevector where the index operand is half the width of the vector
@@ -6,9 +7,9 @@
 ; i8 tests
 define <8 x i8> @i8_off0(<16 x i8> %arg1, <16 x i8> %arg2) {
 ; CHECK-LABEL: i8_off0:
-; CHECK-NOT: mov
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <16 x i8> %arg1, <16 x i8> %arg2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <8 x i8> %shuffle
@@ -16,10 +17,10 @@ entry:
 
 define <8 x i8> @i8_off1(<16 x i8> %arg1, <16 x i8> %arg2) {
 ; CHECK-LABEL: i8_off1:
-; CHECK-NOT: mov
-; CHECK: ext v0.16b, v0.16b, v0.16b, #1
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <16 x i8> %arg1, <16 x i8> %arg2, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
   ret <8 x i8> %shuffle
@@ -27,10 +28,10 @@ entry:
 
 define <8 x i8> @i8_off8(<16 x i8> %arg1, <16 x i8> %arg2) {
 ; CHECK-LABEL: i8_off8:
-; CHECK-NOT: mov
-; CHECK: ext v0.16b, v0.16b, v0.16b, #8
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <16 x i8> %arg1, <16 x i8> %arg2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   ret <8 x i8> %shuffle
@@ -38,9 +39,10 @@ entry:
 
 define <8 x i8> @i8_off15(<16 x i8> %arg1, <16 x i8> %arg2) {
 ; CHECK-LABEL: i8_off15:
-; CHECK: ext v0.16b, v0.16b, v1.16b, #15
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ext v0.16b, v0.16b, v1.16b, #15
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <16 x i8> %arg1, <16 x i8> %arg2, <8 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22>
   ret <8 x i8> %shuffle
@@ -48,9 +50,10 @@ entry:
 
 define <8 x i8> @i8_off22(<16 x i8> %arg1, <16 x i8> %arg2) {
 ; CHECK-LABEL: i8_off22:
-; CHECK: ext v0.16b, v1.16b, v1.16b, #6
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ext v0.16b, v1.16b, v1.16b, #6
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <16 x i8> %arg1, <16 x i8> %arg2, <8 x i32> <i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29>
   ret <8 x i8> %shuffle
@@ -59,9 +62,9 @@ entry:
 ; i16 tests
 define <4 x i16> @i16_off0(<8 x i16> %arg1, <8 x i16> %arg2) {
 ; CHECK-LABEL: i16_off0:
-; CHECK-NOT: mov
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <8 x i16> %arg1, <8 x i16> %arg2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x i16> %shuffle
@@ -69,10 +72,10 @@ entry:
 
 define <4 x i16> @i16_off1(<8 x i16> %arg1, <8 x i16> %arg2) {
 ; CHECK-LABEL: i16_off1:
-; CHECK-NOT: mov
-; CHECK: ext v0.16b, v0.16b, v0.16b, #2
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #2
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <8 x i16> %arg1, <8 x i16> %arg2, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
   ret <4 x i16> %shuffle
@@ -80,9 +83,10 @@ entry:
 
 define <4 x i16> @i16_off7(<8 x i16> %arg1, <8 x i16> %arg2) {
 ; CHECK-LABEL: i16_off7:
-; CHECK: ext v0.16b, v0.16b, v1.16b, #14
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ext v0.16b, v0.16b, v1.16b, #14
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <8 x i16> %arg1, <8 x i16> %arg2, <4 x i32> <i32 7, i32 8, i32 9, i32 10>
   ret <4 x i16> %shuffle
@@ -90,9 +94,10 @@ entry:
 
 define <4 x i16> @i16_off8(<8 x i16> %arg1, <8 x i16> %arg2) {
 ; CHECK-LABEL: i16_off8:
-; CHECK: mov v0.16b, v1.16b
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <8 x i16> %arg1, <8 x i16> %arg2, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
   ret <4 x i16> %shuffle
@@ -101,9 +106,9 @@ entry:
 ; i32 tests
 define <2 x i32> @i32_off0(<4 x i32> %arg1, <4 x i32> %arg2) {
 ; CHECK-LABEL: i32_off0:
-; CHECK-NOT: mov
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <4 x i32> %arg1, <4 x i32> %arg2, <2 x i32> <i32 0, i32 1>
   ret <2 x i32> %shuffle
@@ -111,10 +116,10 @@ entry:
 
 define <2 x i32> @i32_off1(<4 x i32> %arg1, <4 x i32> %arg2) {
 ; CHECK-LABEL: i32_off1:
-; CHECK-NOT: mov
-; CHECK: ext v0.16b, v0.16b, v0.16b, #4
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #4
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <4 x i32> %arg1, <4 x i32> %arg2, <2 x i32> <i32 1, i32 2>
   ret <2 x i32> %shuffle
@@ -122,9 +127,10 @@ entry:
 
 define <2 x i32> @i32_off3(<4 x i32> %arg1, <4 x i32> %arg2) {
 ; CHECK-LABEL: i32_off3:
-; CHECK: ext v0.16b, v0.16b, v1.16b, #12
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ext v0.16b, v0.16b, v1.16b, #12
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <4 x i32> %arg1, <4 x i32> %arg2, <2 x i32> <i32 3, i32 4>
   ret <2 x i32> %shuffle
@@ -132,9 +138,10 @@ entry:
 
 define <2 x i32> @i32_off4(<4 x i32> %arg1, <4 x i32> %arg2) {
 ; CHECK-LABEL: i32_off4:
-; CHECK: mov v0.16b, v1.16b
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <4 x i32> %arg1, <4 x i32> %arg2, <2 x i32> <i32 4, i32 5>
   ret <2 x i32> %shuffle
@@ -143,9 +150,9 @@ entry:
 ; i64 tests
 define <1 x i64> @i64_off0(<2 x i64> %arg1, <2 x i64> %arg2) {
 ; CHECK-LABEL: i64_off0:
-; CHECK-NOT: mov
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <2 x i64> %arg1, <2 x i64> %arg2, <1 x i32> <i32 0>
   ret <1 x i64> %shuffle
@@ -153,10 +160,10 @@ entry:
 
 define <1 x i64> @i64_off1(<2 x i64> %arg1, <2 x i64> %arg2) {
 ; CHECK-LABEL: i64_off1:
-; CHECK-NOT: mov
-; CHECK: ext v0.16b, v0.16b, v0.16b, #8
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <2 x i64> %arg1, <2 x i64> %arg2, <1 x i32> <i32 1>
   ret <1 x i64> %shuffle
@@ -164,9 +171,10 @@ entry:
 
 define <1 x i64> @i64_off2(<2 x i64> %arg1, <2 x i64> %arg2) {
 ; CHECK-LABEL: i64_off2:
-; CHECK: mov v0.16b, v1.16b
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <2 x i64> %arg1, <2 x i64> %arg2, <1 x i32> <i32 2>
   ret <1 x i64> %shuffle
@@ -175,9 +183,9 @@ entry:
 ; i8 tests with second operand zero
 define <8 x i8> @i8_zero_off0(<16 x i8> %arg1) {
 ; CHECK-LABEL: i8_zero_off0:
-; CHECK-NOT: mov
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <16 x i8> %arg1, <16 x i8> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <8 x i8> %shuffle
@@ -185,10 +193,10 @@ entry:
 
 define <8 x i8> @i8_zero_off1(<16 x i8> %arg1) {
 ; CHECK-LABEL: i8_zero_off1:
-; CHECK-NOT: mov
-; CHECK: ext v0.16b, v0.16b, v0.16b, #1
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <16 x i8> %arg1, <16 x i8> zeroinitializer, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
   ret <8 x i8> %shuffle
@@ -196,10 +204,10 @@ entry:
 
 define <8 x i8> @i8_zero_off8(<16 x i8> %arg1) {
 ; CHECK-LABEL: i8_zero_off8:
-; CHECK-NOT: mov
-; CHECK: ext v0.16b, v0.16b, v0.16b, #8
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <16 x i8> %arg1, <16 x i8> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   ret <8 x i8> %shuffle
@@ -207,10 +215,11 @@ entry:
 
 define <8 x i8> @i8_zero_off15(<16 x i8> %arg1) {
 ; CHECK-LABEL: i8_zero_off15:
-; CHECK: movi [[REG:v[0-9]+]].2d, #0
-; CHECK: ext v0.16b, v0.16b, [[REG]].16b, #15
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEXT:    ext v0.16b, v0.16b, v1.16b, #15
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <16 x i8> %arg1, <16 x i8> zeroinitializer, <8 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22>
   ret <8 x i8> %shuffle
@@ -218,9 +227,9 @@ entry:
 
 define <8 x i8> @i8_zero_off22(<16 x i8> %arg1) {
 ; CHECK-LABEL: i8_zero_off22:
-; CHECK: movi v0.2d, #0
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <16 x i8> %arg1, <16 x i8> zeroinitializer, <8 x i32> <i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29>
   ret <8 x i8> %shuffle
@@ -229,9 +238,9 @@ entry:
 ; i16 tests with second operand zero
 define <4 x i16> @i16_zero_off0(<8 x i16> %arg1) {
 ; CHECK-LABEL: i16_zero_off0:
-; CHECK-NOT: mov
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <8 x i16> %arg1, <8 x i16> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x i16> %shuffle
@@ -239,10 +248,10 @@ entry:
 
 define <4 x i16> @i16_zero_off1(<8 x i16> %arg1) {
 ; CHECK-LABEL: i16_zero_off1:
-; CHECK-NOT: mov
-; CHECK: ext v0.16b, v0.16b, v0.16b, #2
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #2
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <8 x i16> %arg1, <8 x i16> zeroinitializer, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
   ret <4 x i16> %shuffle
@@ -250,10 +259,11 @@ entry:
 
 define <4 x i16> @i16_zero_off7(<8 x i16> %arg1) {
 ; CHECK-LABEL: i16_zero_off7:
-; CHECK: movi [[REG:v[0-9]+]].2d, #0
-; CHECK: ext v0.16b, v0.16b, [[REG]].16b, #14
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEXT:    ext v0.16b, v0.16b, v1.16b, #14
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <8 x i16> %arg1, <8 x i16> zeroinitializer, <4 x i32> <i32 7, i32 8, i32 9, i32 10>
   ret <4 x i16> %shuffle
@@ -261,9 +271,9 @@ entry:
 
 define <4 x i16> @i16_zero_off8(<8 x i16> %arg1) {
 ; CHECK-LABEL: i16_zero_off8:
-; CHECK: movi v0.2d, #0
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <8 x i16> %arg1, <8 x i16> zeroinitializer, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
   ret <4 x i16> %shuffle
@@ -272,9 +282,9 @@ entry:
 ; i32 tests with second operand zero
 define <2 x i32> @i32_zero_off0(<4 x i32> %arg1) {
 ; CHECK-LABEL: i32_zero_off0:
-; CHECK-NOT: mov
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <4 x i32> %arg1, <4 x i32> zeroinitializer, <2 x i32> <i32 0, i32 1>
   ret <2 x i32> %shuffle
@@ -282,10 +292,10 @@ entry:
 
 define <2 x i32> @i32_zero_off1(<4 x i32> %arg1) {
 ; CHECK-LABEL: i32_zero_off1:
-; CHECK-NOT: mov
-; CHECK: ext v0.16b, v0.16b, v0.16b, #4
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #4
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <4 x i32> %arg1, <4 x i32> zeroinitializer, <2 x i32> <i32 1, i32 2>
   ret <2 x i32> %shuffle
@@ -293,10 +303,11 @@ entry:
 
 define <2 x i32> @i32_zero_off3(<4 x i32> %arg1) {
 ; CHECK-LABEL: i32_zero_off3:
-; CHECK: movi [[REG:v[0-9]+]].2d, #0
-; CHECK: ext v0.16b, v0.16b, [[REG]].16b, #12
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEXT:    ext v0.16b, v0.16b, v1.16b, #12
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <4 x i32> %arg1, <4 x i32> zeroinitializer, <2 x i32> <i32 3, i32 4>
   ret <2 x i32> %shuffle
@@ -304,9 +315,9 @@ entry:
 
 define <2 x i32> @i32_zero_off4(<4 x i32> %arg1) {
 ; CHECK-LABEL: i32_zero_off4:
-; CHECK: movi v0.2d, #0
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <4 x i32> %arg1, <4 x i32> zeroinitializer, <2 x i32> <i32 4, i32 5>
   ret <2 x i32> %shuffle
@@ -315,9 +326,9 @@ entry:
 ; i64 tests with second operand zero
 define <1 x i64> @i64_zero_off0(<2 x i64> %arg1) {
 ; CHECK-LABEL: i64_zero_off0:
-; CHECK-NOT: mov
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <2 x i64> %arg1, <2 x i64> zeroinitializer, <1 x i32> <i32 0>
   ret <1 x i64> %shuffle
@@ -325,10 +336,10 @@ entry:
 
 define <1 x i64> @i64_zero_off1(<2 x i64> %arg1) {
 ; CHECK-LABEL: i64_zero_off1:
-; CHECK-NOT: mov
-; CHECK: ext v0.16b, v0.16b, v0.16b, #8
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <2 x i64> %arg1, <2 x i64> zeroinitializer, <1 x i32> <i32 1>
   ret <1 x i64> %shuffle
@@ -336,9 +347,9 @@ entry:
 
 define <1 x i64> @i64_zero_off2(<2 x i64> %arg1) {
 ; CHECK-LABEL: i64_zero_off2:
-; CHECK: fmov d0, xzr
-; CHECK-NOT: ext
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmov d0, xzr
+; CHECK-NEXT:    ret
 entry:
   %shuffle = shufflevector <2 x i64> %arg1, <2 x i64> zeroinitializer, <1 x i32> <i32 2>
   ret <1 x i64> %shuffle

diff --git a/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll b/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
index bce9e035da8c1..404811433ac20 100644
--- a/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
+++ b/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
@@ -24,7 +24,7 @@ define <vscale x 16 x i8> @splice_nxv16i8_first_idx(<vscale x 16 x i8> %a, <vsca
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 16 x i8> @splice_nxv16i8_last_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #1 {
+define <vscale x 16 x i8> @splice_nxv16i8_last_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) vscale_range(16,16) #0 {
 ; CHECK-LABEL: splice_nxv16i8_last_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #255
@@ -51,7 +51,7 @@ define <vscale x 4 x i32> @splice_nxv4i32_first_idx(<vscale x 4 x i32> %a, <vsca
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @splice_nxv4i32_last_idx(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #1 {
+define <vscale x 4 x i32> @splice_nxv4i32_last_idx(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) vscale_range(16,16) #0 {
 ; CHECK-LABEL: splice_nxv4i32_last_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #252
@@ -69,7 +69,7 @@ define <vscale x 2 x i64> @splice_nxv2i64_first_idx(<vscale x 2 x i64> %a, <vsca
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @splice_nxv2i64_last_idx(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #1 {
+define <vscale x 2 x i64> @splice_nxv2i64_last_idx(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) vscale_range(16,16) #0 {
 ; CHECK-LABEL: splice_nxv2i64_last_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #248
@@ -109,7 +109,7 @@ define <vscale x 2 x half> @splice_nxv2f16_first_idx(<vscale x 2 x half> %a, <vs
   ret <vscale x 2 x half> %res
 }
 
-define <vscale x 2 x half> @splice_nxv2f16_last_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #1 {
+define <vscale x 2 x half> @splice_nxv2f16_last_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) vscale_range(16,16) #0 {
 ; CHECK-LABEL: splice_nxv2f16_last_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #248
@@ -149,7 +149,7 @@ define <vscale x 4 x half> @splice_nxv4f16_first_idx(<vscale x 4 x half> %a, <vs
   ret <vscale x 4 x half> %res
 }
 
-define <vscale x 4 x half> @splice_nxv4f16_last_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #1 {
+define <vscale x 4 x half> @splice_nxv4f16_last_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) vscale_range(16,16) #0 {
 ; CHECK-LABEL: splice_nxv4f16_last_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #252
@@ -167,7 +167,7 @@ define <vscale x 8 x half> @splice_nxv8f16_first_idx(<vscale x 8 x half> %a, <vs
   ret <vscale x 8 x half> %res
 }
 
-define <vscale x 8 x half> @splice_nxv8f16_last_idx(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #1 {
+define <vscale x 8 x half> @splice_nxv8f16_last_idx(<vscale x 8 x half> %a, <vscale x 8 x half> %b) vscale_range(16,16) #0 {
 ; CHECK-LABEL: splice_nxv8f16_last_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #254
@@ -207,7 +207,7 @@ define <vscale x 2 x float> @splice_nxv2f32_first_idx(<vscale x 2 x float> %a, <
   ret <vscale x 2 x float> %res
 }
 
-define <vscale x 2 x float> @splice_nxv2f32_last_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #1 {
+define <vscale x 2 x float> @splice_nxv2f32_last_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) vscale_range(16,16) #0 {
 ; CHECK-LABEL: splice_nxv2f32_last_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #248
@@ -225,7 +225,7 @@ define <vscale x 4 x float> @splice_nxv4f32_first_idx(<vscale x 4 x float> %a, <
   ret <vscale x 4 x float> %res
 }
 
-define <vscale x 4 x float> @splice_nxv4f32_last_idx(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #1 {
+define <vscale x 4 x float> @splice_nxv4f32_last_idx(<vscale x 4 x float> %a, <vscale x 4 x float> %b) vscale_range(16,16) #0 {
 ; CHECK-LABEL: splice_nxv4f32_last_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #252
@@ -243,7 +243,7 @@ define <vscale x 2 x double> @splice_nxv2f64_first_idx(<vscale x 2 x double> %a,
   ret <vscale x 2 x double> %res
 }
 
-define <vscale x 2 x double> @splice_nxv2f64_last_idx(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #1 {
+define <vscale x 2 x double> @splice_nxv2f64_last_idx(<vscale x 2 x double> %a, <vscale x 2 x double> %b) vscale_range(16,16) #0 {
 ; CHECK-LABEL: splice_nxv2f64_last_idx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #248
@@ -345,7 +345,7 @@ define <vscale x 8 x i32> @splice_nxv8i32_idx(<vscale x 8 x i32> %a, <vscale x 8
 }
 
 ; Verify splitvec type legalisation works as expected.
-define <vscale x 16 x float> @splice_nxv16f32_16(<vscale x 16 x float> %a, <vscale x 16 x float> %b) #2 {
+define <vscale x 16 x float> @splice_nxv16f32_16(<vscale x 16 x float> %a, <vscale x 16 x float> %b) vscale_range(2,16) #0 {
 ; CHECK-LABEL: splice_nxv16f32_16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -392,7 +392,7 @@ define <vscale x 16 x i8> @splice_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 16 x i8> @splice_nxv16i8_neg32(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #2 {
+define <vscale x 16 x i8> @splice_nxv16i8_neg32(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) vscale_range(2,16) #0 {
 ; CHECK-LABEL: splice_nxv16i8_neg32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl32
@@ -403,7 +403,7 @@ define <vscale x 16 x i8> @splice_nxv16i8_neg32(<vscale x 16 x i8> %a, <vscale x
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 16 x i8> @splice_nxv16i8_neg64(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #3 {
+define <vscale x 16 x i8> @splice_nxv16i8_neg64(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) vscale_range(4,16) #0 {
 ; CHECK-LABEL: splice_nxv16i8_neg64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl64
@@ -414,7 +414,7 @@ define <vscale x 16 x i8> @splice_nxv16i8_neg64(<vscale x 16 x i8> %a, <vscale x
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 16 x i8> @splice_nxv16i8_neg128(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #4 {
+define <vscale x 16 x i8> @splice_nxv16i8_neg128(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) vscale_range(8,16) #0 {
 ; CHECK-LABEL: splice_nxv16i8_neg128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl128
@@ -425,7 +425,7 @@ define <vscale x 16 x i8> @splice_nxv16i8_neg128(<vscale x 16 x i8> %a, <vscale
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 16 x i8> @splice_nxv16i8_neg256(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #1 {
+define <vscale x 16 x i8> @splice_nxv16i8_neg256(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) vscale_range(16,16) #0 {
 ; CHECK-LABEL: splice_nxv16i8_neg256:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl256
@@ -447,7 +447,7 @@ define <vscale x 16 x i8> @splice_nxv16i8_1(<vscale x 16 x i8> %a, <vscale x 16
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 16 x i8> @splice_nxv16i8_neg17(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #2 {
+define <vscale x 16 x i8> @splice_nxv16i8_neg17(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) vscale_range(2,16) #0 {
 ; CHECK-LABEL: splice_nxv16i8_neg17:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -492,7 +492,7 @@ define <vscale x 8 x i16> @splice_nxv8i16_1(<vscale x 8 x i16> %a, <vscale x 8 x
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @splice_nxv8i16_neg9(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #2 {
+define <vscale x 8 x i16> @splice_nxv8i16_neg9(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) vscale_range(2,16) #0 {
 ; CHECK-LABEL: splice_nxv8i16_neg9:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -537,7 +537,7 @@ define <vscale x 4 x i32> @splice_nxv4i32_1(<vscale x 4 x i32> %a, <vscale x 4 x
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @splice_nxv4i32_neg5(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #2 {
+define <vscale x 4 x i32> @splice_nxv4i32_neg5(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) vscale_range(2,16) #0 {
 ; CHECK-LABEL: splice_nxv4i32_neg5:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s, vl5
@@ -570,7 +570,7 @@ define <vscale x 2 x i64> @splice_nxv2i64_1(<vscale x 2 x i64> %a, <vscale x 2 x
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @splice_nxv2i64_neg3(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #2 {
+define <vscale x 2 x i64> @splice_nxv2i64_neg3(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) vscale_range(2,16) #0 {
 ; CHECK-LABEL: splice_nxv2i64_neg3:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d, vl3
@@ -603,7 +603,7 @@ define <vscale x 8 x half> @splice_nxv8f16_1(<vscale x 8 x half> %a, <vscale x 8
   ret <vscale x 8 x half> %res
 }
 
-define <vscale x 8 x half> @splice_nxv8f16_neg9(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #2 {
+define <vscale x 8 x half> @splice_nxv8f16_neg9(<vscale x 8 x half> %a, <vscale x 8 x half> %b) vscale_range(2,16) #0 {
 ; CHECK-LABEL: splice_nxv8f16_neg9:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -648,7 +648,7 @@ define <vscale x 4 x float> @splice_nxv4f32_1(<vscale x 4 x float> %a, <vscale x
   ret <vscale x 4 x float> %res
 }
 
-define <vscale x 4 x float> @splice_nxv4f32_neg5(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #2 {
+define <vscale x 4 x float> @splice_nxv4f32_neg5(<vscale x 4 x float> %a, <vscale x 4 x float> %b) vscale_range(2,16) #0 {
 ; CHECK-LABEL: splice_nxv4f32_neg5:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s, vl5
@@ -681,7 +681,7 @@ define <vscale x 2 x double> @splice_nxv2f64_1(<vscale x 2 x double> %a, <vscale
   ret <vscale x 2 x double> %res
 }
 
-define <vscale x 2 x double> @splice_nxv2f64_neg3(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #2 {
+define <vscale x 2 x double> @splice_nxv2f64_neg3(<vscale x 2 x double> %a, <vscale x 2 x double> %b) vscale_range(2,16) #0 {
 ; CHECK-LABEL: splice_nxv2f64_neg3:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d, vl3
@@ -797,7 +797,7 @@ define <vscale x 8 x i32> @splice_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i
 }
 
 ; Verify splitvec type legalisation works as expected.
-define <vscale x 16 x float> @splice_nxv16f32_neg17(<vscale x 16 x float> %a, <vscale x 16 x float> %b) #2 {
+define <vscale x 16 x float> @splice_nxv16f32_neg17(<vscale x 16 x float> %a, <vscale x 16 x float> %b) vscale_range(2,16) #0 {
 ; CHECK-LABEL: splice_nxv16f32_neg17:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -848,7 +848,3 @@ declare <vscale x 16 x float> @llvm.experimental.vector.splice.nxv16f32(<vscale
 declare <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, i32)
 
 attributes #0 = { nounwind "target-features"="+sve" }
-attributes #1 = { nounwind "target-features"="+sve" vscale_range(16,16) }
-attributes #2 = { nounwind "target-features"="+sve" vscale_range(2,16) }
-attributes #3 = { nounwind "target-features"="+sve" vscale_range(4,16) }
-attributes #4 = { nounwind "target-features"="+sve" vscale_range(8,16) }

diff --git a/llvm/test/CodeGen/AArch64/neon-stepvector.ll b/llvm/test/CodeGen/AArch64/neon-stepvector.ll
index 05308bf5f6d76..7255574f42097 100644
--- a/llvm/test/CodeGen/AArch64/neon-stepvector.ll
+++ b/llvm/test/CodeGen/AArch64/neon-stepvector.ll
@@ -1,4 +1,3 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+neon < %s | FileCheck %s --check-prefixes=CHECK
 
 ; LEGAL INTEGER TYPES

diff --git a/llvm/test/CodeGen/AArch64/shift_minsize.ll b/llvm/test/CodeGen/AArch64/shift_minsize.ll
index 78d87ff77762c..cc29e3a5f04f5 100644
--- a/llvm/test/CodeGen/AArch64/shift_minsize.ll
+++ b/llvm/test/CodeGen/AArch64/shift_minsize.ll
@@ -16,6 +16,11 @@ define i64 @f0(i64 %val, i64 %amt) minsize optsize {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl x0, x0, x1
 ; CHECK-NEXT:    ret
+;
+; CHECK-DARWIN-LABEL: f0:
+; CHECK-DARWIN:       ; %bb.0:
+; CHECK-DARWIN-NEXT:    lsl x0, x0, x1
+; CHECK-DARWIN-NEXT:    ret
   %res = shl i64 %val, %amt
   ret i64 %res
 }
@@ -26,6 +31,12 @@ define i32 @f1(i64 %x, i64 %y) minsize optsize {
 ; CHECK-NEXT:    lsl x0, x0, x1
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; CHECK-NEXT:    ret
+;
+; CHECK-DARWIN-LABEL: f1:
+; CHECK-DARWIN:       ; %bb.0:
+; CHECK-DARWIN-NEXT:    lsl x0, x0, x1
+; CHECK-DARWIN-NEXT:    ; kill: def $w0 killed $w0 killed $x0
+; CHECK-DARWIN-NEXT:    ret
 	%a = shl i64 %x, %y
 	%b = trunc i64 %a to i32
 	ret i32 %b
@@ -37,6 +48,12 @@ define i32 @f2(i64 %x, i64 %y) minsize optsize {
 ; CHECK-NEXT:    asr x0, x0, x1
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; CHECK-NEXT:    ret
+;
+; CHECK-DARWIN-LABEL: f2:
+; CHECK-DARWIN:       ; %bb.0:
+; CHECK-DARWIN-NEXT:    asr x0, x0, x1
+; CHECK-DARWIN-NEXT:    ; kill: def $w0 killed $w0 killed $x0
+; CHECK-DARWIN-NEXT:    ret
 	%a = ashr i64 %x, %y
 	%b = trunc i64 %a to i32
 	ret i32 %b
@@ -48,6 +65,12 @@ define i32 @f3(i64 %x, i64 %y) minsize optsize {
 ; CHECK-NEXT:    lsr x0, x0, x1
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; CHECK-NEXT:    ret
+;
+; CHECK-DARWIN-LABEL: f3:
+; CHECK-DARWIN:       ; %bb.0:
+; CHECK-DARWIN-NEXT:    lsr x0, x0, x1
+; CHECK-DARWIN-NEXT:    ; kill: def $w0 killed $w0 killed $x0
+; CHECK-DARWIN-NEXT:    ret
 	%a = lshr i64 %x, %y
 	%b = trunc i64 %a to i32
 	ret i32 %b
@@ -62,6 +85,20 @@ define dso_local { i64, i64 } @shl128(i64 %x.coerce0, i64 %x.coerce1, i8 signext
 ; CHECK-NEXT:    bl __ashlti3
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
+;
+; CHECK-DARWIN-LABEL: shl128:
+; CHECK-DARWIN:       ; %bb.0: ; %entry
+; CHECK-DARWIN-NEXT:    mvn w8, w2
+; CHECK-DARWIN-NEXT:    mov w9, w2
+; CHECK-DARWIN-NEXT:    lsr x10, x0, #1
+; CHECK-DARWIN-NEXT:    tst x9, #0x40
+; CHECK-DARWIN-NEXT:    lsr x8, x10, x8
+; CHECK-DARWIN-NEXT:    lsl x10, x1, x9
+; CHECK-DARWIN-NEXT:    orr x8, x10, x8
+; CHECK-DARWIN-NEXT:    lsl x10, x0, x9
+; CHECK-DARWIN-NEXT:    csel x1, x10, x8, ne
+; CHECK-DARWIN-NEXT:    csel x0, xzr, x10, ne
+; CHECK-DARWIN-NEXT:    ret
 
 entry:
   %x.sroa.2.0.insert.ext = zext i64 %x.coerce1 to i128
@@ -88,6 +125,21 @@ define dso_local { i64, i64 } @ashr128(i64 %x.coerce0, i64 %x.coerce1, i8 signex
 ; CHECK-NEXT:    bl __ashrti3
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
+;
+; CHECK-DARWIN-LABEL: ashr128:
+; CHECK-DARWIN:       ; %bb.0: ; %entry
+; CHECK-DARWIN-NEXT:    mov w8, w2
+; CHECK-DARWIN-NEXT:    mvn w9, w2
+; CHECK-DARWIN-NEXT:    lsl x10, x1, #1
+; CHECK-DARWIN-NEXT:    tst x8, #0x40
+; CHECK-DARWIN-NEXT:    lsr x11, x0, x8
+; CHECK-DARWIN-NEXT:    lsl x9, x10, x9
+; CHECK-DARWIN-NEXT:    asr x10, x1, x8
+; CHECK-DARWIN-NEXT:    orr x9, x9, x11
+; CHECK-DARWIN-NEXT:    asr x8, x1, #63
+; CHECK-DARWIN-NEXT:    csel x0, x10, x9, ne
+; CHECK-DARWIN-NEXT:    csel x1, x8, x10, ne
+; CHECK-DARWIN-NEXT:    ret
 entry:
   %x.sroa.2.0.insert.ext = zext i64 %x.coerce1 to i128
   %x.sroa.2.0.insert.shift = shl nuw i128 %x.sroa.2.0.insert.ext, 64
@@ -113,6 +165,20 @@ define dso_local { i64, i64 } @lshr128(i64 %x.coerce0, i64 %x.coerce1, i8 signex
 ; CHECK-NEXT:    bl __lshrti3
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
+;
+; CHECK-DARWIN-LABEL: lshr128:
+; CHECK-DARWIN:       ; %bb.0: ; %entry
+; CHECK-DARWIN-NEXT:    mov w8, w2
+; CHECK-DARWIN-NEXT:    mvn w9, w2
+; CHECK-DARWIN-NEXT:    lsl x10, x1, #1
+; CHECK-DARWIN-NEXT:    tst x8, #0x40
+; CHECK-DARWIN-NEXT:    lsr x11, x0, x8
+; CHECK-DARWIN-NEXT:    lsl x9, x10, x9
+; CHECK-DARWIN-NEXT:    orr x9, x9, x11
+; CHECK-DARWIN-NEXT:    lsr x10, x1, x8
+; CHECK-DARWIN-NEXT:    csel x0, x10, x9, ne
+; CHECK-DARWIN-NEXT:    csel x1, xzr, x10, ne
+; CHECK-DARWIN-NEXT:    ret
 entry:
   %x.sroa.2.0.insert.ext = zext i64 %x.coerce1 to i128
   %x.sroa.2.0.insert.shift = shl nuw i128 %x.sroa.2.0.insert.ext, 64

diff --git a/llvm/test/CodeGen/AArch64/sve-cmp-select.ll b/llvm/test/CodeGen/AArch64/sve-cmp-select.ll
index b04e8d922c803..1a30005fa4674 100644
--- a/llvm/test/CodeGen/AArch64/sve-cmp-select.ll
+++ b/llvm/test/CodeGen/AArch64/sve-cmp-select.ll
@@ -1,36 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-linux-unknown -mattr=+sve -o - < %s | FileCheck %s
 
 define <vscale x 16 x i8> @vselect_cmp_ne(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
-  ; CHECK-LABEL: vselect_cmp_ne
-  ; CHECK:       // %bb.0:
-	; CHECK-NEXT:    ptrue	p0.b
-	; CHECK-NEXT:    cmpne	p0.b, p0/z, z0.b, z1.b
-	; CHECK-NEXT:    sel	z0.b, p0, z1.b, z2.b
-	; CHECK-NEXT:    ret
+; CHECK-LABEL: vselect_cmp_ne:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, z1.b
+; CHECK-NEXT:    sel z0.b, p0, z1.b, z2.b
+; CHECK-NEXT:    ret
   %cmp = icmp ne <vscale x 16 x i8> %a, %b
   %d = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c
   ret <vscale x 16 x i8> %d
 }
 
 define <vscale x 16 x i8> @vselect_cmp_sgt(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
-  ; CHECK-LABEL: vselect_cmp_sgt
-  ; CHECK:       // %bb.0:
-  ; CHECK-NEXT: 	ptrue	p0.b
-  ; CHECK-NEXT: 	cmpgt	p0.b, p0/z, z0.b, z1.b
-  ; CHECK-NEXT: 	sel	z0.b, p0, z1.b, z2.b
-  ; CHECK-NEXT: 	ret
+; CHECK-LABEL: vselect_cmp_sgt:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    cmpgt p0.b, p0/z, z0.b, z1.b
+; CHECK-NEXT:    sel z0.b, p0, z1.b, z2.b
+; CHECK-NEXT:    ret
   %cmp = icmp sgt <vscale x 16 x i8> %a, %b
   %d = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c
   ret <vscale x 16 x i8> %d
 }
 
 define <vscale x 16 x i8> @vselect_cmp_ugt(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
-  ; CHECK-LABEL: vselect_cmp_ugt
-  ; CHECK:       // %bb.0:
-  ; CHECK-NEXT: 	ptrue	p0.b
-  ; CHECK-NEXT: 	cmphi	p0.b, p0/z, z0.b, z1.b
-  ; CHECK-NEXT: 	sel	z0.b, p0, z1.b, z2.b
-  ; CHECK-NEXT: 	ret
+; CHECK-LABEL: vselect_cmp_ugt:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    cmphi p0.b, p0/z, z0.b, z1.b
+; CHECK-NEXT:    sel z0.b, p0, z1.b, z2.b
+; CHECK-NEXT:    ret
   %cmp = icmp ugt <vscale x 16 x i8> %a, %b
   %d = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c
   ret <vscale x 16 x i8> %d

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-sdiv-pow2.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-sdiv-pow2.ll
index 76163f37a2aca..e09a79fb838f0 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-sdiv-pow2.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-sdiv-pow2.ll
@@ -1,4 +1,3 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
 ; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -check-prefixes=CHECK
 ; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
@@ -21,7 +20,7 @@ define <8 x i8> @sdiv_v8i8(<8 x i8> %op1) #0 {
 ; CHECK-LABEL: sdiv_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p0.b, vl8
 ; CHECK-NEXT:    asrd z0.b, p0/m, z0.b, #5
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
@@ -33,7 +32,7 @@ define <16 x i8> @sdiv_v16i8(<16 x i8> %op1) #0 {
 ; CHECK-LABEL: sdiv_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p0.b, vl16
 ; CHECK-NEXT:    asrd z0.b, p0/m, z0.b, #5
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
@@ -113,7 +112,7 @@ define <4 x i16> @sdiv_v4i16(<4 x i16> %op1) #0 {
 ; CHECK-LABEL: sdiv_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    asrd z0.h, p0/m, z0.h, #5
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
@@ -125,7 +124,7 @@ define <8 x i16> @sdiv_v8i16(<8 x i16> %op1) #0 {
 ; CHECK-LABEL: sdiv_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.h, vl8
 ; CHECK-NEXT:    asrd z0.h, p0/m, z0.h, #5
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
@@ -205,7 +204,7 @@ define <2 x i32> @sdiv_v2i32(<2 x i32> %op1) #0 {
 ; CHECK-LABEL: sdiv_v2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl2
 ; CHECK-NEXT:    asrd z0.s, p0/m, z0.s, #5
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
@@ -217,7 +216,7 @@ define <4 x i32> @sdiv_v4i32(<4 x i32> %op1) #0 {
 ; CHECK-LABEL: sdiv_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    asrd z0.s, p0/m, z0.s, #5
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
@@ -297,7 +296,7 @@ define <1 x i64> @sdiv_v1i64(<1 x i64> %op1) #0 {
 ; CHECK-LABEL: sdiv_v1i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl1
 ; CHECK-NEXT:    asrd z0.d, p0/m, z0.d, #5
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
@@ -310,7 +309,7 @@ define <2 x i64> @sdiv_v2i64(<2 x i64> %op1) #0 {
 ; CHECK-LABEL: sdiv_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    asrd z0.d, p0/m, z0.d, #5
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll b/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll
index 4e737424506d0..ddc2f5bd81284 100644
--- a/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll
@@ -177,11 +177,11 @@ define <vscale x 2 x i1> @masked_load_sext_i8i64_parg(i8* %ap, <vscale x 16 x i8
 define <vscale x 8 x i1> @masked_load_sext_i8i16_ptrue_all(i8* %ap, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: masked_load_sext_i8i16_ptrue_all:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p0.b, vl64
 ; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
 ; CHECK-NEXT:    punpklo p0.h, p0.b
 ; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ptrue p0.h, vl32
 ; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
 ; CHECK-NEXT:    ret
   %p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
@@ -197,12 +197,12 @@ define <vscale x 8 x i1> @masked_load_sext_i8i16_ptrue_all(i8* %ap, <vscale x 16
 define <vscale x 4 x i1> @masked_load_sext_i8i32_ptrue_all(i8* %ap, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: masked_load_sext_i8i32_ptrue_all:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ptrue p0.b, vl64
 ; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
 ; CHECK-NEXT:    punpklo p0.h, p0.b
 ; CHECK-NEXT:    punpklo p0.h, p0.b
 ; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl32
 ; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #0
 ; CHECK-NEXT:    ret
   %p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
