[llvm] [AArch64][SME] Make getRegAllocationHints stricter for multi-vector loads (PR #123081)

Kerry McLaughlin via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 21 05:32:52 PST 2025


https://github.com/kmclaughlin-arm updated https://github.com/llvm/llvm-project/pull/123081

From a7aa633776e6e8f01b52ce2b92cbbe3ae93095f7 Mon Sep 17 00:00:00 2001
From: Kerry McLaughlin <kerry.mclaughlin at arm.com>
Date: Wed, 15 Jan 2025 12:01:06 +0000
Subject: [PATCH 1/3] Add uses of %scalable_arg in SME2 dot intrinsics tests

---
 .../AArch64/sme2-intrinsics-int-dots.ll       | 392 +++++++++++-------
 .../CodeGen/AArch64/sme2-intrinsics-vdot.ll   | 358 ++++++++++------
 2 files changed, 480 insertions(+), 270 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
index 86ed63d743713c..4aeaa6feaed048 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
@@ -615,15 +615,21 @@ define void @udot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z9, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z0.b, z8.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z0.b, z1.b }, z0.b[0]
-; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z8.b, z9.b }, z0.b[0]
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    mov z2.d, z1.d
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
+; CHECK-NEXT:    mov z2.d, z9.d
+; CHECK-NEXT:    mov z3.d, z10.d
+; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -638,6 +644,7 @@ entry:
   %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 1
   tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6, <vscale x 16 x i8> poison, i32 0)
+  store <vscale x 16 x i8> %scalable_arg, ptr %ptr
   ret void
 }
 
@@ -695,38 +702,58 @@ define void @udot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-LABEL: udot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-9
-; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    addvl sp, sp, #-12
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z15, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z14, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    add x10, x9, x1
-; CHECK-NEXT:    str z13, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z12, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z11, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z10, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z9, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z0.b, z4.b, z8.b, z12.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    mov z16.d, z1.d
+; CHECK-NEXT:    mov z17.d, z2.d
+; CHECK-NEXT:    mov z18.d, z3.d
+; CHECK-NEXT:    mov z4.d, z5.d
+; CHECK-NEXT:    mov z5.d, z6.d
+; CHECK-NEXT:    mov z6.d, z7.d
+; CHECK-NEXT:    mov z7.d, z20.d
+; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z9.d
+; CHECK-NEXT:    mov z5.d, z10.d
+; CHECK-NEXT:    mov z6.d, z11.d
+; CHECK-NEXT:    mov z7.d, z24.d
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z13.d
+; CHECK-NEXT:    mov z5.d, z14.d
+; CHECK-NEXT:    mov z6.d, z15.d
+; CHECK-NEXT:    mov z7.d, z28.d
 ; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z8.b - z11.b }, z0.b[0]
-; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z12.b - z15.b }, z0.b[0]
-; CHECK-NEXT:    ldr z15, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #9
+; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
+; CHECK-NEXT:    addvl sp, sp, #12
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -760,6 +787,7 @@ entry:
   tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> poison, i32 0)
+  store <vscale x 16 x i8> %scalable_arg, ptr %ptr
   ret void
 }
 
@@ -863,15 +891,21 @@ define void @usdot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z9, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z0.b, z8.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z0.b, z1.b }, z0.b[0]
-; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z8.b, z9.b }, z0.b[0]
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    mov z2.d, z1.d
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
+; CHECK-NEXT:    mov z2.d, z9.d
+; CHECK-NEXT:    mov z3.d, z10.d
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -886,6 +920,7 @@ entry:
   %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 1
   tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6, <vscale x 16 x i8> poison, i32 0)
+  store <vscale x 16 x i8> %scalable_arg, ptr %ptr
   ret void
 }
 
@@ -943,38 +978,58 @@ define void @usdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-LABEL: usdot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-9
-; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    addvl sp, sp, #-12
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z15, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z14, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    add x10, x9, x1
-; CHECK-NEXT:    str z13, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z12, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z11, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z10, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z9, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z0.b, z4.b, z8.b, z12.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    mov z16.d, z1.d
+; CHECK-NEXT:    mov z17.d, z2.d
+; CHECK-NEXT:    mov z18.d, z3.d
+; CHECK-NEXT:    mov z4.d, z5.d
+; CHECK-NEXT:    mov z5.d, z6.d
+; CHECK-NEXT:    mov z6.d, z7.d
+; CHECK-NEXT:    mov z7.d, z20.d
+; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z9.d
+; CHECK-NEXT:    mov z5.d, z10.d
+; CHECK-NEXT:    mov z6.d, z11.d
+; CHECK-NEXT:    mov z7.d, z24.d
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z13.d
+; CHECK-NEXT:    mov z5.d, z14.d
+; CHECK-NEXT:    mov z6.d, z15.d
+; CHECK-NEXT:    mov z7.d, z28.d
 ; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z8.b - z11.b }, z0.b[0]
-; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z12.b - z15.b }, z0.b[0]
-; CHECK-NEXT:    ldr z15, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #9
+; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
+; CHECK-NEXT:    addvl sp, sp, #12
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -1008,6 +1063,7 @@ entry:
   tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.usdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> poison, i32 0)
+  store <vscale x 16 x i8> %scalable_arg, ptr %ptr
   ret void
 }
 
@@ -1113,15 +1169,21 @@ define void @sdot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z9, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z0.b, z8.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z0.b, z1.b }, z0.b[0]
-; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z8.b, z9.b }, z0.b[0]
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    mov z2.d, z1.d
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
+; CHECK-NEXT:    mov z2.d, z9.d
+; CHECK-NEXT:    mov z3.d, z10.d
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1136,6 +1198,7 @@ entry:
   %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 1
   tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6, <vscale x 16 x i8> poison, i32 0)
+  store <vscale x 16 x i8> %scalable_arg, ptr %ptr
   ret void
 }
 
@@ -1193,38 +1256,58 @@ define void @sdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-LABEL: sdot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-9
-; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    addvl sp, sp, #-12
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z15, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z14, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    add x10, x9, x1
-; CHECK-NEXT:    str z13, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z12, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z11, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z10, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z9, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z0.b, z4.b, z8.b, z12.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    mov z16.d, z1.d
+; CHECK-NEXT:    mov z17.d, z2.d
+; CHECK-NEXT:    mov z18.d, z3.d
+; CHECK-NEXT:    mov z4.d, z5.d
+; CHECK-NEXT:    mov z5.d, z6.d
+; CHECK-NEXT:    mov z6.d, z7.d
+; CHECK-NEXT:    mov z7.d, z20.d
+; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z9.d
+; CHECK-NEXT:    mov z5.d, z10.d
+; CHECK-NEXT:    mov z6.d, z11.d
+; CHECK-NEXT:    mov z7.d, z24.d
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z13.d
+; CHECK-NEXT:    mov z5.d, z14.d
+; CHECK-NEXT:    mov z6.d, z15.d
+; CHECK-NEXT:    mov z7.d, z28.d
 ; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z8.b - z11.b }, z0.b[0]
-; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z12.b - z15.b }, z0.b[0]
-; CHECK-NEXT:    ldr z15, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #9
+; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
+; CHECK-NEXT:    addvl sp, sp, #12
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -1258,6 +1341,7 @@ entry:
   tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.sdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> poison, i32 0)
+  store <vscale x 16 x i8> %scalable_arg, ptr %ptr
   ret void
 }
 
@@ -1363,15 +1447,21 @@ define void @sudot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z9, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z0.b, z8.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z0.b, z1.b }, z0.b[0]
-; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z8.b, z9.b }, z0.b[0]
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    mov z2.d, z1.d
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
+; CHECK-NEXT:    mov z2.d, z9.d
+; CHECK-NEXT:    mov z3.d, z10.d
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1386,6 +1476,7 @@ entry:
   %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 1
   tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6, <vscale x 16 x i8> poison, i32 0)
+  store <vscale x 16 x i8> %scalable_arg, ptr %ptr
   ret void
 }
 
@@ -1443,38 +1534,58 @@ define void @sudot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-LABEL: sudot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-9
-; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    addvl sp, sp, #-12
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z15, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z14, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    add x10, x9, x1
-; CHECK-NEXT:    str z13, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z12, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z11, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z10, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z9, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z0.b, z4.b, z8.b, z12.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    mov z16.d, z1.d
+; CHECK-NEXT:    mov z17.d, z2.d
+; CHECK-NEXT:    mov z18.d, z3.d
+; CHECK-NEXT:    mov z4.d, z5.d
+; CHECK-NEXT:    mov z5.d, z6.d
+; CHECK-NEXT:    mov z6.d, z7.d
+; CHECK-NEXT:    mov z7.d, z20.d
+; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z9.d
+; CHECK-NEXT:    mov z5.d, z10.d
+; CHECK-NEXT:    mov z6.d, z11.d
+; CHECK-NEXT:    mov z7.d, z24.d
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z13.d
+; CHECK-NEXT:    mov z5.d, z14.d
+; CHECK-NEXT:    mov z6.d, z15.d
+; CHECK-NEXT:    mov z7.d, z28.d
 ; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z8.b - z11.b }, z0.b[0]
-; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z12.b - z15.b }, z0.b[0]
-; CHECK-NEXT:    ldr z15, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #9
+; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
+; CHECK-NEXT:    addvl sp, sp, #12
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -1508,6 +1619,7 @@ entry:
   tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.sudot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> poison, i32 0)
+  store <vscale x 16 x i8> %scalable_arg, ptr %ptr
   ret void
 }
 
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-vdot.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-vdot.ll
index e7d1050b607992..def924b9631dea 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-vdot.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-vdot.ll
@@ -99,7 +99,7 @@ entry:
   ret void
 }
 
-define void @svdot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8> %scalable_arg) #0 {
+define void @svdot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 8 x i16> %scalable_arg) #0 {
 ; CHECK-LABEL: svdot_form_2x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -107,16 +107,22 @@ define void @svdot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    add x9, x0, x1
-; CHECK-NEXT:    str z9, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z8, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1h { z0.h, z8.h }, pn8/z, [x0]
-; CHECK-NEXT:    ld1h { z1.h, z9.h }, pn8/z, [x9]
-; CHECK-NEXT:    svdot za.s[w8, 0, vgx2], { z0.h, z1.h }, z0.h[0]
-; CHECK-NEXT:    svdot za.s[w8, 0, vgx2], { z8.h, z9.h }, z0.h[0]
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1h { z1.h, z9.h }, pn8/z, [x0]
+; CHECK-NEXT:    ld1h { z2.h, z10.h }, pn8/z, [x9]
+; CHECK-NEXT:    mov z2.d, z1.d
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    svdot za.s[w8, 0, vgx2], { z2.h, z3.h }, z0.h[0]
+; CHECK-NEXT:    mov z2.d, z9.d
+; CHECK-NEXT:    mov z3.d, z10.d
+; CHECK-NEXT:    svdot za.s[w8, 0, vgx2], { z2.h, z3.h }, z0.h[0]
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -131,6 +137,7 @@ entry:
   %6 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %4, 1
   tail call void @llvm.aarch64.sme.svdot.lane.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %2, <vscale x 8 x i16> %5, <vscale x 8 x i16> poison, i32 0)
   tail call void @llvm.aarch64.sme.svdot.lane.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %3, <vscale x 8 x i16> %6, <vscale x 8 x i16> poison, i32 0)
+  store <vscale x 8 x i16> %scalable_arg, ptr %ptr
   ret void
 }
 
@@ -184,42 +191,62 @@ entry:
   ret void
 }
 
-define void @svdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8> %scalable_arg) #0 {
+define void @svdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 8 x i16> %scalable_arg) #0 {
 ; CHECK-LABEL: svdot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-9
-; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    addvl sp, sp, #-12
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z15, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z14, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    add x10, x9, x1
-; CHECK-NEXT:    str z13, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z12, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z11, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z10, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z9, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z0.b, z4.b, z8.b, z12.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    mov z16.d, z1.d
+; CHECK-NEXT:    mov z17.d, z2.d
+; CHECK-NEXT:    mov z18.d, z3.d
+; CHECK-NEXT:    mov z4.d, z5.d
+; CHECK-NEXT:    mov z5.d, z6.d
+; CHECK-NEXT:    mov z6.d, z7.d
+; CHECK-NEXT:    mov z7.d, z20.d
+; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
+; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z9.d
+; CHECK-NEXT:    mov z5.d, z10.d
+; CHECK-NEXT:    mov z6.d, z11.d
+; CHECK-NEXT:    mov z7.d, z24.d
+; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z13.d
+; CHECK-NEXT:    mov z5.d, z14.d
+; CHECK-NEXT:    mov z6.d, z15.d
+; CHECK-NEXT:    mov z7.d, z28.d
 ; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z8.b - z11.b }, z0.b[0]
-; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z12.b - z15.b }, z0.b[0]
-; CHECK-NEXT:    ldr z15, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #9
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    addvl sp, sp, #12
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -253,6 +280,7 @@ entry:
   tail call void @llvm.aarch64.sme.svdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.svdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.svdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> poison, i32 0)
+  store <vscale x 8 x i16> %scalable_arg, ptr %ptr
   ret void
 }
 
@@ -322,7 +350,7 @@ entry:
   ret void
 }
 
-define void @uvdot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8> %scalable_arg) #0 {
+define void @uvdot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 8 x i16> %scalable_arg) #0 {
 ; CHECK-LABEL: uvdot_form_2x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -330,16 +358,22 @@ define void @uvdot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    add x9, x0, x1
-; CHECK-NEXT:    str z9, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z8, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1h { z0.h, z8.h }, pn8/z, [x0]
-; CHECK-NEXT:    ld1h { z1.h, z9.h }, pn8/z, [x9]
-; CHECK-NEXT:    uvdot za.s[w8, 0, vgx2], { z0.h, z1.h }, z0.h[0]
-; CHECK-NEXT:    uvdot za.s[w8, 0, vgx2], { z8.h, z9.h }, z0.h[0]
-; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1h { z1.h, z9.h }, pn8/z, [x0]
+; CHECK-NEXT:    ld1h { z2.h, z10.h }, pn8/z, [x9]
+; CHECK-NEXT:    mov z2.d, z1.d
+; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    uvdot za.s[w8, 0, vgx2], { z2.h, z3.h }, z0.h[0]
+; CHECK-NEXT:    mov z2.d, z9.d
+; CHECK-NEXT:    mov z3.d, z10.d
+; CHECK-NEXT:    uvdot za.s[w8, 0, vgx2], { z2.h, z3.h }, z0.h[0]
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -354,6 +388,7 @@ entry:
   %6 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %4, 1
   tail call void @llvm.aarch64.sme.uvdot.lane.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %2, <vscale x 8 x i16> %5, <vscale x 8 x i16> poison, i32 0)
   tail call void @llvm.aarch64.sme.uvdot.lane.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %3, <vscale x 8 x i16> %6, <vscale x 8 x i16> poison, i32 0)
+  store <vscale x 8 x i16> %scalable_arg, ptr %ptr
   ret void
 }
 
@@ -407,42 +442,62 @@ entry:
   ret void
 }
 
-define void @uvdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8> %scalable_arg) #0 {
+define void @uvdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 8 x i16> %scalable_arg) #0 {
 ; CHECK-LABEL: uvdot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-9
-; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    addvl sp, sp, #-12
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z15, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z14, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    add x10, x9, x1
-; CHECK-NEXT:    str z13, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z12, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z11, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z10, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z9, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z0.b, z4.b, z8.b, z12.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    mov z16.d, z1.d
+; CHECK-NEXT:    mov z17.d, z2.d
+; CHECK-NEXT:    mov z18.d, z3.d
+; CHECK-NEXT:    mov z4.d, z5.d
+; CHECK-NEXT:    mov z5.d, z6.d
+; CHECK-NEXT:    mov z6.d, z7.d
+; CHECK-NEXT:    mov z7.d, z20.d
+; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
+; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z9.d
+; CHECK-NEXT:    mov z5.d, z10.d
+; CHECK-NEXT:    mov z6.d, z11.d
+; CHECK-NEXT:    mov z7.d, z24.d
+; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z13.d
+; CHECK-NEXT:    mov z5.d, z14.d
+; CHECK-NEXT:    mov z6.d, z15.d
+; CHECK-NEXT:    mov z7.d, z28.d
 ; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z8.b - z11.b }, z0.b[0]
-; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z12.b - z15.b }, z0.b[0]
-; CHECK-NEXT:    ldr z15, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #9
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    addvl sp, sp, #12
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -476,6 +531,7 @@ entry:
   tail call void @llvm.aarch64.sme.uvdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.uvdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.uvdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> poison, i32 0)
+  store <vscale x 8 x i16> %scalable_arg, ptr %ptr
   ret void
 }
 
@@ -544,42 +600,62 @@ entry:
   ret void
 }
 
-define void @suvdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8> %scalable_arg) #0 {
+define void @suvdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 8 x i16> %scalable_arg) #0 {
 ; CHECK-LABEL: suvdot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-9
-; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    addvl sp, sp, #-12
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z15, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z14, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    add x10, x9, x1
-; CHECK-NEXT:    str z13, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z12, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z11, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z10, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z9, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z0.b, z4.b, z8.b, z12.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    mov z16.d, z1.d
+; CHECK-NEXT:    mov z17.d, z2.d
+; CHECK-NEXT:    mov z18.d, z3.d
+; CHECK-NEXT:    mov z4.d, z5.d
+; CHECK-NEXT:    mov z5.d, z6.d
+; CHECK-NEXT:    mov z6.d, z7.d
+; CHECK-NEXT:    mov z7.d, z20.d
+; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
+; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z9.d
+; CHECK-NEXT:    mov z5.d, z10.d
+; CHECK-NEXT:    mov z6.d, z11.d
+; CHECK-NEXT:    mov z7.d, z24.d
+; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z13.d
+; CHECK-NEXT:    mov z5.d, z14.d
+; CHECK-NEXT:    mov z6.d, z15.d
+; CHECK-NEXT:    mov z7.d, z28.d
 ; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z8.b - z11.b }, z0.b[0]
-; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z12.b - z15.b }, z0.b[0]
-; CHECK-NEXT:    ldr z15, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #9
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    addvl sp, sp, #12
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -613,6 +689,7 @@ entry:
   tail call void @llvm.aarch64.sme.suvdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.suvdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.suvdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> poison, i32 0)
+  store <vscale x 8 x i16> %scalable_arg, ptr %ptr
   ret void
 }
 
@@ -681,42 +758,62 @@ entry:
   ret void
 }
 
-define void @usvdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8> %scalable_arg) #0 {
+define void @usvdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 8 x i16> %scalable_arg) #0 {
 ; CHECK-LABEL: usvdot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-9
-; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    addvl sp, sp, #-12
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z15, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z14, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    add x10, x9, x1
-; CHECK-NEXT:    str z13, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z12, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z11, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z10, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z9, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z0.b, z4.b, z8.b, z12.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    mov z16.d, z1.d
+; CHECK-NEXT:    mov z17.d, z2.d
+; CHECK-NEXT:    mov z18.d, z3.d
+; CHECK-NEXT:    mov z4.d, z5.d
+; CHECK-NEXT:    mov z5.d, z6.d
+; CHECK-NEXT:    mov z6.d, z7.d
+; CHECK-NEXT:    mov z7.d, z20.d
+; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
+; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z9.d
+; CHECK-NEXT:    mov z5.d, z10.d
+; CHECK-NEXT:    mov z6.d, z11.d
+; CHECK-NEXT:    mov z7.d, z24.d
+; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
+; CHECK-NEXT:    mov z4.d, z13.d
+; CHECK-NEXT:    mov z5.d, z14.d
+; CHECK-NEXT:    mov z6.d, z15.d
+; CHECK-NEXT:    mov z7.d, z28.d
 ; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z8.b - z11.b }, z0.b[0]
-; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z12.b - z15.b }, z0.b[0]
-; CHECK-NEXT:    ldr z15, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #9
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    addvl sp, sp, #12
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -750,6 +847,7 @@ entry:
   tail call void @llvm.aarch64.sme.usvdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.usvdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> poison, i32 0)
   tail call void @llvm.aarch64.sme.usvdot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> poison, i32 0)
+  store <vscale x 8 x i16> %scalable_arg, ptr %ptr
   ret void
 }
 

>From b17fa8cd856092f74e64de97093a6cd88386e30b Mon Sep 17 00:00:00 2001
From: Kerry McLaughlin <kerry.mclaughlin at arm.com>
Date: Wed, 15 Jan 2025 13:31:22 +0000
Subject: [PATCH 2/3] [AArch64][SME] Make getRegAllocationHints stricter for
 multi-vector loads.

getRegAllocationHints looks for ZPR2/ZPR4StridedOrContiguous load instructions
which are used by FORM_TRANSPOSED_REG_TUPLE pseudos and adds all strided
registers from these classes to the list of hints.
This patch changes getRegAllocationHints to restrict this list:
 - If the pseudo uses a ZPRMul class, the first load must start at a register
   whose number is a multiple of 2 (x2) or 4 (x4).
 - A strided register is only added to the list if it does not already have
   any live intervals assigned (a simplified sketch of the filtering follows).
---
 .../Target/AArch64/AArch64RegisterInfo.cpp    |  79 +++-
 .../AArch64/sme2-intrinsics-int-dots.ll       | 360 +++++++-----------
 .../CodeGen/AArch64/sme2-intrinsics-vdot.ll   | 324 ++++++----------
 .../AArch64/sme2-multivec-regalloc.mir        | 183 +++++++++
 4 files changed, 511 insertions(+), 435 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir

diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 5973b63b5a8024..aac1dc9cb5c062 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -20,6 +20,7 @@
 #include "MCTargetDesc/AArch64InstPrinter.h"
 #include "llvm/ADT/BitVector.h"
 #include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/CodeGen/LiveRegMatrix.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -1107,23 +1108,83 @@ bool AArch64RegisterInfo::getRegAllocationHints(
   // FORM_TRANSPOSED_REG_TUPLE pseudo, we want to favour reducing copy
   // instructions over reducing the number of clobbered callee-save registers,
   // so we add the strided registers as a hint.
+  const MachineInstr *TupleInst = nullptr;
   unsigned RegID = MRI.getRegClass(VirtReg)->getID();
   // Look through uses of the register for FORM_TRANSPOSED_REG_TUPLE.
   if ((RegID == AArch64::ZPR2StridedOrContiguousRegClassID ||
        RegID == AArch64::ZPR4StridedOrContiguousRegClassID) &&
-      any_of(MRI.use_nodbg_instructions(VirtReg), [](const MachineInstr &Use) {
-        return Use.getOpcode() ==
-                   AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO ||
-               Use.getOpcode() == AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO;
+      any_of(MRI.use_nodbg_instructions(VirtReg), [&TupleInst](
+                                                      const MachineInstr &Use) {
+        bool IsTuple =
+            Use.getOpcode() == AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO ||
+            Use.getOpcode() == AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO;
+        TupleInst = &Use;
+        return IsTuple;
       })) {
-    const TargetRegisterClass *StridedRC =
-        RegID == AArch64::ZPR2StridedOrContiguousRegClassID
-            ? &AArch64::ZPR2StridedRegClass
-            : &AArch64::ZPR4StridedRegClass;
+    unsigned LdOps = TupleInst->getNumOperands() - 1;
+    const TargetRegisterClass *StridedRC = LdOps == 2
+                                               ? &AArch64::ZPR2StridedRegClass
+                                               : &AArch64::ZPR4StridedRegClass;
 
+    SmallVector<MCPhysReg, 4> StridedOrder;
     for (MCPhysReg Reg : Order)
       if (StridedRC->contains(Reg))
-        Hints.push_back(Reg);
+        StridedOrder.push_back(Reg);
+
+    int OpIdx = TupleInst->findRegisterUseOperandIdx(VirtReg, this);
+    if (OpIdx == -1)
+      return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
+                                                       MF, VRM);
+
+    unsigned TupleID =
+        MRI.getRegClass(TupleInst->getOperand(0).getReg())->getID();
+    bool IsMulZPR = TupleID == AArch64::ZPR2Mul2RegClassID ||
+                    TupleID == AArch64::ZPR4Mul4RegClassID;
+
+    if (OpIdx == 1) {
+      for (unsigned I = 0; I < StridedOrder.size(); ++I) {
+        MCPhysReg Reg = StridedOrder[I];
+        unsigned FirstReg = getSubReg(Reg, AArch64::zsub0);
+
+        // If the FORM_TRANSPOSE nodes use the ZPRMul classes, the starting
+        // register of the first load should be a multiple of 2 or 4.
+        if (IsMulZPR &&
+            (getSubReg(Reg, AArch64::zsub0) - AArch64::Z0) % LdOps != 0)
+          continue;
+        // Skip this register if it has any live intervals assigned.
+        if (Matrix->isPhysRegUsed(Reg))
+          continue;
+
+        bool CanAssign = true;
+        for (unsigned Next = 1; Next < LdOps; ++Next) {
+          // Ensure we can assign enough registers from the list for all loads.
+          if (I + Next >= StridedOrder.size()) {
+            CanAssign = false;
+            break;
+          }
+          // Ensure the subsequent registers are not live and that the starting
+          // sub-registers are sequential.
+          MCPhysReg NextReg = StridedOrder[I + Next];
+          if (Matrix->isPhysRegUsed(NextReg) ||
+              (getSubReg(NextReg, AArch64::zsub0) != FirstReg + Next)) {
+            CanAssign = false;
+            break;
+          }
+        }
+        if (CanAssign)
+          Hints.push_back(Reg);
+      }
+    } else if (VRM->hasPhys(TupleInst->getOperand(1).getReg())) {
+      // This is not the first load in the sequence. Find the register
+      // assigned to the first and match to a strided reg in the list.
+      MCPhysReg FirstLoadPhysReg =
+          VRM->getPhys(TupleInst->getOperand(1).getReg());
+      for (unsigned I = 0; I < StridedOrder.size(); ++I) {
+        if (StridedOrder[I] == FirstLoadPhysReg &&
+            (I + (OpIdx - 1) < StridedOrder.size()))
+          Hints.push_back(StridedOrder[I + (OpIdx - 1)]);
+      }
+    }
 
     return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
                                                      VRM);
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
index 4aeaa6feaed048..4abc28c4863421 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
@@ -615,19 +615,15 @@ define void @udot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z11, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    mov z2.d, z1.d
-; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    str z10, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z3.b, z11.b }, pn8/z, [x0, x1]
 ; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
-; CHECK-NEXT:    mov z2.d, z9.d
-; CHECK-NEXT:    mov z3.d, z10.d
-; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z10.b, z11.b }, z0.b[0]
+; CHECK-NEXT:    ldr z11, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
 ; CHECK-NEXT:    addvl sp, sp, #3
@@ -702,58 +698,40 @@ define void @udot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-LABEL: udot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-12
-; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    addvl sp, sp, #-9
 ; CHECK-NEXT:    lsl x9, x1, #1
-; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z23, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    mov z16.d, z1.d
-; CHECK-NEXT:    mov z17.d, z2.d
-; CHECK-NEXT:    mov z18.d, z3.d
-; CHECK-NEXT:    mov z4.d, z5.d
-; CHECK-NEXT:    mov z5.d, z6.d
-; CHECK-NEXT:    mov z6.d, z7.d
-; CHECK-NEXT:    mov z7.d, z20.d
-; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    str z22, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z21, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z20, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z19, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z18, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z17, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z16, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
 ; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
-; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z9.d
-; CHECK-NEXT:    mov z5.d, z10.d
-; CHECK-NEXT:    mov z6.d, z11.d
-; CHECK-NEXT:    mov z7.d, z24.d
-; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z13.d
-; CHECK-NEXT:    mov z5.d, z14.d
-; CHECK-NEXT:    mov z6.d, z15.d
-; CHECK-NEXT:    mov z7.d, z28.d
-; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
+; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
-; CHECK-NEXT:    addvl sp, sp, #12
+; CHECK-NEXT:    addvl sp, sp, #9
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -891,19 +869,15 @@ define void @usdot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z11, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    mov z2.d, z1.d
-; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    str z10, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z3.b, z11.b }, pn8/z, [x0, x1]
 ; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
-; CHECK-NEXT:    mov z2.d, z9.d
-; CHECK-NEXT:    mov z3.d, z10.d
-; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z10.b, z11.b }, z0.b[0]
+; CHECK-NEXT:    ldr z11, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
 ; CHECK-NEXT:    addvl sp, sp, #3
@@ -978,58 +952,40 @@ define void @usdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-LABEL: usdot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-12
-; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    addvl sp, sp, #-9
 ; CHECK-NEXT:    lsl x9, x1, #1
-; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z23, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    mov z16.d, z1.d
-; CHECK-NEXT:    mov z17.d, z2.d
-; CHECK-NEXT:    mov z18.d, z3.d
-; CHECK-NEXT:    mov z4.d, z5.d
-; CHECK-NEXT:    mov z5.d, z6.d
-; CHECK-NEXT:    mov z6.d, z7.d
-; CHECK-NEXT:    mov z7.d, z20.d
-; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    str z22, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z21, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z20, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z19, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z18, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z17, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z16, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
 ; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
-; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z9.d
-; CHECK-NEXT:    mov z5.d, z10.d
-; CHECK-NEXT:    mov z6.d, z11.d
-; CHECK-NEXT:    mov z7.d, z24.d
-; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z13.d
-; CHECK-NEXT:    mov z5.d, z14.d
-; CHECK-NEXT:    mov z6.d, z15.d
-; CHECK-NEXT:    mov z7.d, z28.d
-; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
+; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
-; CHECK-NEXT:    addvl sp, sp, #12
+; CHECK-NEXT:    addvl sp, sp, #9
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -1169,19 +1125,15 @@ define void @sdot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z11, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    mov z2.d, z1.d
-; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    str z10, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z3.b, z11.b }, pn8/z, [x0, x1]
 ; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
-; CHECK-NEXT:    mov z2.d, z9.d
-; CHECK-NEXT:    mov z3.d, z10.d
-; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z10.b, z11.b }, z0.b[0]
+; CHECK-NEXT:    ldr z11, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
 ; CHECK-NEXT:    addvl sp, sp, #3
@@ -1256,58 +1208,40 @@ define void @sdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-LABEL: sdot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-12
-; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    addvl sp, sp, #-9
 ; CHECK-NEXT:    lsl x9, x1, #1
-; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z23, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    mov z16.d, z1.d
-; CHECK-NEXT:    mov z17.d, z2.d
-; CHECK-NEXT:    mov z18.d, z3.d
-; CHECK-NEXT:    mov z4.d, z5.d
-; CHECK-NEXT:    mov z5.d, z6.d
-; CHECK-NEXT:    mov z6.d, z7.d
-; CHECK-NEXT:    mov z7.d, z20.d
-; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    str z22, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z21, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z20, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z19, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z18, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z17, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z16, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
 ; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
-; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z9.d
-; CHECK-NEXT:    mov z5.d, z10.d
-; CHECK-NEXT:    mov z6.d, z11.d
-; CHECK-NEXT:    mov z7.d, z24.d
-; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z13.d
-; CHECK-NEXT:    mov z5.d, z14.d
-; CHECK-NEXT:    mov z6.d, z15.d
-; CHECK-NEXT:    mov z7.d, z28.d
-; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
+; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
-; CHECK-NEXT:    addvl sp, sp, #12
+; CHECK-NEXT:    addvl sp, sp, #9
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -1447,19 +1381,15 @@ define void @sudot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z11, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    mov z2.d, z1.d
-; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    str z10, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z3.b, z11.b }, pn8/z, [x0, x1]
 ; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
-; CHECK-NEXT:    mov z2.d, z9.d
-; CHECK-NEXT:    mov z3.d, z10.d
-; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z2.b, z3.b }, z0.b[0]
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z10.b, z11.b }, z0.b[0]
+; CHECK-NEXT:    ldr z11, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
 ; CHECK-NEXT:    addvl sp, sp, #3
@@ -1534,58 +1464,40 @@ define void @sudot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 16 x i8>
 ; CHECK-LABEL: sudot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-12
-; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    addvl sp, sp, #-9
 ; CHECK-NEXT:    lsl x9, x1, #1
-; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z23, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    mov z16.d, z1.d
-; CHECK-NEXT:    mov z17.d, z2.d
-; CHECK-NEXT:    mov z18.d, z3.d
-; CHECK-NEXT:    mov z4.d, z5.d
-; CHECK-NEXT:    mov z5.d, z6.d
-; CHECK-NEXT:    mov z6.d, z7.d
-; CHECK-NEXT:    mov z7.d, z20.d
-; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    str z22, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z21, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z20, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z19, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z18, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z17, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z16, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
 ; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
-; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z9.d
-; CHECK-NEXT:    mov z5.d, z10.d
-; CHECK-NEXT:    mov z6.d, z11.d
-; CHECK-NEXT:    mov z7.d, z24.d
-; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z13.d
-; CHECK-NEXT:    mov z5.d, z14.d
-; CHECK-NEXT:    mov z6.d, z15.d
-; CHECK-NEXT:    mov z7.d, z28.d
-; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
+; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
-; CHECK-NEXT:    addvl sp, sp, #12
+; CHECK-NEXT:    addvl sp, sp, #9
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-vdot.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-vdot.ll
index def924b9631dea..63851dd857f97b 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-vdot.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-vdot.ll
@@ -107,20 +107,16 @@ define void @svdot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 8 x i16>
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    add x9, x0, x1
-; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z11, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1h { z1.h, z9.h }, pn8/z, [x0]
-; CHECK-NEXT:    ld1h { z2.h, z10.h }, pn8/z, [x9]
-; CHECK-NEXT:    mov z2.d, z1.d
-; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    str z10, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1h { z2.h, z10.h }, pn8/z, [x0]
+; CHECK-NEXT:    ld1h { z3.h, z11.h }, pn8/z, [x9]
 ; CHECK-NEXT:    svdot za.s[w8, 0, vgx2], { z2.h, z3.h }, z0.h[0]
-; CHECK-NEXT:    mov z2.d, z9.d
-; CHECK-NEXT:    mov z3.d, z10.d
-; CHECK-NEXT:    svdot za.s[w8, 0, vgx2], { z2.h, z3.h }, z0.h[0]
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    svdot za.s[w8, 0, vgx2], { z10.h, z11.h }, z0.h[0]
+; CHECK-NEXT:    ldr z11, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    addvl sp, sp, #3
@@ -195,58 +191,40 @@ define void @svdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 8 x i16>
 ; CHECK-LABEL: svdot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-12
-; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    addvl sp, sp, #-9
 ; CHECK-NEXT:    lsl x9, x1, #1
-; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z23, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    mov z16.d, z1.d
-; CHECK-NEXT:    mov z17.d, z2.d
-; CHECK-NEXT:    mov z18.d, z3.d
-; CHECK-NEXT:    mov z4.d, z5.d
-; CHECK-NEXT:    mov z5.d, z6.d
-; CHECK-NEXT:    mov z6.d, z7.d
-; CHECK-NEXT:    mov z7.d, z20.d
-; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    str z22, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z21, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z20, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z19, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z18, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z17, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z16, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
 ; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
-; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z9.d
-; CHECK-NEXT:    mov z5.d, z10.d
-; CHECK-NEXT:    mov z6.d, z11.d
-; CHECK-NEXT:    mov z7.d, z24.d
-; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z13.d
-; CHECK-NEXT:    mov z5.d, z14.d
-; CHECK-NEXT:    mov z6.d, z15.d
-; CHECK-NEXT:    mov z7.d, z28.d
-; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
+; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
+; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
+; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
-; CHECK-NEXT:    addvl sp, sp, #12
+; CHECK-NEXT:    addvl sp, sp, #9
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -358,20 +336,16 @@ define void @uvdot_form_2x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 8 x i16>
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    add x9, x0, x1
-; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z11, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1h { z1.h, z9.h }, pn8/z, [x0]
-; CHECK-NEXT:    ld1h { z2.h, z10.h }, pn8/z, [x9]
-; CHECK-NEXT:    mov z2.d, z1.d
-; CHECK-NEXT:    mov z3.d, z2.d
+; CHECK-NEXT:    str z10, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1h { z2.h, z10.h }, pn8/z, [x0]
+; CHECK-NEXT:    ld1h { z3.h, z11.h }, pn8/z, [x9]
 ; CHECK-NEXT:    uvdot za.s[w8, 0, vgx2], { z2.h, z3.h }, z0.h[0]
-; CHECK-NEXT:    mov z2.d, z9.d
-; CHECK-NEXT:    mov z3.d, z10.d
-; CHECK-NEXT:    uvdot za.s[w8, 0, vgx2], { z2.h, z3.h }, z0.h[0]
-; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    uvdot za.s[w8, 0, vgx2], { z10.h, z11.h }, z0.h[0]
+; CHECK-NEXT:    ldr z11, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    addvl sp, sp, #3
@@ -446,58 +420,40 @@ define void @uvdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 8 x i16>
 ; CHECK-LABEL: uvdot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-12
-; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    addvl sp, sp, #-9
 ; CHECK-NEXT:    lsl x9, x1, #1
-; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z23, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    mov z16.d, z1.d
-; CHECK-NEXT:    mov z17.d, z2.d
-; CHECK-NEXT:    mov z18.d, z3.d
-; CHECK-NEXT:    mov z4.d, z5.d
-; CHECK-NEXT:    mov z5.d, z6.d
-; CHECK-NEXT:    mov z6.d, z7.d
-; CHECK-NEXT:    mov z7.d, z20.d
-; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    str z22, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z21, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z20, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z19, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z18, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z17, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z16, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
 ; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
-; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z9.d
-; CHECK-NEXT:    mov z5.d, z10.d
-; CHECK-NEXT:    mov z6.d, z11.d
-; CHECK-NEXT:    mov z7.d, z24.d
-; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z13.d
-; CHECK-NEXT:    mov z5.d, z14.d
-; CHECK-NEXT:    mov z6.d, z15.d
-; CHECK-NEXT:    mov z7.d, z28.d
-; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
+; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
+; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
+; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
-; CHECK-NEXT:    addvl sp, sp, #12
+; CHECK-NEXT:    addvl sp, sp, #9
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -604,58 +560,40 @@ define void @suvdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 8 x i16
 ; CHECK-LABEL: suvdot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-12
-; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    addvl sp, sp, #-9
 ; CHECK-NEXT:    lsl x9, x1, #1
-; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z23, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    mov z16.d, z1.d
-; CHECK-NEXT:    mov z17.d, z2.d
-; CHECK-NEXT:    mov z18.d, z3.d
-; CHECK-NEXT:    mov z4.d, z5.d
-; CHECK-NEXT:    mov z5.d, z6.d
-; CHECK-NEXT:    mov z6.d, z7.d
-; CHECK-NEXT:    mov z7.d, z20.d
-; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    str z22, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z21, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z20, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z19, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z18, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z17, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z16, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
 ; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
-; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z9.d
-; CHECK-NEXT:    mov z5.d, z10.d
-; CHECK-NEXT:    mov z6.d, z11.d
-; CHECK-NEXT:    mov z7.d, z24.d
-; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z13.d
-; CHECK-NEXT:    mov z5.d, z14.d
-; CHECK-NEXT:    mov z6.d, z15.d
-; CHECK-NEXT:    mov z7.d, z28.d
-; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
+; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
+; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
+; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
-; CHECK-NEXT:    addvl sp, sp, #12
+; CHECK-NEXT:    addvl sp, sp, #9
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -762,58 +700,40 @@ define void @usvdot_form_4x_tuple_svecc(ptr %ptr, i64 %stride, <vscale x 8 x i16
 ; CHECK-LABEL: usvdot_form_4x_tuple_svecc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-12
-; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    addvl sp, sp, #-9
 ; CHECK-NEXT:    lsl x9, x1, #1
-; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    st1b { z18.b, z19.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z16.b, z17.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    st1b { z14.b, z15.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill
-; CHECK-NEXT:    str z13, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z23, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
-; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    mov z16.d, z1.d
-; CHECK-NEXT:    mov z17.d, z2.d
-; CHECK-NEXT:    mov z18.d, z3.d
-; CHECK-NEXT:    mov z4.d, z5.d
-; CHECK-NEXT:    mov z5.d, z6.d
-; CHECK-NEXT:    mov z6.d, z7.d
-; CHECK-NEXT:    mov z7.d, z20.d
-; CHECK-NEXT:    mov z19.d, z16.d
+; CHECK-NEXT:    str z22, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z21, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z20, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z19, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z18, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z17, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z16, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
 ; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
-; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z9.d
-; CHECK-NEXT:    mov z5.d, z10.d
-; CHECK-NEXT:    mov z6.d, z11.d
-; CHECK-NEXT:    mov z7.d, z24.d
-; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    mov z4.d, z13.d
-; CHECK-NEXT:    mov z5.d, z14.d
-; CHECK-NEXT:    mov z6.d, z15.d
-; CHECK-NEXT:    mov z7.d, z28.d
-; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z4.b - z7.b }, z0.b[0]
-; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ld1b { z18.b, z19.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z16.b, z17.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ld1b { z14.b, z15.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
+; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
+; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
+; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
-; CHECK-NEXT:    addvl sp, sp, #12
+; CHECK-NEXT:    addvl sp, sp, #9
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir b/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir
new file mode 100644
index 00000000000000..b5abc5d4ee67c4
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir
@@ -0,0 +1,183 @@
+# RUN: llc -force-streaming -verify-machineinstrs -enable-subreg-liveness -start-before=greedy %s -o - | FileCheck %s
+
+# No available group of four strided x4 registers, fall back on default allocation order
+--- |
+  ; ModuleID = '<stdin>'
+  source_filename = "<stdin>"
+  target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
+  target triple = "aarch64-linux-gnu"
+
+  ; Function Attrs: nounwind
+  define void @form_4x_tuple_many_live(ptr %ptr, i64 %stride, <vscale x 16 x i8> %scalable_arg) #0 {
+  entry:
+    %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
+    %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
+    %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
+    %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
+    %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 2
+    %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 3
+    %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
+    %6 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
+    %7 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 0
+    %8 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 1
+    %9 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 2
+    %10 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 3
+    %mul3 = shl i64 %stride, 1
+    %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
+    %11 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4)
+    %12 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 0
+    %13 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 1
+    %14 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 2
+    %15 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 3
+    %mul5 = mul i64 %stride, 3
+    %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
+    %16 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6)
+    %17 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 0
+    %18 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 1
+    %19 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 2
+    %20 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 3
+    tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %7, <vscale x 16 x i8> %12, <vscale x 16 x i8> %17, <vscale x 16 x i8> poison, i32 0)
+    tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> poison, i32 0)
+    tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> poison, i32 0)
+    tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> poison, i32 0)
+    store <vscale x 16 x i8> %scalable_arg, ptr %ptr, align 16
+    ret void
+  }
+
+  ; Function Attrs: nocallback nofree nosync nounwind willreturn
+  declare void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 immarg) #1
+
+  ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: read)
+  declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount"), ptr) #2
+
+  ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+  declare target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() #3
+
+  attributes #0 = { nounwind "target-features"="+sme2" }
+  attributes #1 = { nocallback nofree nosync nounwind willreturn }
+  attributes #2 = { nocallback nofree nosync nounwind willreturn memory(argmem: read) }
+  attributes #3 = { nocallback nofree nosync nounwind willreturn memory(none) }
+
+...
+---
+name:            form_4x_tuple_many_live
+alignment:       4
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+noPhis:          true
+isSSA:           false
+noVRegs:         false
+hasFakeUses:     false
+callsEHReturn:   false
+callsUnwindInit: false
+hasEHCatchret:   false
+hasEHScopes:     false
+hasEHFunclets:   false
+isOutlined:      false
+debugInstrRef:   false
+failsVerification: false
+tracksDebugUserValues: false
+liveins:
+  - { reg: '$x0', virtual-reg: '%0' }
+  - { reg: '$x1', virtual-reg: '%1' }
+  - { reg: '$z0', virtual-reg: '%2' }
+  - { reg: '$z17', virtual-reg: '%3' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       0
+  offsetAdjustment: 0
+  maxAlignment:    1
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  functionContext: ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  hasTailCall:     false
+  isCalleeSavedInfoValid: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:           []
+entry_values:    []
+callSites:       []
+debugValueSubstitutions: []
+constants:       []
+machineFunctionInfo: {}
+body:             |
+  bb.0.entry:
+    liveins: $x0, $x1, $z0, $z17
+
+    ; CHECK-LABEL: form_4x_tuple_many_live
+    ; CHECK: stp	d11, d10, [sp, #-32]!
+    ; CHECK-NEXT: lsl	x9, x1, #1
+    ; CHECK-NEXT: stp	d9, d8, [sp, #16]
+    ; CHECK-NEXT: ptrue	pn8.b
+    ; CHECK-NEXT: ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0]
+    ; CHECK-NEXT: ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x1]
+    ; CHECK-NEXT: mov	w8, wzr
+    ; CHECK-NEXT: add	x10, x9, x1
+    ; CHECK-NEXT: ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x9]
+    ; CHECK-NEXT: ptrue	p0.b
+    ; CHECK-NEXT: ld1b { z4.b - z7.b }, pn8/z, [x0, x10]
+    ; CHECK-NEXT: mov	z8.d, z16.d
+    ; CHECK-NEXT: mov	z9.d, z18.d
+    ; CHECK-NEXT: mov	z21.d, z22.d
+    ; CHECK-NEXT: mov	z10.d, z19.d
+    ; CHECK-NEXT: mov	z22.d, z23.d
+    ; CHECK-NEXT: mov	z25.d, z26.d
+    ; CHECK-NEXT: mov	z11.d, z4.d
+    ; CHECK-NEXT: mov	z23.d, z5.d
+    ; CHECK-NEXT: mov	z26.d, z27.d
+    ; CHECK-NEXT: mov	z27.d, z6.d
+    ; CHECK-NEXT: mov	z29.d, z30.d
+    ; CHECK-NEXT: mov	z30.d, z31.d
+    ; CHECK-NEXT: mov	z31.d, z7.d
+    ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z8.b - z11.b }, z0.b[0]
+    ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
+    ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
+    ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
+    ; CHECK-NEXT: ldp	d9, d8, [sp, #16]
+    ; CHECK-NEXT: st1b { z0.b }, p0, [x0]
+    ; CHECK-NEXT: st1b { z17.b }, p0, [x0]
+    ; CHECK-NEXT: ldp	d11, d10, [sp], #32
+    ; CHECK-NEXT: ret
+
+    %2:zpr = COPY $z0
+    %3:zpr = COPY $z17
+    %1:gpr64 = COPY $x1
+    %0:gpr64common = COPY $x0
+    %27:matrixindexgpr32_8_11 = COPY $wzr
+    %14:gpr64 = UBFMXri %1, 63, 62
+    %pred:pnr_p8to15 = PTRUE_C_B implicit $vg
+    %4:zpr4stridedorcontiguous = LD1B_4Z_IMM_PSEUDO %pred, %0, 0
+    %20:gpr64 = ADDXrr %14, %1
+    %9:zpr4stridedorcontiguous = LD1B_4Z_PSEUDO %pred, %0, %1
+    %15:zpr4stridedorcontiguous = LD1B_4Z_PSEUDO %pred, %0, %14
+    %21:zpr4stridedorcontiguous = LD1B_4Z_PSEUDO %pred, %0, %20
+    %26:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %4.zsub0, %9.zsub0, %15.zsub0, %21.zsub0
+    $za = UDOT_VG4_M4ZZI_BtoS $za, %27, 0, %26, undef %28:zpr_4b, 0
+    %29:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %4.zsub1, %9.zsub1, %15.zsub1, %21.zsub1
+    $za = UDOT_VG4_M4ZZI_BtoS $za, %27, 0, %29, undef %30:zpr_4b, 0
+    %31:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %4.zsub2, %9.zsub2, %15.zsub2, %21.zsub2
+    %35:ppr_3b = PTRUE_B 31, implicit $vg
+    $za = UDOT_VG4_M4ZZI_BtoS $za, %27, 0, %31, undef %32:zpr_4b, 0
+    %33:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %4.zsub3, %9.zsub3, %15.zsub3, %21.zsub3
+    $za = UDOT_VG4_M4ZZI_BtoS $za, %27, 0, %33, undef %34:zpr_4b, 0
+    ST1B_IMM %2, %35, %0, 0 :: (store (<vscale x 1 x s128>) into %ir.ptr)
+    ST1B_IMM %3, %35, %0, 0 :: (store (<vscale x 1 x s128>) into %ir.ptr)
+    RET_ReallyLR
+
+...

>From b6db33476a221ae358b2c030197f7258bf24d2d6 Mon Sep 17 00:00:00 2001
From: Kerry McLaughlin <kerry.mclaughlin at arm.com>
Date: Fri, 17 Jan 2025 10:19:39 +0000
Subject: [PATCH 3/3] - Instead of finding a hint based on the operand index
 from the FORM_TRANSPOSED pseudo, find hints based on whether any operands
 have been allocated yet.
 - Remove the any_of which finds FORM_TRANSPOSED uses of VirtReg and instead
 iterate through all uses.
 - Clean up sme2-multivec-regalloc.mir and add a new test which changes the
 allocation order of the load instructions.

---
 .../Target/AArch64/AArch64RegisterInfo.cpp    | 103 +++----
 .../AArch64/sme2-multivec-regalloc.mir        | 266 ++++++++----------
 2 files changed, 172 insertions(+), 197 deletions(-)

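As a rough guide to the approach described in the commit message, the standalone C++ sketch below models the first case the new hinting code handles: when none of the FORM_TRANSPOSED_REG_TUPLE operands has a physical register yet, look for a strided register whose zsub0 sub-register starts a run of consecutive, unused first sub-registers (and, for the ZPRMul tuple classes, starts at a multiple of the number of loads). The names here (StridedReg, pickHints, FirstSub) are illustrative stand-ins, not LLVM API; the actual change is the getRegAllocationHints diff that follows.

// Minimal model of the "no operands assigned yet" case. Indices stand in for
// the zsub0 sub-register of each strided register (Z0 == 0, Z1 == 1, ...).
#include <cstdio>
#include <optional>
#include <vector>

struct StridedReg {
  unsigned FirstSub; // zsub0 sub-register index of this strided register
  bool Used;         // already has live intervals assigned
};

// Returns the FirstSub values that would be pushed as allocation hints.
std::vector<unsigned> pickHints(const std::vector<StridedReg> &Order,
                                unsigned LdOps, bool IsMulZPR) {
  auto findStartingAt = [&](unsigned Sub) -> std::optional<StridedReg> {
    for (const StridedReg &R : Order)
      if (R.FirstSub == Sub)
        return R;
    return std::nullopt;
  };

  std::vector<unsigned> Hints;
  for (const StridedReg &R : Order) {
    // For the ZPRMul classes the starting register must be a multiple of the
    // number of loads (2 or 4).
    if (IsMulZPR && R.FirstSub % LdOps != 0)
      continue;
    // Skip registers that already have live intervals assigned.
    if (R.Used)
      continue;
    // Require LdOps consecutive, unused first sub-registers.
    bool AllFree = true;
    for (unsigned Next = 1; Next < LdOps; ++Next) {
      std::optional<StridedReg> S = findStartingAt(R.FirstSub + Next);
      if (!S || S->Used) {
        AllFree = false;
        break;
      }
    }
    if (AllFree)
      Hints.push_back(R.FirstSub);
  }
  return Hints;
}

int main() {
  // Toy register file: strided x4 registers with zsub0 = Z16..Z23, Z17 busy.
  std::vector<StridedReg> Order;
  for (unsigned Sub = 16; Sub < 24; ++Sub)
    Order.push_back({Sub, Sub == 17});
  for (unsigned Hint : pickHints(Order, /*LdOps=*/4, /*IsMulZPR=*/true))
    std::printf("hint: tuple starting at z%u\n", Hint); // prints z20
  return 0;
}

With Z17 busy, the group starting at Z16 is rejected and the sketch hints the group whose first sub-registers are Z20..Z23, which mirrors the consecutive tuples seen in the updated CHECK lines.
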
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index aac1dc9cb5c062..44eedddc1d8809 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -1108,20 +1108,16 @@ bool AArch64RegisterInfo::getRegAllocationHints(
   // FORM_TRANSPOSED_REG_TUPLE pseudo, we want to favour reducing copy
   // instructions over reducing the number of clobbered callee-save registers,
   // so we add the strided registers as a hint.
-  const MachineInstr *TupleInst = nullptr;
   unsigned RegID = MRI.getRegClass(VirtReg)->getID();
   // Look through uses of the register for FORM_TRANSPOSED_REG_TUPLE.
-  if ((RegID == AArch64::ZPR2StridedOrContiguousRegClassID ||
-       RegID == AArch64::ZPR4StridedOrContiguousRegClassID) &&
-      any_of(MRI.use_nodbg_instructions(VirtReg), [&TupleInst](
-                                                      const MachineInstr &Use) {
-        bool IsTuple =
-            Use.getOpcode() == AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO ||
-            Use.getOpcode() == AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO;
-        TupleInst = &Use;
-        return IsTuple;
-      })) {
-    unsigned LdOps = TupleInst->getNumOperands() - 1;
+  for (const MachineInstr &Use : MRI.use_nodbg_instructions(VirtReg)) {
+    if ((RegID != AArch64::ZPR2StridedOrContiguousRegClassID &&
+         RegID != AArch64::ZPR4StridedOrContiguousRegClassID) ||
+        (Use.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO &&
+         Use.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO))
+      continue;
+
+    unsigned LdOps = Use.getNumOperands() - 1;
     const TargetRegisterClass *StridedRC = LdOps == 2
                                                ? &AArch64::ZPR2StridedRegClass
                                                : &AArch64::ZPR4StridedRegClass;
@@ -1131,63 +1127,76 @@ bool AArch64RegisterInfo::getRegAllocationHints(
       if (StridedRC->contains(Reg))
         StridedOrder.push_back(Reg);
 
-    int OpIdx = TupleInst->findRegisterUseOperandIdx(VirtReg, this);
-    if (OpIdx == -1)
-      return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
-                                                       MF, VRM);
+    auto GetRegStartingAt = [&](MCPhysReg FirstReg) -> MCPhysReg {
+      for (MCPhysReg Strided : StridedOrder)
+        if (getSubReg(Strided, AArch64::zsub0) == FirstReg)
+          return Strided;
+      return (MCPhysReg)AArch64::NoRegister;
+    };
+
+    int OpIdx = Use.findRegisterUseOperandIdx(VirtReg, this);
+    assert(OpIdx != -1 && "Expected operand index from register use.");
 
-    unsigned TupleID =
-        MRI.getRegClass(TupleInst->getOperand(0).getReg())->getID();
+    unsigned TupleID = MRI.getRegClass(Use.getOperand(0).getReg())->getID();
     bool IsMulZPR = TupleID == AArch64::ZPR2Mul2RegClassID ||
                     TupleID == AArch64::ZPR4Mul4RegClassID;
 
-    if (OpIdx == 1) {
+    unsigned AssignedOp = 0;
+    if (!any_of(make_range(Use.operands_begin() + 1, Use.operands_end()),
+                [&](const MachineOperand &Op) {
+                  if (!VRM->hasPhys(Op.getReg()))
+                    return false;
+                  AssignedOp = Op.getOperandNo();
+                  return true;
+                })) {
+      // There are no registers already assigned to any of the pseudo operands.
+      // Look for a valid starting register for the group.
       for (unsigned I = 0; I < StridedOrder.size(); ++I) {
         MCPhysReg Reg = StridedOrder[I];
         unsigned FirstReg = getSubReg(Reg, AArch64::zsub0);
 
         // If the FORM_TRANSPOSE nodes use the ZPRMul classes, the starting
         // register of the first load should be a multiple of 2 or 4.
-        if (IsMulZPR &&
-            (getSubReg(Reg, AArch64::zsub0) - AArch64::Z0) % LdOps != 0)
+        if (IsMulZPR && (FirstReg - AArch64::Z0) % LdOps != 0)
           continue;
         // Skip this register if it has any live intervals assigned.
         if (Matrix->isPhysRegUsed(Reg))
           continue;
 
-        bool CanAssign = true;
+        // Look for registers in StridedOrder which start with sub-registers
+        // following sequentially from FirstReg. If all are found and none are
+        // already live, add Reg to Hints.
+        MCPhysReg RegToAssign = Reg;
         for (unsigned Next = 1; Next < LdOps; ++Next) {
-          // Ensure we can assign enough registers from the list for all loads.
-          if (I + Next >= StridedOrder.size()) {
-            CanAssign = false;
-            break;
-          }
-          // Ensure the subsequent registers are not live and that the starting
-          // sub-registers are sequential.
-          MCPhysReg NextReg = StridedOrder[I + Next];
-          if (Matrix->isPhysRegUsed(NextReg) ||
-              (getSubReg(NextReg, AArch64::zsub0) != FirstReg + Next)) {
-            CanAssign = false;
+          MCPhysReg Strided = GetRegStartingAt(FirstReg + Next);
+          if (Strided == AArch64::NoRegister ||
+              Matrix->isPhysRegUsed(Strided)) {
+            RegToAssign = AArch64::NoRegister;
             break;
           }
+          if (Next == (unsigned)OpIdx - 1)
+            RegToAssign = Strided;
         }
-        if (CanAssign)
-          Hints.push_back(Reg);
-      }
-    } else if (VRM->hasPhys(TupleInst->getOperand(1).getReg())) {
-      // This is not the first load in the sequence. Find the register
-      // assigned to the first and match to a strided reg in the list.
-      MCPhysReg FirstLoadPhysReg =
-          VRM->getPhys(TupleInst->getOperand(1).getReg());
-      for (unsigned I = 0; I < StridedOrder.size(); ++I) {
-        if (StridedOrder[I] == FirstLoadPhysReg &&
-            (I + (OpIdx - 1) < StridedOrder.size()))
-          Hints.push_back(StridedOrder[I + (OpIdx - 1)]);
+        if (RegToAssign != AArch64::NoRegister)
+          Hints.push_back(RegToAssign);
       }
+    } else {
+      // At least one operand already has a physical register assigned.
+      // Find the starting sub-register of this and use it to work out the
+      // correct strided register to suggest based on the current op index.
+      MCPhysReg TargetStartReg =
+          getSubReg(VRM->getPhys(Use.getOperand(AssignedOp).getReg()),
+                    AArch64::zsub0) +
+          (OpIdx - AssignedOp);
+
+      for (unsigned I = 0; I < StridedOrder.size(); ++I)
+        if (getSubReg(StridedOrder[I], AArch64::zsub0) == TargetStartReg)
+          Hints.push_back(StridedOrder[I]);
     }
 
-    return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
-                                                     VRM);
+    if (!Hints.empty())
+      return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
+                                                       MF, VRM);
   }
 
   for (MachineInstr &MI : MRI.def_instructions(VirtReg)) {
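
For the second branch above, where one of the pseudo's operands already has a physical register, the desired starting sub-register is simply the assigned operand's zsub0 index offset by the difference in operand positions. A minimal sketch of that arithmetic, with illustrative names only (targetStartSub is not an LLVM helper):

// Model of the "an operand is already assigned" case.
#include <cassert>
#include <cstdio>

// AssignedSub: zsub0 index of the operand that already has a physical reg.
// AssignedOp:  its operand index on the FORM_TRANSPOSED pseudo.
// OpIdx:       operand index of the register currently being allocated.
unsigned targetStartSub(unsigned AssignedSub, int AssignedOp, int OpIdx) {
  return AssignedSub + (OpIdx - AssignedOp);
}

int main() {
  // Operand 2 was assigned a strided register starting at Z17; allocating
  // operand 4 should therefore be hinted a register starting at Z19.
  unsigned Sub = targetStartSub(/*AssignedSub=*/17, /*AssignedOp=*/2, /*OpIdx=*/4);
  assert(Sub == 19);
  std::printf("hint: strided register starting at z%u\n", Sub);
  return 0;
}
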
diff --git a/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir b/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir
index b5abc5d4ee67c4..a4bbee41cfff84 100644
--- a/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir
+++ b/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir
@@ -1,183 +1,149 @@
-# RUN: llc -force-streaming -verify-machineinstrs -enable-subreg-liveness -start-before=greedy %s -o - | FileCheck %s
+# RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -verify-machineinstrs -enable-subreg-liveness -start-before=greedy %s -o - | FileCheck %s
 
 # No available group of four strided x4 registers, fall back on default allocation order
---- |
-  ; ModuleID = '<stdin>'
-  source_filename = "<stdin>"
-  target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
-  target triple = "aarch64-linux-gnu"
-
-  ; Function Attrs: nounwind
-  define void @form_4x_tuple_many_live(ptr %ptr, i64 %stride, <vscale x 16 x i8> %scalable_arg) #0 {
-  entry:
-    %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
-    %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
-    %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
-    %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
-    %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 2
-    %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 3
-    %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
-    %6 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
-    %7 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 0
-    %8 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 1
-    %9 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 2
-    %10 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 3
-    %mul3 = shl i64 %stride, 1
-    %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
-    %11 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4)
-    %12 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 0
-    %13 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 1
-    %14 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 2
-    %15 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 3
-    %mul5 = mul i64 %stride, 3
-    %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
-    %16 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6)
-    %17 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 0
-    %18 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 1
-    %19 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 2
-    %20 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 3
-    tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %7, <vscale x 16 x i8> %12, <vscale x 16 x i8> %17, <vscale x 16 x i8> poison, i32 0)
-    tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> poison, i32 0)
-    tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> poison, i32 0)
-    tail call void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> poison, i32 0)
-    store <vscale x 16 x i8> %scalable_arg, ptr %ptr, align 16
-    ret void
-  }
-
-  ; Function Attrs: nocallback nofree nosync nounwind willreturn
-  declare void @llvm.aarch64.sme.udot.lane.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 immarg) #1
-
-  ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: read)
-  declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount"), ptr) #2
-
-  ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
-  declare target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() #3
-
-  attributes #0 = { nounwind "target-features"="+sme2" }
-  attributes #1 = { nocallback nofree nosync nounwind willreturn }
-  attributes #2 = { nocallback nofree nosync nounwind willreturn memory(argmem: read) }
-  attributes #3 = { nocallback nofree nosync nounwind willreturn memory(none) }
-
-...
 ---
 name:            form_4x_tuple_many_live
-alignment:       4
-exposesReturnsTwice: false
-legalized:       false
-regBankSelected: false
-selected:        false
-failedISel:      false
 tracksRegLiveness: true
-hasWinCFI:       false
-noPhis:          true
-isSSA:           false
-noVRegs:         false
-hasFakeUses:     false
-callsEHReturn:   false
-callsUnwindInit: false
-hasEHCatchret:   false
-hasEHScopes:     false
-hasEHFunclets:   false
-isOutlined:      false
-debugInstrRef:   false
-failsVerification: false
-tracksDebugUserValues: false
 liveins:
   - { reg: '$x0', virtual-reg: '%0' }
   - { reg: '$x1', virtual-reg: '%1' }
   - { reg: '$z0', virtual-reg: '%2' }
   - { reg: '$z17', virtual-reg: '%3' }
-frameInfo:
-  isFrameAddressTaken: false
-  isReturnAddressTaken: false
-  hasStackMap:     false
-  hasPatchPoint:   false
-  stackSize:       0
-  offsetAdjustment: 0
-  maxAlignment:    1
-  adjustsStack:    false
-  hasCalls:        false
-  stackProtector:  ''
-  functionContext: ''
-  maxCallFrameSize: 0
-  cvBytesOfCalleeSavedRegisters: 0
-  hasOpaqueSPAdjustment: false
-  hasVAStart:      false
-  hasMustTailInVarArgFunc: false
-  hasTailCall:     false
-  isCalleeSavedInfoValid: false
-  localFrameSize:  0
-  savePoint:       ''
-  restorePoint:    ''
-fixedStack:      []
-stack:           []
-entry_values:    []
-callSites:       []
-debugValueSubstitutions: []
-constants:       []
-machineFunctionInfo: {}
+stack:
+  - { id: 0, name: '', type: default, offset: 0, size: 32, alignment: 16,
+      stack-id: scalable-vector, callee-saved-register: '', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
 body:             |
   bb.0.entry:
     liveins: $x0, $x1, $z0, $z17
 
     ; CHECK-LABEL: form_4x_tuple_many_live
-    ; CHECK: stp	d11, d10, [sp, #-32]!
-    ; CHECK-NEXT: lsl	x9, x1, #1
-    ; CHECK-NEXT: stp	d9, d8, [sp, #16]
-    ; CHECK-NEXT: ptrue	pn8.b
+    ; CHECK: stp d11, d10, [sp, #-48]!
+    ; CHECK-NEXT: stp d9, d8, [sp, #16]
+    ; CHECK-NEXT: str x29, [sp, #32]
+    ; CHECK-NEXT: addvl sp, sp, #-2
+    ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 16 * VG
+    ; CHECK-NEXT: .cfi_offset w29, -16
+    ; CHECK-NEXT: .cfi_offset b8, -24
+    ; CHECK-NEXT: .cfi_offset b9, -32
+    ; CHECK-NEXT: .cfi_offset b10, -40
+    ; CHECK-NEXT: .cfi_offset b11, -48
+    ; CHECK-NEXT: lsl x9, x1, #1
+    ; CHECK-NEXT: ptrue pn8.b
+    ; CHECK-NEXT: mov w8, wzr
     ; CHECK-NEXT: ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0]
     ; CHECK-NEXT: ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x1]
-    ; CHECK-NEXT: mov	w8, wzr
-    ; CHECK-NEXT: add	x10, x9, x1
+    ; CHECK-NEXT: ptrue p0.b
+    ; CHECK-NEXT: add x10, x9, x1
     ; CHECK-NEXT: ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x9]
-    ; CHECK-NEXT: ptrue	p0.b
     ; CHECK-NEXT: ld1b { z4.b - z7.b }, pn8/z, [x0, x10]
-    ; CHECK-NEXT: mov	z8.d, z16.d
-    ; CHECK-NEXT: mov	z9.d, z18.d
-    ; CHECK-NEXT: mov	z21.d, z22.d
-    ; CHECK-NEXT: mov	z10.d, z19.d
-    ; CHECK-NEXT: mov	z22.d, z23.d
-    ; CHECK-NEXT: mov	z25.d, z26.d
-    ; CHECK-NEXT: mov	z11.d, z4.d
-    ; CHECK-NEXT: mov	z23.d, z5.d
-    ; CHECK-NEXT: mov	z26.d, z27.d
-    ; CHECK-NEXT: mov	z27.d, z6.d
-    ; CHECK-NEXT: mov	z29.d, z30.d
-    ; CHECK-NEXT: mov	z30.d, z31.d
-    ; CHECK-NEXT: mov	z31.d, z7.d
+    ; CHECK-NEXT: mov z8.d, z16.d
+    ; CHECK-NEXT: mov z9.d, z18.d
+    ; CHECK-NEXT: mov z21.d, z22.d
+    ; CHECK-NEXT: mov z10.d, z19.d
+    ; CHECK-NEXT: mov z22.d, z23.d
+    ; CHECK-NEXT: mov z25.d, z26.d
+    ; CHECK-NEXT: mov z11.d, z4.d
+    ; CHECK-NEXT: mov z23.d, z5.d
+    ; CHECK-NEXT: mov z26.d, z27.d
+    ; CHECK-NEXT: mov z27.d, z6.d
+    ; CHECK-NEXT: mov z29.d, z30.d
+    ; CHECK-NEXT: mov z30.d, z31.d
+    ; CHECK-NEXT: mov z31.d, z7.d
     ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z8.b - z11.b }, z0.b[0]
     ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
     ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
     ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
-    ; CHECK-NEXT: ldp	d9, d8, [sp, #16]
     ; CHECK-NEXT: st1b { z0.b }, p0, [x0]
     ; CHECK-NEXT: st1b { z17.b }, p0, [x0]
-    ; CHECK-NEXT: ldp	d11, d10, [sp], #32
+    ; CHECK-NEXT: addvl sp, sp, #2
+    ; CHECK-NEXT: ldp d9, d8, [sp, #16]
+    ; CHECK-NEXT: ldr x29, [sp, #32]
+    ; CHECK-NEXT: ldp d11, d10, [sp], #48
     ; CHECK-NEXT: ret
 
+    %0:gpr64common = COPY $x0
+    %1:gpr64 = COPY $x1
     %2:zpr = COPY $z0
     %3:zpr = COPY $z17
-    %1:gpr64 = COPY $x1
-    %0:gpr64common = COPY $x0
-    %27:matrixindexgpr32_8_11 = COPY $wzr
-    %14:gpr64 = UBFMXri %1, 63, 62
+    %5:matrixindexgpr32_8_11 = COPY $wzr
+    %6:gpr64 = UBFMXri %1, 63, 62
     %pred:pnr_p8to15 = PTRUE_C_B implicit $vg
-    %4:zpr4stridedorcontiguous = LD1B_4Z_IMM_PSEUDO %pred, %0, 0
-    %20:gpr64 = ADDXrr %14, %1
-    %9:zpr4stridedorcontiguous = LD1B_4Z_PSEUDO %pred, %0, %1
-    %15:zpr4stridedorcontiguous = LD1B_4Z_PSEUDO %pred, %0, %14
-    %21:zpr4stridedorcontiguous = LD1B_4Z_PSEUDO %pred, %0, %20
-    %26:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %4.zsub0, %9.zsub0, %15.zsub0, %21.zsub0
-    $za = UDOT_VG4_M4ZZI_BtoS $za, %27, 0, %26, undef %28:zpr_4b, 0
-    %29:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %4.zsub1, %9.zsub1, %15.zsub1, %21.zsub1
-    $za = UDOT_VG4_M4ZZI_BtoS $za, %27, 0, %29, undef %30:zpr_4b, 0
-    %31:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %4.zsub2, %9.zsub2, %15.zsub2, %21.zsub2
-    %35:ppr_3b = PTRUE_B 31, implicit $vg
-    $za = UDOT_VG4_M4ZZI_BtoS $za, %27, 0, %31, undef %32:zpr_4b, 0
-    %33:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %4.zsub3, %9.zsub3, %15.zsub3, %21.zsub3
-    $za = UDOT_VG4_M4ZZI_BtoS $za, %27, 0, %33, undef %34:zpr_4b, 0
-    ST1B_IMM %2, %35, %0, 0 :: (store (<vscale x 1 x s128>) into %ir.ptr)
-    ST1B_IMM %3, %35, %0, 0 :: (store (<vscale x 1 x s128>) into %ir.ptr)
+    %7:ppr_3b = PTRUE_B 31, implicit $vg
+    %8:gpr64 = ADDXrr %6, %1
+    %9:zpr4stridedorcontiguous  = LD1B_4Z_IMM_PSEUDO %pred, %0, 0
+    %10:zpr4stridedorcontiguous = LD1B_4Z_PSEUDO %pred, %0, %1
+    %11:zpr4stridedorcontiguous = LD1B_4Z_PSEUDO %pred, %0, %6
+    %12:zpr4stridedorcontiguous = LD1B_4Z_PSEUDO %pred, %0, %8
+    %13:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %9.zsub0, %10.zsub0, %11.zsub0, %12.zsub0
+    %14:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %9.zsub1, %10.zsub1, %11.zsub1, %12.zsub1
+    %15:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %9.zsub2, %10.zsub2, %11.zsub2, %12.zsub2
+    %16:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %9.zsub3, %10.zsub3, %11.zsub3, %12.zsub3
+    $za = UDOT_VG4_M4ZZI_BtoS $za, %5, 0, %13, undef %28:zpr_4b, 0
+    $za = UDOT_VG4_M4ZZI_BtoS $za, %5, 0, %14, undef %30:zpr_4b, 0
+    $za = UDOT_VG4_M4ZZI_BtoS $za, %5, 0, %15, undef %32:zpr_4b, 0
+    $za = UDOT_VG4_M4ZZI_BtoS $za, %5, 0, %16, undef %34:zpr_4b, 0
+    ST1B_IMM %2, %7, %0, 0 :: (store (<vscale x 1 x s128>) into %stack.0)
+    ST1B_IMM %3, %7, %0, 0 :: (store (<vscale x 1 x s128>) into %stack.0)
     RET_ReallyLR
+...
+---
+name:            form_4x_tuple_allocation_order
+tracksRegLiveness: true
+liveins:
+  - { reg: '$x0', virtual-reg: '%0' }
+  - { reg: '$x1', virtual-reg: '%1' }
+  - { reg: '$z0', virtual-reg: '%2' }
+stack:
+  - { id: 0, name: '', type: default, offset: 0, size: 32, alignment: 16,
+      stack-id: scalable-vector, callee-saved-register: '', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+body:             |
+  bb.0.entry:
+    liveins: $x0, $x1, $z0
+
+    ; CHECK: str x29, [sp, #-16]!
+    ; CHECK-NEXT: addvl	sp, sp, #-2
+    ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+    ; CHECK-NEXT: .cfi_offset w29, -16
+    ; CHECK-NEXT: lsl x9, x1, #1
+    ; CHECK-NEXT: ptrue	pn8.b
+    ; CHECK-NEXT: mov w8, wzr
+    ; CHECK-NEXT: ptrue	p0.b
+    ; CHECK-NEXT: add x10, x9, x1
+    ; CHECK-NEXT: ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
+    ; CHECK-NEXT: ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
+    ; CHECK-NEXT: ld1b { z17.b, z21.b, z25.b, z29.b }, pn8/z, [x0, x1]
+    ; CHECK-NEXT: ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0]
+    ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
+    ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
+    ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
+    ; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
+    ; CHECK-NEXT: st1b { z0.b }, p0, [x0]
+    ; CHECK-NEXT: addvl	sp, sp, #2
+    ; CHECK-NEXT: ldr x29, [sp], #16
+    ; CHECK-NEXT: ret
 
+    %0:gpr64common = COPY $x0
+    %1:gpr64 = COPY $x1
+    %2:zpr = COPY $z0
+    %5:matrixindexgpr32_8_11 = COPY $wzr
+    %6:gpr64 = UBFMXri %1, 63, 62
+    %pred:pnr_p8to15 = PTRUE_C_B implicit $vg
+    %7:ppr_3b = PTRUE_B 31, implicit $vg
+    %8:gpr64 = ADDXrr %6, %1
+    %9:zpr4stridedorcontiguous = LD1B_4Z_PSEUDO %pred, %0, %8
+    %10:zpr4stridedorcontiguous = LD1B_4Z_PSEUDO %pred, %0, %6
+    %11:zpr4stridedorcontiguous = LD1B_4Z_PSEUDO %pred, %0, %1
+    %12:zpr4stridedorcontiguous  = LD1B_4Z_IMM_PSEUDO %pred, %0, 0
+    %13:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %12.zsub0, %11.zsub0, %10.zsub0, %9.zsub0
+    %14:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %12.zsub1, %11.zsub1, %10.zsub1, %9.zsub1
+    %15:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %12.zsub2, %11.zsub2, %10.zsub2, %9.zsub2
+    %16:zpr4mul4 = FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO %12.zsub3, %11.zsub3, %10.zsub3, %9.zsub3
+    $za = UDOT_VG4_M4ZZI_BtoS $za, %5, 0, %13, undef %28:zpr_4b, 0
+    $za = UDOT_VG4_M4ZZI_BtoS $za, %5, 0, %14, undef %30:zpr_4b, 0
+    $za = UDOT_VG4_M4ZZI_BtoS $za, %5, 0, %15, undef %32:zpr_4b, 0
+    $za = UDOT_VG4_M4ZZI_BtoS $za, %5, 0, %16, undef %34:zpr_4b, 0
+    ST1B_IMM %2, %7, %0, 0 :: (store (<vscale x 1 x s128>) into %stack.0)
+    RET_ReallyLR
 ...
