[llvm] [AArch64][SME] Create separate FORM_TRANSPOSE pseudos for ZPR & ZPRMul classes (PR #123755)

Kerry McLaughlin via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 23 08:30:51 PST 2025


https://github.com/kmclaughlin-arm updated https://github.com/llvm/llvm-project/pull/123755

From d1c9a3a109c78f3eeaf3e1688c7fa223cbb42f84 Mon Sep 17 00:00:00 2001
From: Kerry McLaughlin <kerry.mclaughlin at arm.com>
Date: Thu, 16 Jan 2025 14:20:56 +0000
Subject: [PATCH 1/3] Add tests for udot (multi, single) using FORM_TRANSPOSED
 pseudos

---
 .../AArch64/sme2-intrinsics-int-dots.ll       | 511 ++++++++++++++++++
 1 file changed, 511 insertions(+)

diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
index 86ed63d743713c..df422df5c13c81 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
@@ -319,6 +319,39 @@ define void @udot_single_za32_u16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused,
   ret void
 }
 
+define void @udot_single_za32_u16_vg1x2_tuple(ptr %ptr, i64 %stride, <vscale x 8 x i16> %zn) #0 {
+; CHECK-LABEL: udot_single_za32_u16_vg1x2_tuple:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    add x9, x0, x1
+; CHECK-NEXT:    ld1h { z2.h, z3.h }, pn8/z, [x0]
+; CHECK-NEXT:    ld1h { z6.h, z7.h }, pn8/z, [x9]
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z7.d
+; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z5.h, z6.h }, z0.h
+; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z3.h, z4.h }, z0.h
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
+  %1 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %0, ptr %ptr)
+  %2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 0
+  %3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 1
+  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
+  %4 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx2)
+  %5 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %4, 0
+  %6 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %4, 1
+  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %2, <vscale x 8 x i16> %5, <vscale x 8 x i16> %zn)
+  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %3, <vscale x 8 x i16> %6, <vscale x 8 x i16> %zn)
+  ret void
+}
+
 define void @udot_single_za32_u16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) #0 {
 ; CHECK-LABEL: udot_single_za32_u16_vg1x4:
 ; CHECK:       // %bb.0:
@@ -332,6 +365,102 @@ define void @udot_single_za32_u16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused,
   ret void
 }
 
+define void @udot_single_za32_u16_vg1x4_tuple(ptr %ptr, i64 %stride, <vscale x 8 x i16> %zn) #0 {
+; CHECK-LABEL: udot_single_za32_u16_vg1x4_tuple:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-13
+; CHECK-NEXT:    add x10, x1, x1, lsl #1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    str z19, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x9, x0, x1
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    str z18, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z17, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z16, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z15, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z14, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z12, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z8, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1h { z4.h - z7.h }, pn8/z, [x0]
+; CHECK-NEXT:    ld1h { z28.h - z31.h }, pn8/z, [x9]
+; CHECK-NEXT:    add x9, x0, x10
+; CHECK-NEXT:    ld1h { z24.h - z27.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ld1h { z8.h - z11.h }, pn8/z, [x9]
+; CHECK-NEXT:    mov z1.d, z4.d
+; CHECK-NEXT:    mov z2.d, z28.d
+; CHECK-NEXT:    mov z12.d, z5.d
+; CHECK-NEXT:    mov z3.d, z24.d
+; CHECK-NEXT:    mov z4.d, z8.d
+; CHECK-NEXT:    mov z13.d, z29.d
+; CHECK-NEXT:    mov z14.d, z25.d
+; CHECK-NEXT:    mov z15.d, z9.d
+; CHECK-NEXT:    mov z16.d, z6.d
+; CHECK-NEXT:    mov z17.d, z30.d
+; CHECK-NEXT:    mov z18.d, z26.d
+; CHECK-NEXT:    mov z19.d, z10.d
+; CHECK-NEXT:    mov z8.d, z7.d
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z1.h - z4.h }, z0.h
+; CHECK-NEXT:    mov z9.d, z31.d
+; CHECK-NEXT:    mov z10.d, z27.d
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z12.h - z15.h }, z0.h
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z16.h - z19.h }, z0.h
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z8.h - z11.h }, z0.h
+; CHECK-NEXT:    ldr z19, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #13
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
+  %1 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %ptr)
+  %2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 0
+  %3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 1
+  %4 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 2
+  %5 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 3
+  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
+  %6 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx2)
+  %7 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 0
+  %8 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 1
+  %9 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 2
+  %10 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 3
+  %mul3 = shl i64 %stride, 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
+  %11 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx4)
+  %12 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 0
+  %13 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 1
+  %14 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 2
+  %15 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 3
+  %mul5 = mul i64 %stride, 3
+  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
+  %16 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx6)
+  %17 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 0
+  %18 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 1
+  %19 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 2
+  %20 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 3
+  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %2, <vscale x 8 x i16> %7, <vscale x 8 x i16> %12, <vscale x 8 x i16> %17, <vscale x 8 x i16> %zn)
+  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %3, <vscale x 8 x i16> %8, <vscale x 8 x i16> %13, <vscale x 8 x i16> %18, <vscale x 8 x i16> %zn)
+  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %4, <vscale x 8 x i16> %9, <vscale x 8 x i16> %14, <vscale x 8 x i16> %19, <vscale x 8 x i16> %zn)
+  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %5, <vscale x 8 x i16> %10, <vscale x 8 x i16> %15, <vscale x 8 x i16> %20, <vscale x 8 x i16> %zn)
+  ret void
+}
+
 define void @udot_single_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2) #0 {
 ; CHECK-LABEL: udot_single_za32_u8_vg1x2:
 ; CHECK:       // %bb.0:
@@ -397,6 +526,38 @@ define void @usdot_single_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused,
   ret void
 }
 
+define void @usdot_single_za32_u16_vg1x2_tuple(ptr %ptr, i64 %stride, <vscale x 16 x i8> %zn) #0 {
+; CHECK-LABEL: usdot_single_za32_u16_vg1x2_tuple:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    ld1b { z2.b, z3.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z6.b, z7.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z7.d
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z5.b, z6.b }, z0.b
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z3.b, z4.b }, z0.b
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
+  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
+  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
+  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
+  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
+  %4 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
+  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 0
+  %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 1
+  call void @llvm.aarch64.sme.usdot.single.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> %zn)
+  call void @llvm.aarch64.sme.usdot.single.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6, <vscale x 16 x i8> %zn)
+  ret void
+}
+
 define void @usdot_single_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) #0 {
 ; CHECK-LABEL: usdot_single_za32_u8_vg1x4:
 ; CHECK:       // %bb.0:
@@ -410,6 +571,100 @@ define void @usdot_single_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused,
   ret void
 }
 
+define void @usdot_single_za32_u16_vg1x4_tuple(ptr %ptr, i64 %stride, <vscale x 16 x i8> %zn) #0 {
+; CHECK-LABEL: usdot_single_za32_u16_vg1x4_tuple:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-13
+; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    str z19, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    str z18, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z17, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z16, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z15, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z14, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z12, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z8, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z24.b - z27.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z4.b - z7.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z8.b - z11.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    add x9, x9, x1
+; CHECK-NEXT:    ld1b { z28.b - z31.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    mov z1.d, z24.d
+; CHECK-NEXT:    mov z2.d, z4.d
+; CHECK-NEXT:    mov z3.d, z8.d
+; CHECK-NEXT:    mov z12.d, z25.d
+; CHECK-NEXT:    mov z13.d, z5.d
+; CHECK-NEXT:    mov z14.d, z9.d
+; CHECK-NEXT:    mov z4.d, z28.d
+; CHECK-NEXT:    mov z15.d, z29.d
+; CHECK-NEXT:    mov z16.d, z26.d
+; CHECK-NEXT:    mov z17.d, z6.d
+; CHECK-NEXT:    mov z18.d, z10.d
+; CHECK-NEXT:    mov z19.d, z30.d
+; CHECK-NEXT:    mov z28.d, z27.d
+; CHECK-NEXT:    mov z29.d, z7.d
+; CHECK-NEXT:    mov z30.d, z11.d
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z1.b - z4.b }, z0.b
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z12.b - z15.b }, z0.b
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b
+; CHECK-NEXT:    ldr z19, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #13
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
+  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
+  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
+  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
+  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 2
+  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 3
+  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
+  %6 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
+  %7 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 0
+  %8 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 1
+  %9 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 2
+  %10 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 3
+  %mul3 = shl i64 %stride, 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
+  %11 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4)
+  %12 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 0
+  %13 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 1
+  %14 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 2
+  %15 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 3
+  %mul5 = mul i64 %stride, 3
+  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
+  %16 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6)
+  %17 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 0
+  %18 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 1
+  %19 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 2
+  %20 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 3
+  call void @llvm.aarch64.sme.usdot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %7, <vscale x 16 x i8> %12, <vscale x 16 x i8> %17, <vscale x 16 x i8> %zn)
+  call void @llvm.aarch64.sme.usdot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> %zn)
+  call void @llvm.aarch64.sme.usdot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> %zn)
+  call void @llvm.aarch64.sme.usdot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> %zn)
+  ret void
+}
 
 ; == Multi, single (signed) ==
 
@@ -426,6 +681,39 @@ define void @sdot_single_za32_u16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused,
   ret void
 }
 
+define void @sdot_single_za32_u16_vg1x2_tuple(ptr %ptr, i64 %stride, <vscale x 8 x i16> %zn) #0 {
+; CHECK-LABEL: sdot_single_za32_u16_vg1x2_tuple:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    add x9, x0, x1
+; CHECK-NEXT:    ld1h { z2.h, z3.h }, pn8/z, [x0]
+; CHECK-NEXT:    ld1h { z6.h, z7.h }, pn8/z, [x9]
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z7.d
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z5.h, z6.h }, z0.h
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z3.h, z4.h }, z0.h
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
+  %1 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %0, ptr %ptr)
+  %2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 0
+  %3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 1
+  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
+  %4 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx2)
+  %5 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %4, 0
+  %6 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %4, 1
+  call void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %2, <vscale x 8 x i16> %5, <vscale x 8 x i16> %zn)
+  call void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv8i16(i32 0, <vscale x 8 x i16> %3, <vscale x 8 x i16> %6, <vscale x 8 x i16> %zn)
+  ret void
+}
+
 define void @sdot_single_za32_u16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) #0 {
 ; CHECK-LABEL: sdot_single_za32_u16_vg1x4:
 ; CHECK:       // %bb.0:
@@ -439,6 +727,102 @@ define void @sdot_single_za32_u16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused,
   ret void
 }
 
+define void @sdot_single_za32_u16_vg1x4_tuple(ptr %ptr, i64 %stride, <vscale x 8 x i16> %zn) #0 {
+; CHECK-LABEL: sdot_single_za32_u16_vg1x4_tuple:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-13
+; CHECK-NEXT:    add x10, x1, x1, lsl #1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    str z19, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x9, x0, x1
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    str z18, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z17, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z16, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z15, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z14, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z12, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z8, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1h { z4.h - z7.h }, pn8/z, [x0]
+; CHECK-NEXT:    ld1h { z28.h - z31.h }, pn8/z, [x9]
+; CHECK-NEXT:    add x9, x0, x10
+; CHECK-NEXT:    ld1h { z24.h - z27.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ld1h { z8.h - z11.h }, pn8/z, [x9]
+; CHECK-NEXT:    mov z1.d, z4.d
+; CHECK-NEXT:    mov z2.d, z28.d
+; CHECK-NEXT:    mov z12.d, z5.d
+; CHECK-NEXT:    mov z3.d, z24.d
+; CHECK-NEXT:    mov z4.d, z8.d
+; CHECK-NEXT:    mov z13.d, z29.d
+; CHECK-NEXT:    mov z14.d, z25.d
+; CHECK-NEXT:    mov z15.d, z9.d
+; CHECK-NEXT:    mov z16.d, z6.d
+; CHECK-NEXT:    mov z17.d, z30.d
+; CHECK-NEXT:    mov z18.d, z26.d
+; CHECK-NEXT:    mov z19.d, z10.d
+; CHECK-NEXT:    mov z8.d, z7.d
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z1.h - z4.h }, z0.h
+; CHECK-NEXT:    mov z9.d, z31.d
+; CHECK-NEXT:    mov z10.d, z27.d
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z12.h - z15.h }, z0.h
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z16.h - z19.h }, z0.h
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z8.h - z11.h }, z0.h
+; CHECK-NEXT:    ldr z19, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #13
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
+  %1 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %ptr)
+  %2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 0
+  %3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 1
+  %4 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 2
+  %5 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %1, 3
+  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
+  %6 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx2)
+  %7 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 0
+  %8 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 1
+  %9 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 2
+  %10 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %6, 3
+  %mul3 = shl i64 %stride, 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
+  %11 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx4)
+  %12 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 0
+  %13 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 1
+  %14 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 2
+  %15 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %11, 3
+  %mul5 = mul i64 %stride, 3
+  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
+  %16 = tail call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %0, ptr %arrayidx6)
+  %17 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 0
+  %18 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 1
+  %19 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 2
+  %20 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %16, 3
+  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %2, <vscale x 8 x i16> %7, <vscale x 8 x i16> %12, <vscale x 8 x i16> %17, <vscale x 8 x i16> %zn)
+  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %3, <vscale x 8 x i16> %8, <vscale x 8 x i16> %13, <vscale x 8 x i16> %18, <vscale x 8 x i16> %zn)
+  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %4, <vscale x 8 x i16> %9, <vscale x 8 x i16> %14, <vscale x 8 x i16> %19, <vscale x 8 x i16> %zn)
+  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv8i16(i32 0, <vscale x 8 x i16> %5, <vscale x 8 x i16> %10, <vscale x 8 x i16> %15, <vscale x 8 x i16> %20, <vscale x 8 x i16> %zn)
+  ret void
+}
+
 define void @sdot_single_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2) #0 {
 ; CHECK-LABEL: sdot_single_za32_u8_vg1x2:
 ; CHECK:       // %bb.0:
@@ -504,6 +888,38 @@ define void @sudot_single_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused,
   ret void
 }
 
+define void @sudot_single_za32_u16_vg1x2_tuple(ptr %ptr, i64 %stride, <vscale x 16 x i8> %zn) #0 {
+; CHECK-LABEL: sudot_single_za32_u16_vg1x2_tuple:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    ld1b { z2.b, z3.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z6.b, z7.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z7.d
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z5.b, z6.b }, z0.b
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z3.b, z4.b }, z0.b
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
+  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
+  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
+  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
+  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
+  %4 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
+  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 0
+  %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %4, 1
+  call void @llvm.aarch64.sme.sudot.single.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %5, <vscale x 16 x i8> %zn)
+  call void @llvm.aarch64.sme.sudot.single.za32.vg1x2.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %6, <vscale x 16 x i8> %zn)
+  ret void
+}
+
 define void @sudot_single_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) #0 {
 ; CHECK-LABEL: sudot_single_za32_u8_vg1x4:
 ; CHECK:       // %bb.0:
@@ -517,6 +933,101 @@ define void @sudot_single_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused,
   ret void
 }
 
+define void @sudot_single_za32_u16_vg1x4_tuple(ptr %ptr, i64 %stride, <vscale x 16 x i8> %zn) #0 {
+; CHECK-LABEL: sudot_single_za32_u16_vg1x4_tuple:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-13
+; CHECK-NEXT:    lsl x9, x1, #1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    ptrue pn8.b
+; CHECK-NEXT:    str z19, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    str z18, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z17, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z16, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z15, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z14, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z12, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z8, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z24.b - z27.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z4.b - z7.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z8.b - z11.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    add x9, x9, x1
+; CHECK-NEXT:    ld1b { z28.b - z31.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    mov z1.d, z24.d
+; CHECK-NEXT:    mov z2.d, z4.d
+; CHECK-NEXT:    mov z3.d, z8.d
+; CHECK-NEXT:    mov z12.d, z25.d
+; CHECK-NEXT:    mov z13.d, z5.d
+; CHECK-NEXT:    mov z14.d, z9.d
+; CHECK-NEXT:    mov z4.d, z28.d
+; CHECK-NEXT:    mov z15.d, z29.d
+; CHECK-NEXT:    mov z16.d, z26.d
+; CHECK-NEXT:    mov z17.d, z6.d
+; CHECK-NEXT:    mov z18.d, z10.d
+; CHECK-NEXT:    mov z19.d, z30.d
+; CHECK-NEXT:    mov z28.d, z27.d
+; CHECK-NEXT:    mov z29.d, z7.d
+; CHECK-NEXT:    mov z30.d, z11.d
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z1.b - z4.b }, z0.b
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z12.b - z15.b }, z0.b
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b
+; CHECK-NEXT:    ldr z19, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z18, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z17, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #13
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
+  %1 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %ptr)
+  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
+  %3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
+  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 2
+  %5 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 3
+  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
+  %6 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx2)
+  %7 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 0
+  %8 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 1
+  %9 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 2
+  %10 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %6, 3
+  %mul3 = shl i64 %stride, 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
+  %11 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx4)
+  %12 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 0
+  %13 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 1
+  %14 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 2
+  %15 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %11, 3
+  %mul5 = mul i64 %stride, 3
+  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
+  %16 = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %0, ptr %arrayidx6)
+  %17 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 0
+  %18 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 1
+  %19 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 2
+  %20 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %16, 3
+  call void @llvm.aarch64.sme.sudot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %2, <vscale x 16 x i8> %7, <vscale x 16 x i8> %12, <vscale x 16 x i8> %17, <vscale x 16 x i8> %zn)
+  call void @llvm.aarch64.sme.sudot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %3, <vscale x 16 x i8> %8, <vscale x 16 x i8> %13, <vscale x 16 x i8> %18, <vscale x 16 x i8> %zn)
+  call void @llvm.aarch64.sme.sudot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %4, <vscale x 16 x i8> %9, <vscale x 16 x i8> %14, <vscale x 16 x i8> %19, <vscale x 16 x i8> %zn)
+  call void @llvm.aarch64.sme.sudot.single.za32.vg1x4.nxv16i8(i32 0, <vscale x 16 x i8> %5, <vscale x 16 x i8> %10, <vscale x 16 x i8> %15, <vscale x 16 x i8> %20, <vscale x 16 x i8> %zn)
+  ret void
+}
+
 ; == Multi, indexed (unsigned) ==
 
 define void @udot_lane_za32_u16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2) #0 {

From 463c0fa28d528e41fed19ce14db5035f844a568d Mon Sep 17 00:00:00 2001
From: Kerry McLaughlin <kerry.mclaughlin at arm.com>
Date: Thu, 16 Jan 2025 10:05:50 +0000
Subject: [PATCH 2/3] [AArch64][SME] Create separate FORM_TRANSPOSE pseudos for
 ZPR & ZPRMul classes

The FORM_TRANSPOSED_REG_TUPLE pseudos currently use either the ZPR2Mul2 or
ZPR4Mul4 register class for their output. To extend them to other multi-vector
intrinsics, which instead create a ZPR2/ZPR4 REG_SEQUENCE, new pseudos taking
the ZPR2/ZPR4 classes have been added and the existing ones renamed to
FORM_TRANSPOSED_REG_TUPLE_MULX2/MULX4_PSEUDO.
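
As a minimal sketch of the intended split, in the MIR-like notation already
used by the comment in AArch64ISelLowering.cpp below (the virtual register
numbers are illustrative only, not taken from real compiler output):

  ; Multi, indexed patterns keep the multiple-of-2/4 tuple classes:
  ;   %9:zpr2mul2 = FORM_TRANSPOSED_REG_TUPLE_MULX2_PSEUDO %5:zpr, %8:zpr
  ;
  ; Multi, single patterns can now form a plain ZPR2/ZPR4 tuple instead of
  ; emitting a REG_SEQUENCE directly:
  ;   %9:zpr2 = FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO %5:zpr, %8:zpr

As before, both variants fall back to a REG_SEQUENCE in the post-isel hook
when their inputs are not copies from a StridedOrContiguous class.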
---
 .../AArch64/AArch64ExpandPseudoInsts.cpp      |   2 +
 .../Target/AArch64/AArch64ISelLowering.cpp    |  10 +-
 llvm/lib/Target/AArch64/AArch64InstrInfo.h    |  12 +
 .../Target/AArch64/AArch64RegisterInfo.cpp    |  14 +-
 llvm/lib/Target/AArch64/SMEInstrFormats.td    |  26 +-
 .../AArch64/sme2-intrinsics-int-dots.ll       | 412 ++++++++----------
 6 files changed, 223 insertions(+), 253 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index b44c48afe705ba..239070925aa3aa 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -1755,8 +1755,10 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
          MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
          AArch64::LDNT1D_4Z, AArch64::LDNT1D_4Z_STRIDED);
    case AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO:
+   case AArch64::FORM_TRANSPOSED_REG_TUPLE_MULX2_PSEUDO:
      return expandFormTuplePseudo(MBB, MBBI, NextMBBI, 2);
    case AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO:
+   case AArch64::FORM_TRANSPOSED_REG_TUPLE_MULX4_PSEUDO:
      return expandFormTuplePseudo(MBB, MBBI, NextMBBI, 4);
   }
   return false;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d4a114c275fb76..289ada6b4f86f3 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -8759,7 +8759,7 @@ static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {
 //   %6:zpr2stridedorcontiguous = LD1B_2Z_PSEUDO ..
 //   %7:zpr = COPY %6.zsub1:zpr2stridedorcontiguous
 //   %8:zpr = COPY %6.zsub0:zpr2stridedorcontiguous
-//   %9:zpr2mul2 = FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO %5:zpr, %8:zpr
+//   %9:zpr2mul2 = FORM_TRANSPOSED_REG_TUPLE_MULX2_PSEUDO %5:zpr, %8:zpr
 //
 bool shouldUseFormStridedPseudo(MachineInstr &MI) {
   MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
@@ -8767,9 +8767,11 @@ bool shouldUseFormStridedPseudo(MachineInstr &MI) {
   const TargetRegisterClass *RegClass = nullptr;
   switch (MI.getOpcode()) {
   case AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO:
+  case AArch64::FORM_TRANSPOSED_REG_TUPLE_MULX2_PSEUDO:
     RegClass = &AArch64::ZPR2StridedOrContiguousRegClass;
     break;
   case AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO:
+  case AArch64::FORM_TRANSPOSED_REG_TUPLE_MULX4_PSEUDO:
     RegClass = &AArch64::ZPR4StridedOrContiguousRegClass;
     break;
   default:
@@ -8824,14 +8826,14 @@ void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
     }
   }
 
-  if (MI.getOpcode() == AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO ||
-      MI.getOpcode() == AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO) {
+  const AArch64InstrInfo *TII =
+      MI.getMF()->getSubtarget<AArch64Subtarget>().getInstrInfo();
+  if (TII->isFormTransposedOpcode(MI.getOpcode())) {
     // If input values to the FORM_TRANSPOSED_REG_TUPLE pseudo aren't copies
     // from a StridedOrContiguous class, fall back on REG_SEQUENCE node.
     if (shouldUseFormStridedPseudo(MI))
       return;
 
-    const TargetInstrInfo *TII = Subtarget->getInstrInfo();
     MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                       TII->get(TargetOpcode::REG_SEQUENCE),
                                       MI.getOperand(0).getReg());
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index e37f70f7d985de..f45d868546e7f4 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -548,6 +548,18 @@ class AArch64InstrInfo final : public AArch64GenInstrInfo {
                                                Register TargetReg,
                                                bool FrameSetup) const;
 
+  bool isFormTransposedOpcode(unsigned Opc) const {
+    switch (Opc) {
+    case AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO:
+    case AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO:
+    case AArch64::FORM_TRANSPOSED_REG_TUPLE_MULX2_PSEUDO:
+    case AArch64::FORM_TRANSPOSED_REG_TUPLE_MULX4_PSEUDO:
+      return true;
+    default:
+      return false;
+    }
+  }
+
 #define GET_INSTRINFO_HELPER_DECLS
 #include "AArch64GenInstrInfo.inc"
 
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 5973b63b5a8024..773d9946c192f1 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -1109,13 +1109,14 @@ bool AArch64RegisterInfo::getRegAllocationHints(
   // so we add the strided registers as a hint.
   unsigned RegID = MRI.getRegClass(VirtReg)->getID();
   // Look through uses of the register for FORM_TRANSPOSED_REG_TUPLE.
+  const AArch64InstrInfo *TII =
+      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
   if ((RegID == AArch64::ZPR2StridedOrContiguousRegClassID ||
        RegID == AArch64::ZPR4StridedOrContiguousRegClassID) &&
-      any_of(MRI.use_nodbg_instructions(VirtReg), [](const MachineInstr &Use) {
-        return Use.getOpcode() ==
-                   AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO ||
-               Use.getOpcode() == AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO;
-      })) {
+      any_of(MRI.use_nodbg_instructions(VirtReg),
+             [&TII](const MachineInstr &Use) {
+               return TII->isFormTransposedOpcode(Use.getOpcode());
+             })) {
     const TargetRegisterClass *StridedRC =
         RegID == AArch64::ZPR2StridedOrContiguousRegClassID
             ? &AArch64::ZPR2StridedRegClass
@@ -1130,8 +1131,7 @@ bool AArch64RegisterInfo::getRegAllocationHints(
   }
 
   for (MachineInstr &MI : MRI.def_instructions(VirtReg)) {
-    if (MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO &&
-        MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO)
+    if (!TII->isFormTransposedOpcode(MI.getOpcode()))
       return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
                                                        MF, VRM);
 
diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index 81004e70dc179b..918637a9c6d3cb 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -45,20 +45,26 @@ def am_sme_indexed_b4 : ComplexPattern<iPTR, 2, "SelectAddrModeIndexedSVE<0, 15>
 // If the operands do not match this pattern, the pseudos are expanded
 // to a REG_SEQUENCE using the post-isel hook.
 
-def FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO :
-  Pseudo<(outs ZPR2Mul2:$tup),
-         (ins ZPR:$zn0, ZPR:$zn1), []>, Sched<[]>{
+class sme_form_transpose_x2_pseudo<RegisterClass multi_vector_class>
+    : Pseudo<(outs multi_vector_class:$tup), (ins ZPR:$zn0, ZPR:$zn1), []>,
+      Sched<[]> {
   let hasSideEffects = 0;
   let hasPostISelHook = 1;
 }
 
-def FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO :
-  Pseudo<(outs ZPR4Mul4:$tup),
-         (ins ZPR:$zn0, ZPR:$zn1, ZPR:$zn2, ZPR:$zn3), []>, Sched<[]>{
+def FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO    : sme_form_transpose_x2_pseudo<ZPR2>;
+def FORM_TRANSPOSED_REG_TUPLE_MULX2_PSEUDO : sme_form_transpose_x2_pseudo<ZPR2Mul2>;
+
+class sme_form_transpose_x4_pseudo<RegisterClass multi_vector_class>
+    : Pseudo<(outs multi_vector_class:$tup), (ins ZPR:$zn0, ZPR:$zn1, ZPR:$zn2, ZPR:$zn3), []>,
+      Sched<[]> {
   let hasSideEffects = 0;
   let hasPostISelHook = 1;
 }
 
+def FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO    : sme_form_transpose_x4_pseudo<ZPR4>;
+def FORM_TRANSPOSED_REG_TUPLE_MULX4_PSEUDO : sme_form_transpose_x4_pseudo<ZPR4Mul4>;
+
 def SDTZALoadStore : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisInt<2>]>;
 def AArch64SMELdr : SDNode<"AArch64ISD::SME_ZA_LDR", SDTZALoadStore,
                              [SDNPHasChain, SDNPSideEffect, SDNPMayLoad]>;
@@ -164,14 +170,14 @@ class SME2_ZA_TwoOp_Multi_Single_Pat<string name, SDPatternOperator intrinsic, O
 class SME2_ZA_TwoOp_VG2_Multi_Single_Pat<string name, SDPatternOperator intrinsic, Operand index_ty, ZPRRegOp zpr_ty,
                                          ValueType vt, ComplexPattern tileslice>
     : Pat<(intrinsic (i32 (tileslice MatrixIndexGPR32Op8_11:$base, index_ty:$offset)), vt:$Zn1, vt:$Zn2, vt:$Zm),
-          (!cast<Instruction>(name # _PSEUDO) $base, $offset, (REG_SEQUENCE ZPR2, vt:$Zn1, zsub0, vt:$Zn2, zsub1),
+          (!cast<Instruction>(name # _PSEUDO) $base, $offset, (FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO vt:$Zn1, vt:$Zn2),
                                               zpr_ty:$Zm)>;
 class SME2_ZA_TwoOp_VG4_Multi_Single_Pat<string name, SDPatternOperator intrinsic, Operand index_ty, ZPRRegOp zpr_ty,
                                          ValueType vt, ComplexPattern tileslice>
     : Pat<(intrinsic (i32 (tileslice MatrixIndexGPR32Op8_11:$base, index_ty:$offset)),
                      vt:$Zn1, vt:$Zn2, vt:$Zn3, vt:$Zn4, vt:$Zm),
           (!cast<Instruction>(name # _PSEUDO) $base, $offset,
-                                              (REG_SEQUENCE ZPR4, vt:$Zn1, zsub0, vt:$Zn2, zsub1, vt:$Zn3, zsub2, vt:$Zn4, zsub3),
+                                              (FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO vt:$Zn1, vt:$Zn2, vt:$Zn3, vt:$Zn4),
                                               zpr_ty:$Zm)>;
 
 class SME2_ZA_TwoOp_VG2_Multi_Multi_Pat<string name, SDPatternOperator intrinsic, Operand index_ty, ValueType vt, ComplexPattern tileslice>
@@ -197,14 +203,14 @@ class SME2_ZA_TwoOp_VG2_Multi_Index_Pat<string name, SDPatternOperator intrinsic
                                         Operand imm_ty, ComplexPattern tileslice>
     : Pat<(intrinsic (i32 (tileslice MatrixIndexGPR32Op8_11:$base, index_ty:$offset)), vt:$Zn1, vt:$Zn2, vt:$Zm, (i32 imm_ty:$i)),
           (!cast<Instruction>(name # _PSEUDO) $base, $offset,
-                                              (FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO vt:$Zn1,vt:$Zn2), zpr_ty:$Zm, imm_ty:$i)>;
+                                              (FORM_TRANSPOSED_REG_TUPLE_MULX2_PSEUDO vt:$Zn1,vt:$Zn2), zpr_ty:$Zm, imm_ty:$i)>;
 
 class SME2_ZA_TwoOp_VG4_Multi_Index_Pat<string name, SDPatternOperator intrinsic, Operand index_ty, ZPRRegOp zpr_ty, ValueType vt,
                                         Operand imm_ty, ComplexPattern tileslice>
     : Pat<(intrinsic (i32 (tileslice MatrixIndexGPR32Op8_11:$base, index_ty:$offset)),
                      vt:$Zn1, vt:$Zn2, vt:$Zn3, vt:$Zn4, vt:$Zm, (i32 imm_ty:$i)),
           (!cast<Instruction>(name # _PSEUDO) $base, $offset,
-                                              (FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO vt:$Zn1, vt:$Zn2, vt:$Zn3, vt:$Zn4),
+                                              (FORM_TRANSPOSED_REG_TUPLE_MULX4_PSEUDO vt:$Zn1, vt:$Zn2, vt:$Zn3, vt:$Zn4),
                                               zpr_ty:$Zm, imm_ty:$i)>;
 
 class SME2_Sat_Shift_VG2_Pat<string name, SDPatternOperator intrinsic, ValueType out_vt, ValueType in_vt, Operand imm_ty>
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
index df422df5c13c81..967d168593a400 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
@@ -323,19 +323,21 @@ define void @udot_single_za32_u16_vg1x2_tuple(ptr %ptr, i64 %stride, <vscale x 8
 ; CHECK-LABEL: udot_single_za32_u16_vg1x2_tuple:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    addvl sp, sp, #-3
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    add x9, x0, x1
-; CHECK-NEXT:    ld1h { z2.h, z3.h }, pn8/z, [x0]
-; CHECK-NEXT:    ld1h { z6.h, z7.h }, pn8/z, [x9]
+; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    mov z5.d, z2.d
-; CHECK-NEXT:    mov z4.d, z7.d
-; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z5.h, z6.h }, z0.h
-; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z3.h, z4.h }, z0.h
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1h { z1.h, z9.h }, pn8/z, [x0]
+; CHECK-NEXT:    ld1h { z2.h, z10.h }, pn8/z, [x9]
+; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z1.h, z2.h }, z0.h
+; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z9.h, z10.h }, z0.h
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -369,62 +371,47 @@ define void @udot_single_za32_u16_vg1x4_tuple(ptr %ptr, i64 %stride, <vscale x 8
 ; CHECK-LABEL: udot_single_za32_u16_vg1x4_tuple:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-13
-; CHECK-NEXT:    add x10, x1, x1, lsl #1
+; CHECK-NEXT:    addvl sp, sp, #-11
+; CHECK-NEXT:    add x9, x1, x1, lsl #1
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z19, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    add x9, x0, x1
+; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x10, x0, x1
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z18, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z17, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z16, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z15, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z14, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z13, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z12, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #12, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1h { z4.h - z7.h }, pn8/z, [x0]
-; CHECK-NEXT:    ld1h { z28.h - z31.h }, pn8/z, [x9]
-; CHECK-NEXT:    add x9, x0, x10
-; CHECK-NEXT:    ld1h { z24.h - z27.h }, pn8/z, [x0, x1, lsl #1]
-; CHECK-NEXT:    ld1h { z8.h - z11.h }, pn8/z, [x9]
-; CHECK-NEXT:    mov z1.d, z4.d
-; CHECK-NEXT:    mov z2.d, z28.d
-; CHECK-NEXT:    mov z12.d, z5.d
-; CHECK-NEXT:    mov z3.d, z24.d
-; CHECK-NEXT:    mov z4.d, z8.d
-; CHECK-NEXT:    mov z13.d, z29.d
-; CHECK-NEXT:    mov z14.d, z25.d
-; CHECK-NEXT:    mov z15.d, z9.d
-; CHECK-NEXT:    mov z16.d, z6.d
-; CHECK-NEXT:    mov z17.d, z30.d
-; CHECK-NEXT:    mov z18.d, z26.d
-; CHECK-NEXT:    mov z19.d, z10.d
-; CHECK-NEXT:    mov z8.d, z7.d
+; CHECK-NEXT:    str z16, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x9, x0, x9
+; CHECK-NEXT:    str z15, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z14, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z12, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z11, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z10, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z9, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z8, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1h { z1.h, z5.h, z9.h, z13.h }, pn8/z, [x0]
+; CHECK-NEXT:    ld1h { z2.h, z6.h, z10.h, z14.h }, pn8/z, [x10]
+; CHECK-NEXT:    ld1h { z3.h, z7.h, z11.h, z15.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ld1h { z16.h, z20.h, z24.h, z28.h }, pn8/z, [x9]
+; CHECK-NEXT:    mov z4.d, z16.d
+; CHECK-NEXT:    mov z8.d, z20.d
+; CHECK-NEXT:    mov z12.d, z24.d
+; CHECK-NEXT:    mov z16.d, z28.d
 ; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z1.h - z4.h }, z0.h
-; CHECK-NEXT:    mov z9.d, z31.d
-; CHECK-NEXT:    mov z10.d, z27.d
-; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z12.h - z15.h }, z0.h
-; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z16.h - z19.h }, z0.h
-; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z8.h - z11.h }, z0.h
-; CHECK-NEXT:    ldr z19, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z5.h - z8.h }, z0.h
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z9.h - z12.h }, z0.h
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z13.h - z16.h }, z0.h
+; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #10, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #13
+; CHECK-NEXT:    addvl sp, sp, #11
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -530,18 +517,20 @@ define void @usdot_single_za32_u16_vg1x2_tuple(ptr %ptr, i64 %stride, <vscale x
 ; CHECK-LABEL: usdot_single_za32_u16_vg1x2_tuple:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    addvl sp, sp, #-3
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    ld1b { z2.b, z3.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z6.b, z7.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    mov z5.d, z2.d
-; CHECK-NEXT:    mov z4.d, z7.d
-; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z5.b, z6.b }, z0.b
-; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z3.b, z4.b }, z0.b
+; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z1.b, z2.b }, z0.b
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z9.b, z10.b }, z0.b
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -575,61 +564,46 @@ define void @usdot_single_za32_u16_vg1x4_tuple(ptr %ptr, i64 %stride, <vscale x
 ; CHECK-LABEL: usdot_single_za32_u16_vg1x4_tuple:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-13
+; CHECK-NEXT:    addvl sp, sp, #-11
 ; CHECK-NEXT:    lsl x9, x1, #1
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z19, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z18, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z17, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z16, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z15, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z14, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z13, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z12, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #12, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z24.b - z27.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z4.b - z7.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z8.b - z11.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    add x9, x9, x1
-; CHECK-NEXT:    ld1b { z28.b - z31.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    mov z1.d, z24.d
-; CHECK-NEXT:    mov z2.d, z4.d
-; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    mov z12.d, z25.d
-; CHECK-NEXT:    mov z13.d, z5.d
-; CHECK-NEXT:    mov z14.d, z9.d
-; CHECK-NEXT:    mov z4.d, z28.d
-; CHECK-NEXT:    mov z15.d, z29.d
-; CHECK-NEXT:    mov z16.d, z26.d
-; CHECK-NEXT:    mov z17.d, z6.d
-; CHECK-NEXT:    mov z18.d, z10.d
-; CHECK-NEXT:    mov z19.d, z30.d
-; CHECK-NEXT:    mov z28.d, z27.d
-; CHECK-NEXT:    mov z29.d, z7.d
-; CHECK-NEXT:    mov z30.d, z11.d
+; CHECK-NEXT:    str z16, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z15, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z14, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z12, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z11, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z10, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z9, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z8, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
+; CHECK-NEXT:    mov z4.d, z16.d
+; CHECK-NEXT:    mov z8.d, z20.d
+; CHECK-NEXT:    mov z12.d, z24.d
+; CHECK-NEXT:    mov z16.d, z28.d
 ; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z1.b - z4.b }, z0.b
-; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z12.b - z15.b }, z0.b
-; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b
-; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b
-; CHECK-NEXT:    ldr z19, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z5.b - z8.b }, z0.b
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z9.b - z12.b }, z0.b
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z13.b - z16.b }, z0.b
+; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #10, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #13
+; CHECK-NEXT:    addvl sp, sp, #11
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -685,19 +659,21 @@ define void @sdot_single_za32_u16_vg1x2_tuple(ptr %ptr, i64 %stride, <vscale x 8
 ; CHECK-LABEL: sdot_single_za32_u16_vg1x2_tuple:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    addvl sp, sp, #-3
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    add x9, x0, x1
-; CHECK-NEXT:    ld1h { z2.h, z3.h }, pn8/z, [x0]
-; CHECK-NEXT:    ld1h { z6.h, z7.h }, pn8/z, [x9]
+; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    mov z5.d, z2.d
-; CHECK-NEXT:    mov z4.d, z7.d
-; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z5.h, z6.h }, z0.h
-; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z3.h, z4.h }, z0.h
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1h { z1.h, z9.h }, pn8/z, [x0]
+; CHECK-NEXT:    ld1h { z2.h, z10.h }, pn8/z, [x9]
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z1.h, z2.h }, z0.h
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z9.h, z10.h }, z0.h
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -731,62 +707,47 @@ define void @sdot_single_za32_u16_vg1x4_tuple(ptr %ptr, i64 %stride, <vscale x 8
 ; CHECK-LABEL: sdot_single_za32_u16_vg1x4_tuple:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-13
-; CHECK-NEXT:    add x10, x1, x1, lsl #1
+; CHECK-NEXT:    addvl sp, sp, #-11
+; CHECK-NEXT:    add x9, x1, x1, lsl #1
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z19, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    add x9, x0, x1
+; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x10, x0, x1
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z18, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z17, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z16, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z15, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z14, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z13, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z12, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #12, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1h { z4.h - z7.h }, pn8/z, [x0]
-; CHECK-NEXT:    ld1h { z28.h - z31.h }, pn8/z, [x9]
-; CHECK-NEXT:    add x9, x0, x10
-; CHECK-NEXT:    ld1h { z24.h - z27.h }, pn8/z, [x0, x1, lsl #1]
-; CHECK-NEXT:    ld1h { z8.h - z11.h }, pn8/z, [x9]
-; CHECK-NEXT:    mov z1.d, z4.d
-; CHECK-NEXT:    mov z2.d, z28.d
-; CHECK-NEXT:    mov z12.d, z5.d
-; CHECK-NEXT:    mov z3.d, z24.d
-; CHECK-NEXT:    mov z4.d, z8.d
-; CHECK-NEXT:    mov z13.d, z29.d
-; CHECK-NEXT:    mov z14.d, z25.d
-; CHECK-NEXT:    mov z15.d, z9.d
-; CHECK-NEXT:    mov z16.d, z6.d
-; CHECK-NEXT:    mov z17.d, z30.d
-; CHECK-NEXT:    mov z18.d, z26.d
-; CHECK-NEXT:    mov z19.d, z10.d
-; CHECK-NEXT:    mov z8.d, z7.d
+; CHECK-NEXT:    str z16, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x9, x0, x9
+; CHECK-NEXT:    str z15, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z14, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z12, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z11, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z10, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z9, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z8, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1h { z1.h, z5.h, z9.h, z13.h }, pn8/z, [x0]
+; CHECK-NEXT:    ld1h { z2.h, z6.h, z10.h, z14.h }, pn8/z, [x10]
+; CHECK-NEXT:    ld1h { z3.h, z7.h, z11.h, z15.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ld1h { z16.h, z20.h, z24.h, z28.h }, pn8/z, [x9]
+; CHECK-NEXT:    mov z4.d, z16.d
+; CHECK-NEXT:    mov z8.d, z20.d
+; CHECK-NEXT:    mov z12.d, z24.d
+; CHECK-NEXT:    mov z16.d, z28.d
 ; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z1.h - z4.h }, z0.h
-; CHECK-NEXT:    mov z9.d, z31.d
-; CHECK-NEXT:    mov z10.d, z27.d
-; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z12.h - z15.h }, z0.h
-; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z16.h - z19.h }, z0.h
-; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z8.h - z11.h }, z0.h
-; CHECK-NEXT:    ldr z19, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z5.h - z8.h }, z0.h
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z9.h - z12.h }, z0.h
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z13.h - z16.h }, z0.h
+; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #10, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #13
+; CHECK-NEXT:    addvl sp, sp, #11
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -892,18 +853,20 @@ define void @sudot_single_za32_u16_vg1x2_tuple(ptr %ptr, i64 %stride, <vscale x
 ; CHECK-LABEL: sudot_single_za32_u16_vg1x2_tuple:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    addvl sp, sp, #-3
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    ld1b { z2.b, z3.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z6.b, z7.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    mov z5.d, z2.d
-; CHECK-NEXT:    mov z4.d, z7.d
-; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z5.b, z6.b }, z0.b
-; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z3.b, z4.b }, z0.b
+; CHECK-NEXT:    str z10, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z9, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z9.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z10.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z1.b, z2.b }, z0.b
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z9.b, z10.b }, z0.b
+; CHECK-NEXT:    ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -937,61 +900,46 @@ define void @sudot_single_za32_u16_vg1x4_tuple(ptr %ptr, i64 %stride, <vscale x
 ; CHECK-LABEL: sudot_single_za32_u16_vg1x4_tuple:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-13
+; CHECK-NEXT:    addvl sp, sp, #-11
 ; CHECK-NEXT:    lsl x9, x1, #1
 ; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
 ; CHECK-NEXT:    ptrue pn8.b
-; CHECK-NEXT:    str z19, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z20, [sp, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov w8, wzr
-; CHECK-NEXT:    str z18, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z17, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z16, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z15, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z14, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z13, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z12, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z11, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z10, [sp, #10, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z9, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    str z8, [sp, #12, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT:    ld1b { z24.b - z27.b }, pn8/z, [x0]
-; CHECK-NEXT:    ld1b { z4.b - z7.b }, pn8/z, [x0, x1]
-; CHECK-NEXT:    ld1b { z8.b - z11.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    add x9, x9, x1
-; CHECK-NEXT:    ld1b { z28.b - z31.b }, pn8/z, [x0, x9]
-; CHECK-NEXT:    mov z1.d, z24.d
-; CHECK-NEXT:    mov z2.d, z4.d
-; CHECK-NEXT:    mov z3.d, z8.d
-; CHECK-NEXT:    mov z12.d, z25.d
-; CHECK-NEXT:    mov z13.d, z5.d
-; CHECK-NEXT:    mov z14.d, z9.d
-; CHECK-NEXT:    mov z4.d, z28.d
-; CHECK-NEXT:    mov z15.d, z29.d
-; CHECK-NEXT:    mov z16.d, z26.d
-; CHECK-NEXT:    mov z17.d, z6.d
-; CHECK-NEXT:    mov z18.d, z10.d
-; CHECK-NEXT:    mov z19.d, z30.d
-; CHECK-NEXT:    mov z28.d, z27.d
-; CHECK-NEXT:    mov z29.d, z7.d
-; CHECK-NEXT:    mov z30.d, z11.d
+; CHECK-NEXT:    str z16, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    add x10, x9, x1
+; CHECK-NEXT:    str z15, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z14, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z13, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z12, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z11, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z10, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z9, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    str z8, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT:    ld1b { z1.b, z5.b, z9.b, z13.b }, pn8/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b, z6.b, z10.b, z14.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ld1b { z3.b, z7.b, z11.b, z15.b }, pn8/z, [x0, x9]
+; CHECK-NEXT:    ld1b { z16.b, z20.b, z24.b, z28.b }, pn8/z, [x0, x10]
+; CHECK-NEXT:    mov z4.d, z16.d
+; CHECK-NEXT:    mov z8.d, z20.d
+; CHECK-NEXT:    mov z12.d, z24.d
+; CHECK-NEXT:    mov z16.d, z28.d
 ; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z1.b - z4.b }, z0.b
-; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z12.b - z15.b }, z0.b
-; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b
-; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b
-; CHECK-NEXT:    ldr z19, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z18, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z17, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z16, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z15, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z14, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z13, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z12, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z11, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z10, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z9, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr z8, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z5.b - z8.b }, z0.b
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z9.b - z12.b }, z0.b
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z13.b - z16.b }, z0.b
+; CHECK-NEXT:    ldr z20, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z16, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z15, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z14, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z13, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z12, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z11, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z10, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z9, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr z8, [sp, #10, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT:    addvl sp, sp, #13
+; CHECK-NEXT:    addvl sp, sp, #11
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:

>From 8839f10db34fdbb3aad26bd54f6431c9cd4ec964 Mon Sep 17 00:00:00 2001
From: Kerry McLaughlin <kerry.mclaughlin at arm.com>
Date: Thu, 23 Jan 2025 16:05:41 +0000
Subject: [PATCH 3/3] - Remove new nodes and always use ZPR2/ZPR4 register
 classes

---
 .../AArch64/AArch64ExpandPseudoInsts.cpp      |  2 --
 .../Target/AArch64/AArch64ISelLowering.cpp    | 10 ++++-----
 llvm/lib/Target/AArch64/AArch64InstrInfo.h    | 12 ----------
 .../Target/AArch64/AArch64RegisterInfo.cpp    | 14 ++++++------
 llvm/lib/Target/AArch64/SMEInstrFormats.td    | 22 +++++++------------
 5 files changed, 19 insertions(+), 41 deletions(-)
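
A brief note before the diffs on what patch 3/3 amounts to: once the MULX2/MULX4 variants are removed, every remaining query about the FORM_TRANSPOSED pseudos is a comparison against just two opcodes, which is why the isFormTransposedOpcode() helper in AArch64InstrInfo.h can be dropped and the check inlined at its call sites. The snippet below is a toy, self-contained sketch of that reduced check, not code from the patch; the standalone helper name and the placeholder opcode values are invented purely for illustration.

  // Toy sketch (not LLVM source): the FORM_TRANSPOSED query collapses to a
  // two-opcode comparison once the MULX2/MULX4 pseudos are removed.
  #include <cstdio>

  namespace AArch64 {
  enum : unsigned {
    FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO = 1, // placeholder values, not the
    FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO = 2, // generated LLVM opcode numbers
    SOME_OTHER_OPCODE = 3
  };
  } // namespace AArch64

  // Hypothetical helper showing the shape of the check the patch inlines.
  static bool isFormTransposedRegTuple(unsigned Opc) {
    return Opc == AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO ||
           Opc == AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO;
  }

  int main() {
    std::printf("%d\n", isFormTransposedRegTuple(
                            AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO)); // 1
    std::printf("%d\n", isFormTransposedRegTuple(AArch64::SOME_OTHER_OPCODE)); // 0
  }

In the actual patch the same two-opcode test appears directly in AdjustInstrPostInstrSelection and getRegAllocationHints, as shown in the AArch64ISelLowering.cpp and AArch64RegisterInfo.cpp hunks below.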

diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index 239070925aa3aa..b44c48afe705ba 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -1755,10 +1755,8 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
          MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
          AArch64::LDNT1D_4Z, AArch64::LDNT1D_4Z_STRIDED);
    case AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO:
-   case AArch64::FORM_TRANSPOSED_REG_TUPLE_MULX2_PSEUDO:
      return expandFormTuplePseudo(MBB, MBBI, NextMBBI, 2);
    case AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO:
-   case AArch64::FORM_TRANSPOSED_REG_TUPLE_MULX4_PSEUDO:
      return expandFormTuplePseudo(MBB, MBBI, NextMBBI, 4);
   }
   return false;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 289ada6b4f86f3..d4a114c275fb76 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -8759,7 +8759,7 @@ static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {
 //   %6:zpr2stridedorcontiguous = LD1B_2Z_PSEUDO ..
 //   %7:zpr = COPY %6.zsub1:zpr2stridedorcontiguous
 //   %8:zpr = COPY %6.zsub0:zpr2stridedorcontiguous
-//   %9:zpr2mul2 = FORM_TRANSPOSED_REG_TUPLE_MULX2_PSEUDO %5:zpr, %8:zpr
+//   %9:zpr2mul2 = FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO %5:zpr, %8:zpr
 //
 bool shouldUseFormStridedPseudo(MachineInstr &MI) {
   MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
@@ -8767,11 +8767,9 @@ bool shouldUseFormStridedPseudo(MachineInstr &MI) {
   const TargetRegisterClass *RegClass = nullptr;
   switch (MI.getOpcode()) {
   case AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO:
-  case AArch64::FORM_TRANSPOSED_REG_TUPLE_MULX2_PSEUDO:
     RegClass = &AArch64::ZPR2StridedOrContiguousRegClass;
     break;
   case AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO:
-  case AArch64::FORM_TRANSPOSED_REG_TUPLE_MULX4_PSEUDO:
     RegClass = &AArch64::ZPR4StridedOrContiguousRegClass;
     break;
   default:
@@ -8826,14 +8824,14 @@ void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
     }
   }
 
-  const AArch64InstrInfo *TII =
-      MI.getMF()->getSubtarget<AArch64Subtarget>().getInstrInfo();
-  if (TII->isFormTransposedOpcode(MI.getOpcode())) {
+  if (MI.getOpcode() == AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO ||
+      MI.getOpcode() == AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO) {
     // If input values to the FORM_TRANSPOSED_REG_TUPLE pseudo aren't copies
     // from a StridedOrContiguous class, fall back on REG_SEQUENCE node.
     if (shouldUseFormStridedPseudo(MI))
       return;
 
+    const TargetInstrInfo *TII = Subtarget->getInstrInfo();
     MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                       TII->get(TargetOpcode::REG_SEQUENCE),
                                       MI.getOperand(0).getReg());
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index f45d868546e7f4..e37f70f7d985de 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -548,18 +548,6 @@ class AArch64InstrInfo final : public AArch64GenInstrInfo {
                                                Register TargetReg,
                                                bool FrameSetup) const;
 
-  bool isFormTransposedOpcode(unsigned Opc) const {
-    switch (Opc) {
-    case AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO:
-    case AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO:
-    case AArch64::FORM_TRANSPOSED_REG_TUPLE_MULX2_PSEUDO:
-    case AArch64::FORM_TRANSPOSED_REG_TUPLE_MULX4_PSEUDO:
-      return true;
-    default:
-      return false;
-    }
-  }
-
 #define GET_INSTRINFO_HELPER_DECLS
 #include "AArch64GenInstrInfo.inc"
 
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 773d9946c192f1..5973b63b5a8024 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -1109,14 +1109,13 @@ bool AArch64RegisterInfo::getRegAllocationHints(
   // so we add the strided registers as a hint.
   unsigned RegID = MRI.getRegClass(VirtReg)->getID();
   // Look through uses of the register for FORM_TRANSPOSED_REG_TUPLE.
-  const AArch64InstrInfo *TII =
-      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
   if ((RegID == AArch64::ZPR2StridedOrContiguousRegClassID ||
        RegID == AArch64::ZPR4StridedOrContiguousRegClassID) &&
-      any_of(MRI.use_nodbg_instructions(VirtReg),
-             [&TII](const MachineInstr &Use) {
-               return TII->isFormTransposedOpcode(Use.getOpcode());
-             })) {
+      any_of(MRI.use_nodbg_instructions(VirtReg), [](const MachineInstr &Use) {
+        return Use.getOpcode() ==
+                   AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO ||
+               Use.getOpcode() == AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO;
+      })) {
     const TargetRegisterClass *StridedRC =
         RegID == AArch64::ZPR2StridedOrContiguousRegClassID
             ? &AArch64::ZPR2StridedRegClass
@@ -1131,7 +1130,8 @@ bool AArch64RegisterInfo::getRegAllocationHints(
   }
 
   for (MachineInstr &MI : MRI.def_instructions(VirtReg)) {
-    if (!TII->isFormTransposedOpcode(MI.getOpcode()))
+    if (MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO &&
+        MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO)
       return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
                                                        MF, VRM);
 
diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index 918637a9c6d3cb..a01d59d0e5c43d 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -45,26 +45,20 @@ def am_sme_indexed_b4 : ComplexPattern<iPTR, 2, "SelectAddrModeIndexedSVE<0, 15>
 // If the operands do not match this pattern, the pseudos are expanded
 // to a REG_SEQUENCE using the post-isel hook.
 
-class sme_form_transpose_x2_pseudo<RegisterClass multi_vector_class>
-    : Pseudo<(outs multi_vector_class:$tup), (ins ZPR:$zn0, ZPR:$zn1), []>,
-      Sched<[]> {
+def FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO :
+  Pseudo<(outs ZPR2:$tup),
+         (ins ZPR:$zn0, ZPR:$zn1), []>, Sched<[]>{
   let hasSideEffects = 0;
   let hasPostISelHook = 1;
 }
 
-def FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO    : sme_form_transpose_x2_pseudo<ZPR2>;
-def FORM_TRANSPOSED_REG_TUPLE_MULX2_PSEUDO : sme_form_transpose_x2_pseudo<ZPR2Mul2>;
-
-class sme_form_transpose_x4_pseudo<RegisterClass multi_vector_class>
-    : Pseudo<(outs multi_vector_class:$tup), (ins ZPR:$zn0, ZPR:$zn1, ZPR:$zn2, ZPR:$zn3), []>,
-      Sched<[]> {
+def FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO :
+  Pseudo<(outs ZPR4:$tup),
+         (ins ZPR:$zn0, ZPR:$zn1, ZPR:$zn2, ZPR:$zn3), []>, Sched<[]>{
   let hasSideEffects = 0;
   let hasPostISelHook = 1;
 }
 
-def FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO    : sme_form_transpose_x4_pseudo<ZPR4>;
-def FORM_TRANSPOSED_REG_TUPLE_MULX4_PSEUDO : sme_form_transpose_x4_pseudo<ZPR4Mul4>;
-
 def SDTZALoadStore : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisInt<2>]>;
 def AArch64SMELdr : SDNode<"AArch64ISD::SME_ZA_LDR", SDTZALoadStore,
                              [SDNPHasChain, SDNPSideEffect, SDNPMayLoad]>;
@@ -203,14 +197,14 @@ class SME2_ZA_TwoOp_VG2_Multi_Index_Pat<string name, SDPatternOperator intrinsic
                                         Operand imm_ty, ComplexPattern tileslice>
     : Pat<(intrinsic (i32 (tileslice MatrixIndexGPR32Op8_11:$base, index_ty:$offset)), vt:$Zn1, vt:$Zn2, vt:$Zm, (i32 imm_ty:$i)),
           (!cast<Instruction>(name # _PSEUDO) $base, $offset,
-                                              (FORM_TRANSPOSED_REG_TUPLE_MULX2_PSEUDO vt:$Zn1,vt:$Zn2), zpr_ty:$Zm, imm_ty:$i)>;
+                                              (FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO vt:$Zn1,vt:$Zn2), zpr_ty:$Zm, imm_ty:$i)>;
 
 class SME2_ZA_TwoOp_VG4_Multi_Index_Pat<string name, SDPatternOperator intrinsic, Operand index_ty, ZPRRegOp zpr_ty, ValueType vt,
                                         Operand imm_ty, ComplexPattern tileslice>
     : Pat<(intrinsic (i32 (tileslice MatrixIndexGPR32Op8_11:$base, index_ty:$offset)),
                      vt:$Zn1, vt:$Zn2, vt:$Zn3, vt:$Zn4, vt:$Zm, (i32 imm_ty:$i)),
           (!cast<Instruction>(name # _PSEUDO) $base, $offset,
-                                              (FORM_TRANSPOSED_REG_TUPLE_MULX4_PSEUDO vt:$Zn1, vt:$Zn2, vt:$Zn3, vt:$Zn4),
+                                              (FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO vt:$Zn1, vt:$Zn2, vt:$Zn3, vt:$Zn4),
                                               zpr_ty:$Zm, imm_ty:$i)>;
 
 class SME2_Sat_Shift_VG2_Pat<string name, SDPatternOperator intrinsic, ValueType out_vt, ValueType in_vt, Operand imm_ty>


