[llvm-branch-commits] [llvm] 88f8980 - [AArch64][SVE] Add structured load/store opcodes to getMemOpInfo

Tom Stellard via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Feb 22 11:19:18 PST 2022


Author: Kerry McLaughlin
Date: 2022-02-22T11:12:31-08:00
New Revision: 88f8980a4d95b16a0dcd57bb6da298d4d60d8fd1

URL: https://github.com/llvm/llvm-project/commit/88f8980a4d95b16a0dcd57bb6da298d4d60d8fd1
DIFF: https://github.com/llvm/llvm-project/commit/88f8980a4d95b16a0dcd57bb6da298d4d60d8fd1.diff

LOG: [AArch64][SVE] Add structured load/store opcodes to getMemOpInfo

Currently, loading from or storing to a stack location with a structured load
or store crashes in isAArch64FrameOffsetLegal as the opcodes are not handled by
getMemOpInfo. This patch adds the opcodes for structured load/store instructions
with an immediate index to getMemOpInfo & getLoadStoreImmIdx, setting appropriate
values for the scale, width & min/max offsets.

Reviewed By: sdesmalen, david-arm

Differential Revision: https://reviews.llvm.org/D119338

(cherry picked from commit fc1b21228e39d63f1a2ab98026d548de66cb3760)

Added: 
    llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll
    llvm/test/CodeGen/AArch64/sve-ldN.mir
    llvm/test/CodeGen/AArch64/sve-stN.mir

Modified: 
    llvm/lib/Target/AArch64/AArch64InstrInfo.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index a9191924129c..ea9c1b620065 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2270,6 +2270,19 @@ unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) {
   case AArch64::LD1SW_D_IMM:
   case AArch64::LD1D_IMM:
 
+  case AArch64::LD2B_IMM:
+  case AArch64::LD2H_IMM:
+  case AArch64::LD2W_IMM:
+  case AArch64::LD2D_IMM:
+  case AArch64::LD3B_IMM:
+  case AArch64::LD3H_IMM:
+  case AArch64::LD3W_IMM:
+  case AArch64::LD3D_IMM:
+  case AArch64::LD4B_IMM:
+  case AArch64::LD4H_IMM:
+  case AArch64::LD4W_IMM:
+  case AArch64::LD4D_IMM:
+
   case AArch64::ST1B_IMM:
   case AArch64::ST1B_H_IMM:
   case AArch64::ST1B_S_IMM:
@@ -2281,6 +2294,19 @@ unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) {
   case AArch64::ST1W_D_IMM:
   case AArch64::ST1D_IMM:
 
+  case AArch64::ST2B_IMM:
+  case AArch64::ST2H_IMM:
+  case AArch64::ST2W_IMM:
+  case AArch64::ST2D_IMM:
+  case AArch64::ST3B_IMM:
+  case AArch64::ST3H_IMM:
+  case AArch64::ST3W_IMM:
+  case AArch64::ST3D_IMM:
+  case AArch64::ST4B_IMM:
+  case AArch64::ST4H_IMM:
+  case AArch64::ST4W_IMM:
+  case AArch64::ST4D_IMM:
+
   case AArch64::LD1RB_IMM:
   case AArch64::LD1RB_H_IMM:
   case AArch64::LD1RB_S_IMM:
@@ -2897,6 +2923,45 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
     MinOffset = -8;
     MaxOffset = 7;
     break;
+  case AArch64::LD2B_IMM:
+  case AArch64::LD2H_IMM:
+  case AArch64::LD2W_IMM:
+  case AArch64::LD2D_IMM:
+  case AArch64::ST2B_IMM:
+  case AArch64::ST2H_IMM:
+  case AArch64::ST2W_IMM:
+  case AArch64::ST2D_IMM:
+    Scale = TypeSize::Scalable(32);
+    Width = SVEMaxBytesPerVector * 2;
+    MinOffset = -8;
+    MaxOffset = 7;
+    break;
+  case AArch64::LD3B_IMM:
+  case AArch64::LD3H_IMM:
+  case AArch64::LD3W_IMM:
+  case AArch64::LD3D_IMM:
+  case AArch64::ST3B_IMM:
+  case AArch64::ST3H_IMM:
+  case AArch64::ST3W_IMM:
+  case AArch64::ST3D_IMM:
+    Scale = TypeSize::Scalable(48);
+    Width = SVEMaxBytesPerVector * 3;
+    MinOffset = -8;
+    MaxOffset = 7;
+    break;
+  case AArch64::LD4B_IMM:
+  case AArch64::LD4H_IMM:
+  case AArch64::LD4W_IMM:
+  case AArch64::LD4D_IMM:
+  case AArch64::ST4B_IMM:
+  case AArch64::ST4H_IMM:
+  case AArch64::ST4W_IMM:
+  case AArch64::ST4D_IMM:
+    Scale = TypeSize::Scalable(64);
+    Width = SVEMaxBytesPerVector * 4;
+    MinOffset = -8;
+    MaxOffset = 7;
+    break;
   case AArch64::LD1B_H_IMM:
   case AArch64::LD1SB_H_IMM:
   case AArch64::LD1H_S_IMM:

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll b/llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll
new file mode 100644
index 000000000000..f59891c31e93
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define void @st1d_fixed(<8 x double>* %ptr) #0 {
+; CHECK-LABEL: st1d_fixed:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    add x8, sp, #8
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x8]
+; CHECK-NEXT:    mov x8, #4
+; CHECK-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %alloc = alloca [16 x double], i32 0
+  %bc = bitcast [16 x double]* %alloc to <8 x double>*
+  %load = load <8 x double>, <8 x double>* %bc
+  %strided.vec = shufflevector <8 x double> %load, <8 x double> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  store <8 x double> zeroinitializer, <8 x double>* %ptr
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" vscale_range(2,2) nounwind }

diff --git a/llvm/test/CodeGen/AArch64/sve-ldN.mir b/llvm/test/CodeGen/AArch64/sve-ldN.mir
new file mode 100644
index 000000000000..c59c53da806b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-ldN.mir
@@ -0,0 +1,261 @@
+# RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -run-pass=prologepilog -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -start-before=prologepilog -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-OFFSET
+
+--- |
+  define void @testcase_valid_offset() nounwind { entry: unreachable }
+  define void @testcase_offset_out_of_range() nounwind { entry: unreachable }
+...
+---
+name:            testcase_valid_offset
+tracksRegLiveness: true
+stack:
+  - { id: 0, name: '', type: default, offset: 0, size: 512, alignment: 16, stack-id: scalable-vector }
+body:             |
+  bb.0:
+    liveins: $p0
+
+    ; CHECK-LABEL: name: testcase_valid_offset
+    ; CHECK: liveins: $p0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.1)
+    ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
+    ; CHECK-NEXT: renamable $z0_z1 = LD2B_IMM renamable $p0, $sp, -8
+    ; CHECK-NEXT: renamable $z0_z1 = LD2B_IMM renamable $p0, $sp, 7
+    ; CHECK-NEXT: renamable $z0_z1 = LD2H_IMM renamable $p0, $sp, -8
+    ; CHECK-NEXT: renamable $z0_z1 = LD2H_IMM renamable $p0, $sp, 7
+    ; CHECK-NEXT: renamable $z0_z1 = LD2W_IMM renamable $p0, $sp, -8
+    ; CHECK-NEXT: renamable $z0_z1 = LD2W_IMM renamable $p0, $sp, 7
+    ; CHECK-NEXT: renamable $z0_z1 = LD2D_IMM renamable $p0, $sp, -8
+    ; CHECK-NEXT: renamable $z0_z1 = LD2D_IMM renamable $p0, $sp, 7
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3B_IMM renamable $p0, $sp, -8
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3B_IMM renamable $p0, $sp, 7
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3H_IMM renamable $p0, $sp, -8
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3H_IMM renamable $p0, $sp, 7
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3W_IMM renamable $p0, $sp, -8
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3W_IMM renamable $p0, $sp, 7
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3D_IMM renamable $p0, $sp, -8
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3D_IMM renamable $p0, $sp, 7
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, $sp, -8
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, $sp, 7
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, $sp, -8
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, $sp, 7
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, $sp, -8
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, $sp, 7
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, $sp, -8
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, $sp, 7
+    ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
+    ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1
+    ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.1)
+    ; CHECK-NEXT: RET_ReallyLR implicit $z0, implicit $z1, implicit $z2, implicit $z3
+
+    ; CHECK-OFFSET-LABEL: testcase_valid_offset:
+    ; CHECK-OFFSET: str x29, [sp, #-16]!
+    ; CHECK-OFFSET-NEXT: addvl sp, sp, #-32
+    ; CHECK-OFFSET-NEXT: ld2b { z0.b, z1.b }, p0/z, [sp, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: ld2b { z0.b, z1.b }, p0/z, [sp, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: ld2h { z0.h, z1.h }, p0/z, [sp, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: ld2h { z0.h, z1.h }, p0/z, [sp, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: ld2w { z0.s, z1.s }, p0/z, [sp, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: ld2w { z0.s, z1.s }, p0/z, [sp, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: ld2d { z0.d, z1.d }, p0/z, [sp, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: ld2d { z0.d, z1.d }, p0/z, [sp, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [sp, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [sp, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: ld3h { z0.h, z1.h, z2.h }, p0/z, [sp, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: ld3h { z0.h, z1.h, z2.h }, p0/z, [sp, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: ld3w { z0.s, z1.s, z2.s }, p0/z, [sp, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: ld3w { z0.s, z1.s, z2.s }, p0/z, [sp, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: ld3d { z0.d, z1.d, z2.d }, p0/z, [sp, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: ld3d { z0.d, z1.d, z2.d }, p0/z, [sp, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [sp, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [sp, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [sp, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [sp, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [sp, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [sp, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [sp, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [sp, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl sp, sp, #31
+    ; CHECK-OFFSET-NEXT: addvl sp, sp, #1
+    ; CHECK-OFFSET-NEXT: ldr x29, [sp], #16
+    ; CHECK-OFFSET-NEXT: ret
+
+    renamable $z0_z1 = LD2B_IMM renamable $p0, %stack.0, -8
+    renamable $z0_z1 = LD2B_IMM renamable $p0, %stack.0, 7
+    renamable $z0_z1 = LD2H_IMM renamable $p0, %stack.0, -8
+    renamable $z0_z1 = LD2H_IMM renamable $p0, %stack.0, 7
+    renamable $z0_z1 = LD2W_IMM renamable $p0, %stack.0, -8
+    renamable $z0_z1 = LD2W_IMM renamable $p0, %stack.0, 7
+    renamable $z0_z1 = LD2D_IMM renamable $p0, %stack.0, -8
+    renamable $z0_z1 = LD2D_IMM renamable $p0, %stack.0, 7
+
+    renamable $z0_z1_z2 = LD3B_IMM renamable $p0, %stack.0, -8
+    renamable $z0_z1_z2 = LD3B_IMM renamable $p0, %stack.0, 7
+    renamable $z0_z1_z2 = LD3H_IMM renamable $p0, %stack.0, -8
+    renamable $z0_z1_z2 = LD3H_IMM renamable $p0, %stack.0, 7
+    renamable $z0_z1_z2 = LD3W_IMM renamable $p0, %stack.0, -8
+    renamable $z0_z1_z2 = LD3W_IMM renamable $p0, %stack.0, 7
+    renamable $z0_z1_z2 = LD3D_IMM renamable $p0, %stack.0, -8
+    renamable $z0_z1_z2 = LD3D_IMM renamable $p0, %stack.0, 7
+
+    renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, %stack.0, -8
+    renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, %stack.0, 7
+    renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, %stack.0, -8
+    renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, %stack.0, 7
+    renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, %stack.0, -8
+    renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, %stack.0, 7
+    renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, %stack.0, -8
+    renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, %stack.0, 7
+    RET_ReallyLR implicit $z0, implicit $z1, implicit $z2, implicit $z3
+...
+---
+name:            testcase_offset_out_of_range
+tracksRegLiveness: true
+stack:
+  - { id: 0, name: '', type: default, offset: 0, size: 512, alignment: 16, stack-id: scalable-vector }
+body:             |
+  bb.0:
+    liveins: $p0
+
+    ; CHECK-LABEL: name: testcase_offset_out_of_range
+    ; CHECK: liveins: $p0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.1)
+    ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -2
+    ; CHECK-NEXT: renamable $z0_z1 = LD2B_IMM renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 2
+    ; CHECK-NEXT: renamable $z0_z1 = LD2B_IMM renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -2
+    ; CHECK-NEXT: renamable $z0_z1 = LD2H_IMM renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 2
+    ; CHECK-NEXT: renamable $z0_z1 = LD2H_IMM renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -2
+    ; CHECK-NEXT: renamable $z0_z1 = LD2W_IMM renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 2
+    ; CHECK-NEXT: renamable $z0_z1 = LD2W_IMM renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -2
+    ; CHECK-NEXT: renamable $z0_z1 = LD2D_IMM renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 2
+    ; CHECK-NEXT: renamable $z0_z1 = LD2D_IMM renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -3
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3B_IMM renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 3
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3B_IMM renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -3
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3H_IMM renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 3
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3H_IMM renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -3
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3W_IMM renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 3
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3W_IMM renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -3
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3D_IMM renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 3
+    ; CHECK-NEXT: renamable $z0_z1_z2 = LD3D_IMM renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4
+    ; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
+    ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1
+    ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.1)
+    ; CHECK-NEXT: RET_ReallyLR implicit $z0, implicit $z1, implicit $z2, implicit $z3
+
+    ; CHECK-OFFSET-LABEL: testcase_offset_out_of_range:
+    ; CHECK-OFFSET: str x29, [sp, #-16]!
+    ; CHECK-OFFSET-NEXT: addvl sp, sp, #-32
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-2
+    ; CHECK-OFFSET-NEXT: ld2b { z0.b, z1.b }, p0/z, [x8, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #2
+    ; CHECK-OFFSET-NEXT: ld2b { z0.b, z1.b }, p0/z, [x8, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-2
+    ; CHECK-OFFSET-NEXT: ld2h { z0.h, z1.h }, p0/z, [x8, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #2
+    ; CHECK-OFFSET-NEXT: ld2h { z0.h, z1.h }, p0/z, [x8, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-2
+    ; CHECK-OFFSET-NEXT: ld2w { z0.s, z1.s }, p0/z, [x8, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #2
+    ; CHECK-OFFSET-NEXT: ld2w { z0.s, z1.s }, p0/z, [x8, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-2
+    ; CHECK-OFFSET-NEXT: ld2d { z0.d, z1.d }, p0/z, [x8, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #2
+    ; CHECK-OFFSET-NEXT: ld2d { z0.d, z1.d }, p0/z, [x8, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-3
+    ; CHECK-OFFSET-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [x8, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #3
+    ; CHECK-OFFSET-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [x8, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-3
+    ; CHECK-OFFSET-NEXT: ld3h { z0.h, z1.h, z2.h }, p0/z, [x8, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #3
+    ; CHECK-OFFSET-NEXT: ld3h { z0.h, z1.h, z2.h }, p0/z, [x8, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-3
+    ; CHECK-OFFSET-NEXT: ld3w { z0.s, z1.s, z2.s }, p0/z, [x8, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #3
+    ; CHECK-OFFSET-NEXT: ld3w { z0.s, z1.s, z2.s }, p0/z, [x8, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-3
+    ; CHECK-OFFSET-NEXT: ld3d { z0.d, z1.d, z2.d }, p0/z, [x8, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #3
+    ; CHECK-OFFSET-NEXT: ld3d { z0.d, z1.d, z2.d }, p0/z, [x8, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-4
+    ; CHECK-OFFSET-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x8, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #4
+    ; CHECK-OFFSET-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x8, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-4
+    ; CHECK-OFFSET-NEXT: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x8, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #4
+    ; CHECK-OFFSET-NEXT: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x8, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-4
+    ; CHECK-OFFSET-NEXT: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x8, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #4
+    ; CHECK-OFFSET-NEXT: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x8, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-4
+    ; CHECK-OFFSET-NEXT: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x8, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #4
+    ; CHECK-OFFSET-NEXT: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x8, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl sp, sp, #31
+    ; CHECK-OFFSET-NEXT: addvl sp, sp, #1
+    ; CHECK-OFFSET-NEXT: ldr x29, [sp], #16
+    ; CHECK-OFFSET-NEXT: ret
+
+    renamable $z0_z1 = LD2B_IMM renamable $p0, %stack.0, -9
+    renamable $z0_z1 = LD2B_IMM renamable $p0, %stack.0, 8
+    renamable $z0_z1 = LD2H_IMM renamable $p0, %stack.0, -9
+    renamable $z0_z1 = LD2H_IMM renamable $p0, %stack.0, 8
+    renamable $z0_z1 = LD2W_IMM renamable $p0, %stack.0, -9
+    renamable $z0_z1 = LD2W_IMM renamable $p0, %stack.0, 8
+    renamable $z0_z1 = LD2D_IMM renamable $p0, %stack.0, -9
+    renamable $z0_z1 = LD2D_IMM renamable $p0, %stack.0, 8
+
+    renamable $z0_z1_z2 = LD3B_IMM renamable $p0, %stack.0, -9
+    renamable $z0_z1_z2 = LD3B_IMM renamable $p0, %stack.0, 8
+    renamable $z0_z1_z2 = LD3H_IMM renamable $p0, %stack.0, -9
+    renamable $z0_z1_z2 = LD3H_IMM renamable $p0, %stack.0, 8
+    renamable $z0_z1_z2 = LD3W_IMM renamable $p0, %stack.0, -9
+    renamable $z0_z1_z2 = LD3W_IMM renamable $p0, %stack.0, 8
+    renamable $z0_z1_z2 = LD3D_IMM renamable $p0, %stack.0, -9
+    renamable $z0_z1_z2 = LD3D_IMM renamable $p0, %stack.0, 8
+
+    renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, %stack.0, -9
+    renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, %stack.0, 8
+    renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, %stack.0, -9
+    renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, %stack.0, 8
+    renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, %stack.0, -9
+    renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, %stack.0, 8
+    renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, %stack.0, -9
+    renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, %stack.0, 8
+    RET_ReallyLR implicit $z0, implicit $z1, implicit $z2, implicit $z3
+...

diff --git a/llvm/test/CodeGen/AArch64/sve-stN.mir b/llvm/test/CodeGen/AArch64/sve-stN.mir
new file mode 100644
index 000000000000..ac5c036a10bd
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-stN.mir
@@ -0,0 +1,261 @@
+# RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -run-pass=prologepilog -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -start-before=prologepilog -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-OFFSET
+
+--- |
+  define void @testcase_valid_offset() nounwind { entry: unreachable }
+  define void @testcase_offset_out_of_range() nounwind { entry: unreachable }
+...
+---
+name:            testcase_valid_offset
+tracksRegLiveness: true
+stack:
+  - { id: 0, name: '', type: default, offset: 0, size: 512, alignment: 16, stack-id: scalable-vector }
+body:             |
+  bb.0:
+    liveins: $p0, $z0
+
+    ; CHECK-LABEL: name: testcase_valid_offset
+    ; CHECK: liveins: $p0, $z0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.1)
+    ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
+    ; CHECK-NEXT: ST2B_IMM renamable $z0_z1, renamable $p0, $sp, -8
+    ; CHECK-NEXT: ST2B_IMM renamable $z0_z1, renamable $p0, $sp, 7
+    ; CHECK-NEXT: ST2H_IMM renamable $z0_z1, renamable $p0, $sp, -8
+    ; CHECK-NEXT: ST2H_IMM renamable $z0_z1, renamable $p0, $sp, 7
+    ; CHECK-NEXT: ST2W_IMM renamable $z0_z1, renamable $p0, $sp, -8
+    ; CHECK-NEXT: ST2W_IMM renamable $z0_z1, renamable $p0, $sp, 7
+    ; CHECK-NEXT: ST2D_IMM renamable $z0_z1, renamable $p0, $sp, -8
+    ; CHECK-NEXT: ST2D_IMM renamable $z0_z1, renamable $p0, $sp, 7
+    ; CHECK-NEXT: ST3B_IMM renamable $z0_z1_z2, renamable $p0, $sp, -8
+    ; CHECK-NEXT: ST3B_IMM renamable $z0_z1_z2, renamable $p0, $sp, 7
+    ; CHECK-NEXT: ST3H_IMM renamable $z0_z1_z2, renamable $p0, $sp, -8
+    ; CHECK-NEXT: ST3H_IMM renamable $z0_z1_z2, renamable $p0, $sp, 7
+    ; CHECK-NEXT: ST3W_IMM renamable $z0_z1_z2, renamable $p0, $sp, -8
+    ; CHECK-NEXT: ST3W_IMM renamable $z0_z1_z2, renamable $p0, $sp, 7
+    ; CHECK-NEXT: ST3D_IMM renamable $z0_z1_z2, renamable $p0, $sp, -8
+    ; CHECK-NEXT: ST3D_IMM renamable $z0_z1_z2, renamable $p0, $sp, 7
+    ; CHECK-NEXT: ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, -8
+    ; CHECK-NEXT: ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, 7
+    ; CHECK-NEXT: ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, -8
+    ; CHECK-NEXT: ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, 7
+    ; CHECK-NEXT: ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, -8
+    ; CHECK-NEXT: ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, 7
+    ; CHECK-NEXT: ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, -8
+    ; CHECK-NEXT: ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, $sp, 7
+    ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
+    ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1
+    ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.1)
+    ; CHECK-NEXT: RET_ReallyLR
+
+    ; CHECK-OFFSET-LABEL: testcase_valid_offset:
+    ; CHECK-OFFSET: str x29, [sp, #-16]!
+    ; CHECK-OFFSET-NEXT: addvl sp, sp, #-32
+    ; CHECK-OFFSET-NEXT: st2b { z0.b, z1.b }, p0, [sp, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: st2b { z0.b, z1.b }, p0, [sp, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: st2h { z0.h, z1.h }, p0, [sp, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: st2h { z0.h, z1.h }, p0, [sp, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: st2w { z0.s, z1.s }, p0, [sp, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: st2w { z0.s, z1.s }, p0, [sp, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: st2d { z0.d, z1.d }, p0, [sp, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: st2d { z0.d, z1.d }, p0, [sp, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [sp, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [sp, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: st3h { z0.h, z1.h, z2.h }, p0, [sp, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: st3h { z0.h, z1.h, z2.h }, p0, [sp, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: st3w { z0.s, z1.s, z2.s }, p0, [sp, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: st3w { z0.s, z1.s, z2.s }, p0, [sp, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: st3d { z0.d, z1.d, z2.d }, p0, [sp, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: st3d { z0.d, z1.d, z2.d }, p0, [sp, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [sp, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [sp, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [sp, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [sp, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [sp, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [sp, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [sp, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [sp, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl sp, sp, #31
+    ; CHECK-OFFSET-NEXT: addvl sp, sp, #1
+    ; CHECK-OFFSET-NEXT: ldr x29, [sp], #16
+    ; CHECK-OFFSET-NEXT: ret
+
+    ST2B_IMM renamable $z0_z1, renamable $p0, %stack.0, -8
+    ST2B_IMM renamable $z0_z1, renamable $p0, %stack.0, 7
+    ST2H_IMM renamable $z0_z1, renamable $p0, %stack.0, -8
+    ST2H_IMM renamable $z0_z1, renamable $p0, %stack.0, 7
+    ST2W_IMM renamable $z0_z1, renamable $p0, %stack.0, -8
+    ST2W_IMM renamable $z0_z1, renamable $p0, %stack.0, 7
+    ST2D_IMM renamable $z0_z1, renamable $p0, %stack.0, -8
+    ST2D_IMM renamable $z0_z1, renamable $p0, %stack.0, 7
+
+    ST3B_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -8
+    ST3B_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 7
+    ST3H_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -8
+    ST3H_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 7
+    ST3W_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -8
+    ST3W_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 7
+    ST3D_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -8
+    ST3D_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 7
+
+    ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -8
+    ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 7
+    ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -8
+    ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 7
+    ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -8
+    ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 7
+    ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -8
+    ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 7
+    RET_ReallyLR
+...
+---
+name:            testcase_offset_out_of_range
+tracksRegLiveness: true
+stack:
+  - { id: 0, name: '', type: default, offset: 0, size: 512, alignment: 16, stack-id: scalable-vector }
+body:             |
+  bb.0:
+    liveins: $p0, $z0
+
+    ; CHECK-LABEL: name: testcase_offset_out_of_range
+    ; CHECK: liveins: $p0, $z0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.1)
+    ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -2
+    ; CHECK-NEXT: ST2B_IMM renamable $z0_z1, renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 2
+    ; CHECK-NEXT: ST2B_IMM renamable $z0_z1, renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -2
+    ; CHECK-NEXT: ST2H_IMM renamable $z0_z1, renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 2
+    ; CHECK-NEXT: ST2H_IMM renamable $z0_z1, renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -2
+    ; CHECK-NEXT: ST2W_IMM renamable $z0_z1, renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 2
+    ; CHECK-NEXT: ST2W_IMM renamable $z0_z1, renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -2
+    ; CHECK-NEXT: ST2D_IMM renamable $z0_z1, renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 2
+    ; CHECK-NEXT: ST2D_IMM renamable $z0_z1, renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -3
+    ; CHECK-NEXT: ST3B_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 3
+    ; CHECK-NEXT: ST3B_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -3
+    ; CHECK-NEXT: ST3H_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 3
+    ; CHECK-NEXT: ST3H_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -3
+    ; CHECK-NEXT: ST3W_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 3
+    ; CHECK-NEXT: ST3W_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -3
+    ; CHECK-NEXT: ST3D_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 3
+    ; CHECK-NEXT: ST3D_IMM renamable $z0_z1_z2, renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4
+    ; CHECK-NEXT: ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4
+    ; CHECK-NEXT: ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4
+    ; CHECK-NEXT: ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4
+    ; CHECK-NEXT: ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4
+    ; CHECK-NEXT: ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4
+    ; CHECK-NEXT: ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4
+    ; CHECK-NEXT: ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, -8
+    ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4
+    ; CHECK-NEXT: ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, killed $x8, 7
+    ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
+    ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1
+    ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.1)
+    ; CHECK-NEXT: RET_ReallyLR
+
+    ; CHECK-OFFSET-LABEL: testcase_offset_out_of_range
+    ; CHECK-OFFSET: str x29, [sp, #-16]!
+    ; CHECK-OFFSET-NEXT: addvl sp, sp, #-32
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-2
+    ; CHECK-OFFSET-NEXT: st2b { z0.b, z1.b }, p0, [x8, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #2
+    ; CHECK-OFFSET-NEXT: st2b { z0.b, z1.b }, p0, [x8, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-2
+    ; CHECK-OFFSET-NEXT: st2h { z0.h, z1.h }, p0, [x8, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #2
+    ; CHECK-OFFSET-NEXT: st2h { z0.h, z1.h }, p0, [x8, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-2
+    ; CHECK-OFFSET-NEXT: st2w { z0.s, z1.s }, p0, [x8, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #2
+    ; CHECK-OFFSET-NEXT: st2w { z0.s, z1.s }, p0, [x8, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-2
+    ; CHECK-OFFSET-NEXT: st2d { z0.d, z1.d }, p0, [x8, #-16, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #2
+    ; CHECK-OFFSET-NEXT: st2d { z0.d, z1.d }, p0, [x8, #14, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-3
+    ; CHECK-OFFSET-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [x8, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #3
+    ; CHECK-OFFSET-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [x8, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-3
+    ; CHECK-OFFSET-NEXT: st3h { z0.h, z1.h, z2.h }, p0, [x8, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #3
+    ; CHECK-OFFSET-NEXT: st3h { z0.h, z1.h, z2.h }, p0, [x8, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-3
+    ; CHECK-OFFSET-NEXT: st3w { z0.s, z1.s, z2.s }, p0, [x8, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #3
+    ; CHECK-OFFSET-NEXT: st3w { z0.s, z1.s, z2.s }, p0, [x8, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-3
+    ; CHECK-OFFSET-NEXT: st3d { z0.d, z1.d, z2.d }, p0, [x8, #-24, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #3
+    ; CHECK-OFFSET-NEXT: st3d { z0.d, z1.d, z2.d }, p0, [x8, #21, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-4
+    ; CHECK-OFFSET-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x8, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #4
+    ; CHECK-OFFSET-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x8, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-4
+    ; CHECK-OFFSET-NEXT: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x8, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #4
+    ; CHECK-OFFSET-NEXT: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x8, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-4
+    ; CHECK-OFFSET-NEXT: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x8, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #4
+    ; CHECK-OFFSET-NEXT: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x8, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #-4
+    ; CHECK-OFFSET-NEXT: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x8, #-32, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl x8, sp, #4
+    ; CHECK-OFFSET-NEXT: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x8, #28, mul vl]
+    ; CHECK-OFFSET-NEXT: addvl sp, sp, #31
+    ; CHECK-OFFSET-NEXT: addvl sp, sp, #1
+    ; CHECK-OFFSET-NEXT: ldr x29, [sp], #16
+    ; CHECK-OFFSET-NEXT: ret
+
+    ST2B_IMM renamable $z0_z1, renamable $p0, %stack.0, -9
+    ST2B_IMM renamable $z0_z1, renamable $p0, %stack.0, 8
+    ST2H_IMM renamable $z0_z1, renamable $p0, %stack.0, -9
+    ST2H_IMM renamable $z0_z1, renamable $p0, %stack.0, 8
+    ST2W_IMM renamable $z0_z1, renamable $p0, %stack.0, -9
+    ST2W_IMM renamable $z0_z1, renamable $p0, %stack.0, 8
+    ST2D_IMM renamable $z0_z1, renamable $p0, %stack.0, -9
+    ST2D_IMM renamable $z0_z1, renamable $p0, %stack.0, 8
+
+    ST3B_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -9
+    ST3B_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 8
+    ST3H_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -9
+    ST3H_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 8
+    ST3W_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -9
+    ST3W_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 8
+    ST3D_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, -9
+    ST3D_IMM renamable $z0_z1_z2, renamable $p0, %stack.0, 8
+
+    ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -9
+    ST4B_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 8
+    ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -9
+    ST4H_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 8
+    ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -9
+    ST4W_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 8
+    ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, -9
+    ST4D_IMM renamable $z0_z1_z2_z3, renamable $p0, %stack.0, 8
+    RET_ReallyLR
+...


        


More information about the llvm-branch-commits mailing list