[llvm] da675b9 - [RISCV] Expand test coverage of stack offsets between 2^11 and 2^15

Philip Reames via llvm-commits llvm-commits@lists.llvm.org
Sun Apr 7 15:22:52 PDT 2024


Author: Philip Reames
Date: 2024-04-07T15:22:25-07:00
New Revision: da675b922cca3dc9a76642d792e882979a3d8c82

URL: https://github.com/llvm/llvm-project/commit/da675b922cca3dc9a76642d792e882979a3d8c82
DIFF: https://github.com/llvm/llvm-project/commit/da675b922cca3dc9a76642d792e882979a3d8c82.diff

LOG: [RISCV] Expand test coverage of stack offsets between 2^11 and 2^15

Adds two sets of tests.  First, a set for prolog/epilogue insertion
where the second stack adjustment could be done with shNadd when zba is
available.  Second, a set of tests addressing at offsets from SP in the
same ranges, but with varying alignments.  This range is interesting
because such offsets no longer fit the 12-bit signed immediate of a
single addi (-2048 to 2047), so the backend has to split or materialize
the adjustment.
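
For illustration, the kind of zba sequence these tests aim to cover
might look like the sketch below.  This is not output from this commit
(the RV32ZBA/RV64ZBA check prefixes below are currently unused); it
assumes the Zba semantics sh2add rd, rs1, rs2 = rs2 + (rs1 << 2):

    # base ISA today (see frame_4kb below): lui materializes 4096
    lui    a0, 1               # a0 = 4096
    sub    sp, sp, a0
    # hypothetical zba alternative: sp + (-1024 << 2) = sp - 4096
    li     a0, -1024
    sh2add sp, a0, sp

The payoff grows for constants lui alone cannot form, e.g. an
adjustment of 6128 = 766 << 3 could be li plus sh3add rather than
lui, addi, and sub.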

Added: 
    llvm/test/CodeGen/RISCV/prolog-epilogue.ll
    llvm/test/CodeGen/RISCV/stack-offset.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/prolog-epilogue.ll b/llvm/test/CodeGen/RISCV/prolog-epilogue.ll
new file mode 100644
index 00000000000000..700481d9e13064
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/prolog-epilogue.ll
@@ -0,0 +1,307 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV32,RV32I
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+zba < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV32,RV32ZBA
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV64,RV64I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs -mattr=+zba < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV64,RV64ZBA
+
+declare void @callee(ptr)
+
+define void @frame_16b() {
+; RV32-LABEL: frame_16b:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    li a0, 0
+; RV32-NEXT:    call callee
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: frame_16b:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    li a0, 0
+; RV64-NEXT:    call callee
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  call void @callee(ptr null)
+  ret void
+}
+
+define void @frame_1024b() {
+; RV32-LABEL: frame_1024b:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -1024
+; RV32-NEXT:    .cfi_def_cfa_offset 1024
+; RV32-NEXT:    sw ra, 1020(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    addi a0, sp, 12
+; RV32-NEXT:    call callee
+; RV32-NEXT:    lw ra, 1020(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 1024
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: frame_1024b:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -1024
+; RV64-NEXT:    .cfi_def_cfa_offset 1024
+; RV64-NEXT:    sd ra, 1016(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    call callee
+; RV64-NEXT:    ld ra, 1016(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 1024
+; RV64-NEXT:    ret
+  %a = alloca [1008 x i8]
+  call void @callee(ptr %a)
+  ret void
+}
+
+define void @frame_2048b() {
+; RV32-LABEL: frame_2048b:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -2032
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
+; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 2048
+; RV32-NEXT:    addi a0, sp, 12
+; RV32-NEXT:    call callee
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: frame_2048b:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -2032
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
+; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 2048
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    call callee
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    ret
+  %a = alloca [2032 x i8]
+  call void @callee(ptr %a)
+  ret void
+}
+
+define void @frame_4096b() {
+; RV32-LABEL: frame_4096b:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -2032
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
+; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    addi sp, sp, -2048
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 4096
+; RV32-NEXT:    addi a0, sp, 12
+; RV32-NEXT:    call callee
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: frame_4096b:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -2032
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
+; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    addi sp, sp, -2048
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 4096
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    call callee
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    ret
+  %a = alloca [4080 x i8]
+  call void @callee(ptr %a)
+  ret void
+}
+
+;; 2^12-16+2032
+define void @frame_4kb() {
+; RV32-LABEL: frame_4kb:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -2032
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
+; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa_offset 6128
+; RV32-NEXT:    addi a0, sp, 12
+; RV32-NEXT:    call callee
+; RV32-NEXT:    lui a0, 1
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: frame_4kb:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -2032
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
+; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    sub sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa_offset 6128
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    call callee
+; RV64-NEXT:    lui a0, 1
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    ret
+  %a = alloca [6112 x i8]
+  call void @callee(ptr %a)
+  ret void
+}
+
+;; 2^13-16+2032
+define void @frame_8kb() {
+; RV32-LABEL: frame_8kb:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -2032
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
+; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    lui a0, 2
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa_offset 10224
+; RV32-NEXT:    addi a0, sp, 12
+; RV32-NEXT:    call callee
+; RV32-NEXT:    lui a0, 2
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: frame_8kb:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -2032
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
+; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    lui a0, 2
+; RV64-NEXT:    sub sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa_offset 10224
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    call callee
+; RV64-NEXT:    lui a0, 2
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    ret
+  %a = alloca [10208 x i8]
+  call void @callee(ptr %a)
+  ret void
+}
+
+;; 2^14-16+2032
+define void @frame_16kb() {
+; RV32-LABEL: frame_16kb:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -2032
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
+; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    lui a0, 4
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa_offset 18416
+; RV32-NEXT:    addi a0, sp, 12
+; RV32-NEXT:    call callee
+; RV32-NEXT:    lui a0, 4
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: frame_16kb:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -2032
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
+; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    lui a0, 4
+; RV64-NEXT:    sub sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa_offset 18416
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    call callee
+; RV64-NEXT:    lui a0, 4
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    ret
+  %a = alloca [18400 x i8]
+  call void @callee(ptr %a)
+  ret void
+}
+
+;; 2^15-16+2032
+define void @frame_32kb() {
+; RV32-LABEL: frame_32kb:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -2032
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
+; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    lui a0, 8
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    .cfi_def_cfa_offset 34800
+; RV32-NEXT:    addi a0, sp, 12
+; RV32-NEXT:    call callee
+; RV32-NEXT:    lui a0, 8
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: frame_32kb:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -2032
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
+; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    lui a0, 8
+; RV64-NEXT:    sub sp, sp, a0
+; RV64-NEXT:    .cfi_def_cfa_offset 34800
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    call callee
+; RV64-NEXT:    lui a0, 8
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    ret
+  %a = alloca [34784 x i8]
+  call void @callee(ptr %a)
+  ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32I: {{.*}}
+; RV32ZBA: {{.*}}
+; RV64I: {{.*}}
+; RV64ZBA: {{.*}}

diff --git a/llvm/test/CodeGen/RISCV/stack-offset.ll b/llvm/test/CodeGen/RISCV/stack-offset.ll
new file mode 100644
index 00000000000000..6a24e5dcdbc3dc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/stack-offset.ll
@@ -0,0 +1,259 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV32,RV32I
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+zba < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV32,RV32ZBA
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV64,RV64I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs -mattr=+zba < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV64,RV64ZBA
+
+declare void @inspect(...)
+
+define void @test() {
+; RV32-LABEL: test:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -2032
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
+; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    addi sp, sp, -2048
+; RV32-NEXT:    addi sp, sp, -1120
+; RV32-NEXT:    .cfi_def_cfa_offset 5200
+; RV32-NEXT:    addi a0, sp, 12
+; RV32-NEXT:    addi a1, sp, 2047
+; RV32-NEXT:    addi a1, a1, 13
+; RV32-NEXT:    lui a2, 1
+; RV32-NEXT:    addi a2, a2, 12
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    lui a3, 1
+; RV32-NEXT:    addi a3, a3, 1036
+; RV32-NEXT:    add a3, sp, a3
+; RV32-NEXT:    call inspect
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    addi sp, sp, 1136
+; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -2032
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
+; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    addi sp, sp, -2048
+; RV64-NEXT:    addi sp, sp, -1120
+; RV64-NEXT:    .cfi_def_cfa_offset 5200
+; RV64-NEXT:    addi a0, sp, 8
+; RV64-NEXT:    addi a1, sp, 2047
+; RV64-NEXT:    addi a1, a1, 9
+; RV64-NEXT:    lui a2, 1
+; RV64-NEXT:    addiw a2, a2, 8
+; RV64-NEXT:    add a2, sp, a2
+; RV64-NEXT:    lui a3, 1
+; RV64-NEXT:    addiw a3, a3, 1032
+; RV64-NEXT:    add a3, sp, a3
+; RV64-NEXT:    call inspect
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    addi sp, sp, 1136
+; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    ret
+  %p4 = alloca [64 x i8], align 1
+  %p3 = alloca [1024 x i8], align 1
+  %p2 = alloca [2048 x i8], align 1
+  %p1 = alloca [2048 x i8], align 1
+  call void (...) @inspect(ptr %p1, ptr %p2, ptr %p3, ptr %p4)
+  ret void
+}
+
+define void @align_8() {
+; RV32-LABEL: align_8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -2032
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
+; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    addi sp, sp, -2048
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 4112
+; RV32-NEXT:    addi a0, sp, 7
+; RV32-NEXT:    lui a1, 1
+; RV32-NEXT:    addi a1, a1, 8
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    call inspect
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: align_8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -2032
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
+; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    addi sp, sp, -2048
+; RV64-NEXT:    addi sp, sp, -48
+; RV64-NEXT:    .cfi_def_cfa_offset 4128
+; RV64-NEXT:    addi a0, sp, 15
+; RV64-NEXT:    lui a1, 1
+; RV64-NEXT:    addiw a1, a1, 16
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    call inspect
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    ret
+  %p2 = alloca i8, align 8
+  %p1 = alloca [4097 x i8], align 1
+  call void (...) @inspect(ptr %p1, ptr %p2)
+  ret void
+}
+
+define void @align_4() {
+; RV32-LABEL: align_4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -2032
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
+; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    addi sp, sp, -2048
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 4112
+; RV32-NEXT:    addi a0, sp, 7
+; RV32-NEXT:    lui a1, 1
+; RV32-NEXT:    addi a1, a1, 8
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    call inspect
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: align_4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -2032
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
+; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    addi sp, sp, -2048
+; RV64-NEXT:    addi sp, sp, -48
+; RV64-NEXT:    .cfi_def_cfa_offset 4128
+; RV64-NEXT:    addi a0, sp, 19
+; RV64-NEXT:    lui a1, 1
+; RV64-NEXT:    addiw a1, a1, 20
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    call inspect
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    ret
+  %p2 = alloca i8, align 4
+  %p1 = alloca [4097 x i8], align 1
+  call void (...) @inspect(ptr %p1, ptr %p2)
+  ret void
+}
+
+define void @align_2() {
+; RV32-LABEL: align_2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -2032
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
+; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    addi sp, sp, -2048
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 4112
+; RV32-NEXT:    addi a0, sp, 9
+; RV32-NEXT:    lui a1, 1
+; RV32-NEXT:    addi a1, a1, 10
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    call inspect
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: align_2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -2032
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
+; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    addi sp, sp, -2048
+; RV64-NEXT:    addi sp, sp, -48
+; RV64-NEXT:    .cfi_def_cfa_offset 4128
+; RV64-NEXT:    addi a0, sp, 21
+; RV64-NEXT:    lui a1, 1
+; RV64-NEXT:    addiw a1, a1, 22
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    call inspect
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    ret
+  %p2 = alloca i8, align 2
+  %p1 = alloca [4097 x i8], align 1
+  call void (...) @inspect(ptr %p1, ptr %p2)
+  ret void
+}
+
+
+define void @align_1() {
+; RV32-LABEL: align_1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -2032
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
+; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    addi sp, sp, -2048
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 4112
+; RV32-NEXT:    addi a0, sp, 10
+; RV32-NEXT:    lui a1, 1
+; RV32-NEXT:    addi a1, a1, 11
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    call inspect
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    addi sp, sp, 48
+; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: align_1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -2032
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
+; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    addi sp, sp, -2048
+; RV64-NEXT:    addi sp, sp, -48
+; RV64-NEXT:    .cfi_def_cfa_offset 4128
+; RV64-NEXT:    addi a0, sp, 22
+; RV64-NEXT:    lui a1, 1
+; RV64-NEXT:    addiw a1, a1, 23
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    call inspect
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    ret
+  %p2 = alloca i8, align 1
+  %p1 = alloca [4097 x i8], align 1
+  call void (...) @inspect(ptr %p1, ptr %p2)
+  ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32I: {{.*}}
+; RV32ZBA: {{.*}}
+; RV64I: {{.*}}
+; RV64ZBA: {{.*}}
