[llvm] [RISCV] Exploit sh3add/sh2add for stack offsets by shifted 12-bit constants (PR #87950)

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Sun Apr 7 15:51:11 PDT 2024


https://github.com/preames created https://github.com/llvm/llvm-project/pull/87950

If we're falling back to generic constant formation in a register plus an add/sub, we can check whether the constant is a 12-bit value left-shifted by 2 or 3. If so, we can use a sh2add or sh3add to perform the shift and the add in a single instruction.

This is profitable when the unshifted constant would require two instructions (LUI/ADDI) to form, but is never harmful since we're going to need at least two instructions regardless of the constant value.
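
For example (lifted from the stack-offset.ll diff below), forming sp + 4108 compares as follows:

    # base RV32I (three instructions)
    lui    a2, 1
    addi   a2, a2, 12
    add    a2, sp, a2

    # with Zba (two instructions), since 4108 = 1027 << 2
    li     a2, 1027
    sh2add a2, a2, sp

For constants lui alone can materialize, such as 4096, both forms cost two instructions (lui a0, 1 / sub sp, sp, a0 versus li a0, -512 / sh3add sp, a0, sp), so eagerly using shNadd never loses.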

Since stacks are aligned to 16 bytes by default, sh3add allows addressing (aligned) data out to 2^14 bytes (i.e. 16KiB) in at most two instructions with Zba.
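
Concretely: a signed 12-bit immediate spans [-2048, 2047], so li + sh3add reaches any multiple of 8 in [-16384, 16376]. That's how frame_16kb below drops sp by exactly 16384 via li a0, -2048 / sh3add sp, a0, sp, while the matching +16384 restore still falls back to lui a0, 4 / add, because +2048 doesn't fit in a signed 12-bit immediate.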

From b4af0bbc344932f57abd148840d543a3ecdf2b6e Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Fri, 29 Mar 2024 15:56:18 -0700
Subject: [PATCH] [RISCV] Exploit sh3add/sh2add for stack offsets by shifted
 12-bit constants

If we're falling back to generic constant formation in a register plus an
add/sub, we can check whether the constant is a 12-bit value left-shifted by
2 or 3. If so, we can use a sh2add or sh3add to perform the shift and the add
in a single instruction.

This is profitable when the unshifted constant would require two instructions
(LUI/ADDI) to form, but is never harmful since we're going to need at least
two instructions regardless of the constant value.

Since stacks are aligned to 16 bytes by default, sh3add allows addressing
(aligned) data out to 2^14 bytes (i.e. 16KiB) in at most two instructions with Zba.
---
 llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp |  27 ++
 llvm/test/CodeGen/RISCV/prolog-epilogue.ll  | 299 ++++++++++------
 llvm/test/CodeGen/RISCV/stack-offset.ll     | 375 +++++++++++++-------
 3 files changed, 471 insertions(+), 230 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 11c3f2d57eb00f..848b6d33f7d90f 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -239,6 +239,33 @@ void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
     return;
   }
 
+  // Use shNadd if doing so lets us materialize a 12-bit immediate with a
+  // single instruction.  This saves 1 instruction over the full lui/addi+add
+  // fallback path.  Note that the fallback path uses a minimum of 2
+  // instructions even for "cheap" immediates, so eagerly using shNadd is no
+  // worse even when it is not better.  The sh1add case is fully covered by
+  // the 2x addi case just above and is thus omitted.
+  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
+  if (STI.hasStdExtZba()) {
+    unsigned Opc = 0;
+    if (isShiftedInt<12, 3>(Val)) {
+      Opc = RISCV::SH3ADD;
+      Val = Val >> 3;
+    } else if (isShiftedInt<12, 2>(Val)) {
+      Opc = RISCV::SH2ADD;
+      Val = Val >> 2;
+    }
+    if (Opc) {
+      Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+      TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
+      BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
+        .addReg(ScratchReg, RegState::Kill)
+        .addReg(SrcReg, getKillRegState(KillSrcReg))
+        .setMIFlag(Flag);
+      return;
+    }
+  }
+
   unsigned Opc = RISCV::ADD;
   if (Val < 0) {
     Val = -Val;
diff --git a/llvm/test/CodeGen/RISCV/prolog-epilogue.ll b/llvm/test/CodeGen/RISCV/prolog-epilogue.ll
index 700481d9e13064..cc4e177f9b2903 100644
--- a/llvm/test/CodeGen/RISCV/prolog-epilogue.ll
+++ b/llvm/test/CodeGen/RISCV/prolog-epilogue.ll
@@ -143,39 +143,73 @@ define void @frame_4096b() {
 
 ;; 2^12-16+2032
 define void @frame_4kb() {
-; RV32-LABEL: frame_4kb:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -2032
-; RV32-NEXT:    .cfi_def_cfa_offset 2032
-; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    lui a0, 1
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    .cfi_def_cfa_offset 6128
-; RV32-NEXT:    addi a0, sp, 12
-; RV32-NEXT:    call callee
-; RV32-NEXT:    lui a0, 1
-; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    ret
+; RV32I-LABEL: frame_4kb:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    lui a0, 1
+; RV32I-NEXT:    sub sp, sp, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 6128
+; RV32I-NEXT:    addi a0, sp, 12
+; RV32I-NEXT:    call callee
+; RV32I-NEXT:    lui a0, 1
+; RV32I-NEXT:    add sp, sp, a0
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: frame_4kb:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -2032
-; RV64-NEXT:    .cfi_def_cfa_offset 2032
-; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    lui a0, 1
-; RV64-NEXT:    sub sp, sp, a0
-; RV64-NEXT:    .cfi_def_cfa_offset 6128
-; RV64-NEXT:    addi a0, sp, 8
-; RV64-NEXT:    call callee
-; RV64-NEXT:    lui a0, 1
-; RV64-NEXT:    add sp, sp, a0
-; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    ret
+; RV32ZBA-LABEL: frame_4kb:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    addi sp, sp, -2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32ZBA-NEXT:    .cfi_offset ra, -4
+; RV32ZBA-NEXT:    li a0, -512
+; RV32ZBA-NEXT:    sh3add sp, a0, sp
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 6128
+; RV32ZBA-NEXT:    addi a0, sp, 12
+; RV32ZBA-NEXT:    call callee
+; RV32ZBA-NEXT:    li a0, 512
+; RV32ZBA-NEXT:    sh3add sp, a0, sp
+; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    ret
+;
+; RV64I-LABEL: frame_4kb:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    lui a0, 1
+; RV64I-NEXT:    sub sp, sp, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 6128
+; RV64I-NEXT:    addi a0, sp, 8
+; RV64I-NEXT:    call callee
+; RV64I-NEXT:    lui a0, 1
+; RV64I-NEXT:    add sp, sp, a0
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: frame_4kb:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    addi sp, sp, -2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64ZBA-NEXT:    .cfi_offset ra, -8
+; RV64ZBA-NEXT:    li a0, -512
+; RV64ZBA-NEXT:    sh3add sp, a0, sp
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 6128
+; RV64ZBA-NEXT:    addi a0, sp, 8
+; RV64ZBA-NEXT:    call callee
+; RV64ZBA-NEXT:    li a0, 512
+; RV64ZBA-NEXT:    sh3add sp, a0, sp
+; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    ret
   %a = alloca [6112 x i8]
   call void @callee(ptr %a)
   ret void
@@ -183,39 +217,73 @@ define void @frame_4kb() {
 
 ;; 2^13-16+2032
 define void @frame_8kb() {
-; RV32-LABEL: frame_8kb:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -2032
-; RV32-NEXT:    .cfi_def_cfa_offset 2032
-; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    lui a0, 2
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    .cfi_def_cfa_offset 10224
-; RV32-NEXT:    addi a0, sp, 12
-; RV32-NEXT:    call callee
-; RV32-NEXT:    lui a0, 2
-; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    ret
+; RV32I-LABEL: frame_8kb:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    lui a0, 2
+; RV32I-NEXT:    sub sp, sp, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 10224
+; RV32I-NEXT:    addi a0, sp, 12
+; RV32I-NEXT:    call callee
+; RV32I-NEXT:    lui a0, 2
+; RV32I-NEXT:    add sp, sp, a0
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: frame_8kb:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -2032
-; RV64-NEXT:    .cfi_def_cfa_offset 2032
-; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    lui a0, 2
-; RV64-NEXT:    sub sp, sp, a0
-; RV64-NEXT:    .cfi_def_cfa_offset 10224
-; RV64-NEXT:    addi a0, sp, 8
-; RV64-NEXT:    call callee
-; RV64-NEXT:    lui a0, 2
-; RV64-NEXT:    add sp, sp, a0
-; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    ret
+; RV32ZBA-LABEL: frame_8kb:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    addi sp, sp, -2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32ZBA-NEXT:    .cfi_offset ra, -4
+; RV32ZBA-NEXT:    li a0, -1024
+; RV32ZBA-NEXT:    sh3add sp, a0, sp
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 10224
+; RV32ZBA-NEXT:    addi a0, sp, 12
+; RV32ZBA-NEXT:    call callee
+; RV32ZBA-NEXT:    li a0, 1024
+; RV32ZBA-NEXT:    sh3add sp, a0, sp
+; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    ret
+;
+; RV64I-LABEL: frame_8kb:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    lui a0, 2
+; RV64I-NEXT:    sub sp, sp, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 10224
+; RV64I-NEXT:    addi a0, sp, 8
+; RV64I-NEXT:    call callee
+; RV64I-NEXT:    lui a0, 2
+; RV64I-NEXT:    add sp, sp, a0
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: frame_8kb:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    addi sp, sp, -2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64ZBA-NEXT:    .cfi_offset ra, -8
+; RV64ZBA-NEXT:    li a0, -1024
+; RV64ZBA-NEXT:    sh3add sp, a0, sp
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 10224
+; RV64ZBA-NEXT:    addi a0, sp, 8
+; RV64ZBA-NEXT:    call callee
+; RV64ZBA-NEXT:    li a0, 1024
+; RV64ZBA-NEXT:    sh3add sp, a0, sp
+; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    ret
   %a = alloca [10208 x i8]
   call void @callee(ptr %a)
   ret void
@@ -223,39 +291,73 @@ define void @frame_8kb() {
 
 ;; 2^14-16+2032
 define void @frame_16kb() {
-; RV32-LABEL: frame_16kb:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -2032
-; RV32-NEXT:    .cfi_def_cfa_offset 2032
-; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    lui a0, 4
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    .cfi_def_cfa_offset 18416
-; RV32-NEXT:    addi a0, sp, 12
-; RV32-NEXT:    call callee
-; RV32-NEXT:    lui a0, 4
-; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    ret
+; RV32I-LABEL: frame_16kb:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    lui a0, 4
+; RV32I-NEXT:    sub sp, sp, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 18416
+; RV32I-NEXT:    addi a0, sp, 12
+; RV32I-NEXT:    call callee
+; RV32I-NEXT:    lui a0, 4
+; RV32I-NEXT:    add sp, sp, a0
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: frame_16kb:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -2032
-; RV64-NEXT:    .cfi_def_cfa_offset 2032
-; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    lui a0, 4
-; RV64-NEXT:    sub sp, sp, a0
-; RV64-NEXT:    .cfi_def_cfa_offset 18416
-; RV64-NEXT:    addi a0, sp, 8
-; RV64-NEXT:    call callee
-; RV64-NEXT:    lui a0, 4
-; RV64-NEXT:    add sp, sp, a0
-; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    ret
+; RV32ZBA-LABEL: frame_16kb:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    addi sp, sp, -2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32ZBA-NEXT:    .cfi_offset ra, -4
+; RV32ZBA-NEXT:    li a0, -2048
+; RV32ZBA-NEXT:    sh3add sp, a0, sp
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 18416
+; RV32ZBA-NEXT:    addi a0, sp, 12
+; RV32ZBA-NEXT:    call callee
+; RV32ZBA-NEXT:    lui a0, 4
+; RV32ZBA-NEXT:    add sp, sp, a0
+; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    ret
+;
+; RV64I-LABEL: frame_16kb:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    lui a0, 4
+; RV64I-NEXT:    sub sp, sp, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 18416
+; RV64I-NEXT:    addi a0, sp, 8
+; RV64I-NEXT:    call callee
+; RV64I-NEXT:    lui a0, 4
+; RV64I-NEXT:    add sp, sp, a0
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: frame_16kb:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    addi sp, sp, -2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64ZBA-NEXT:    .cfi_offset ra, -8
+; RV64ZBA-NEXT:    li a0, -2048
+; RV64ZBA-NEXT:    sh3add sp, a0, sp
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 18416
+; RV64ZBA-NEXT:    addi a0, sp, 8
+; RV64ZBA-NEXT:    call callee
+; RV64ZBA-NEXT:    lui a0, 4
+; RV64ZBA-NEXT:    add sp, sp, a0
+; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    ret
   %a = alloca [18400 x i8]
   call void @callee(ptr %a)
   ret void
@@ -300,8 +402,3 @@ define void @frame_32kb() {
   call void @callee(ptr %a)
   ret void
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; RV32I: {{.*}}
-; RV32ZBA: {{.*}}
-; RV64I: {{.*}}
-; RV64ZBA: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/stack-offset.ll b/llvm/test/CodeGen/RISCV/stack-offset.ll
index 6a24e5dcdbc3dc..cc81fd62eba9da 100644
--- a/llvm/test/CodeGen/RISCV/stack-offset.ll
+++ b/llvm/test/CodeGen/RISCV/stack-offset.ll
@@ -11,55 +11,101 @@
 declare void @inspect(...)
 
 define void @test() {
-; RV32-LABEL: test:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -2032
-; RV32-NEXT:    .cfi_def_cfa_offset 2032
-; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    addi sp, sp, -2048
-; RV32-NEXT:    addi sp, sp, -1120
-; RV32-NEXT:    .cfi_def_cfa_offset 5200
-; RV32-NEXT:    addi a0, sp, 12
-; RV32-NEXT:    addi a1, sp, 2047
-; RV32-NEXT:    addi a1, a1, 13
-; RV32-NEXT:    lui a2, 1
-; RV32-NEXT:    addi a2, a2, 12
-; RV32-NEXT:    add a2, sp, a2
-; RV32-NEXT:    lui a3, 1
-; RV32-NEXT:    addi a3, a3, 1036
-; RV32-NEXT:    add a3, sp, a3
-; RV32-NEXT:    call inspect
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    addi sp, sp, 1136
-; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    ret
+; RV32I-LABEL: test:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    addi sp, sp, -2048
+; RV32I-NEXT:    addi sp, sp, -1120
+; RV32I-NEXT:    .cfi_def_cfa_offset 5200
+; RV32I-NEXT:    addi a0, sp, 12
+; RV32I-NEXT:    addi a1, sp, 2047
+; RV32I-NEXT:    addi a1, a1, 13
+; RV32I-NEXT:    lui a2, 1
+; RV32I-NEXT:    addi a2, a2, 12
+; RV32I-NEXT:    add a2, sp, a2
+; RV32I-NEXT:    lui a3, 1
+; RV32I-NEXT:    addi a3, a3, 1036
+; RV32I-NEXT:    add a3, sp, a3
+; RV32I-NEXT:    call inspect
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    addi sp, sp, 1136
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: test:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -2032
-; RV64-NEXT:    .cfi_def_cfa_offset 2032
-; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    addi sp, sp, -2048
-; RV64-NEXT:    addi sp, sp, -1120
-; RV64-NEXT:    .cfi_def_cfa_offset 5200
-; RV64-NEXT:    addi a0, sp, 8
-; RV64-NEXT:    addi a1, sp, 2047
-; RV64-NEXT:    addi a1, a1, 9
-; RV64-NEXT:    lui a2, 1
-; RV64-NEXT:    addiw a2, a2, 8
-; RV64-NEXT:    add a2, sp, a2
-; RV64-NEXT:    lui a3, 1
-; RV64-NEXT:    addiw a3, a3, 1032
-; RV64-NEXT:    add a3, sp, a3
-; RV64-NEXT:    call inspect
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    addi sp, sp, 1136
-; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    ret
+; RV32ZBA-LABEL: test:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    addi sp, sp, -2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32ZBA-NEXT:    .cfi_offset ra, -4
+; RV32ZBA-NEXT:    addi sp, sp, -2048
+; RV32ZBA-NEXT:    addi sp, sp, -1120
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 5200
+; RV32ZBA-NEXT:    addi a0, sp, 12
+; RV32ZBA-NEXT:    addi a1, sp, 2047
+; RV32ZBA-NEXT:    addi a1, a1, 13
+; RV32ZBA-NEXT:    li a2, 1027
+; RV32ZBA-NEXT:    sh2add a2, a2, sp
+; RV32ZBA-NEXT:    li a3, 1283
+; RV32ZBA-NEXT:    sh2add a3, a3, sp
+; RV32ZBA-NEXT:    call inspect
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    addi sp, sp, 1136
+; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    ret
+;
+; RV64I-LABEL: test:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    addi sp, sp, -2048
+; RV64I-NEXT:    addi sp, sp, -1120
+; RV64I-NEXT:    .cfi_def_cfa_offset 5200
+; RV64I-NEXT:    addi a0, sp, 8
+; RV64I-NEXT:    addi a1, sp, 2047
+; RV64I-NEXT:    addi a1, a1, 9
+; RV64I-NEXT:    lui a2, 1
+; RV64I-NEXT:    addiw a2, a2, 8
+; RV64I-NEXT:    add a2, sp, a2
+; RV64I-NEXT:    lui a3, 1
+; RV64I-NEXT:    addiw a3, a3, 1032
+; RV64I-NEXT:    add a3, sp, a3
+; RV64I-NEXT:    call inspect
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    addi sp, sp, 1136
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: test:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    addi sp, sp, -2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64ZBA-NEXT:    .cfi_offset ra, -8
+; RV64ZBA-NEXT:    addi sp, sp, -2048
+; RV64ZBA-NEXT:    addi sp, sp, -1120
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 5200
+; RV64ZBA-NEXT:    addi a0, sp, 8
+; RV64ZBA-NEXT:    addi a1, sp, 2047
+; RV64ZBA-NEXT:    addi a1, a1, 9
+; RV64ZBA-NEXT:    li a2, 513
+; RV64ZBA-NEXT:    sh3add a2, a2, sp
+; RV64ZBA-NEXT:    li a3, 641
+; RV64ZBA-NEXT:    sh3add a3, a3, sp
+; RV64ZBA-NEXT:    call inspect
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    addi sp, sp, 1136
+; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    ret
   %p4 = alloca [64 x i8], align 1
   %p3 = alloca [1024 x i8], align 1
   %p2 = alloca [2048 x i8], align 1
@@ -69,45 +115,83 @@ define void @test() {
 }
 
 define void @align_8() {
-; RV32-LABEL: align_8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -2032
-; RV32-NEXT:    .cfi_def_cfa_offset 2032
-; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    addi sp, sp, -2048
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 4112
-; RV32-NEXT:    addi a0, sp, 7
-; RV32-NEXT:    lui a1, 1
-; RV32-NEXT:    addi a1, a1, 8
-; RV32-NEXT:    add a1, sp, a1
-; RV32-NEXT:    call inspect
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    addi sp, sp, 48
-; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    ret
+; RV32I-LABEL: align_8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    addi sp, sp, -2048
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    .cfi_def_cfa_offset 4112
+; RV32I-NEXT:    addi a0, sp, 7
+; RV32I-NEXT:    lui a1, 1
+; RV32I-NEXT:    addi a1, a1, 8
+; RV32I-NEXT:    add a1, sp, a1
+; RV32I-NEXT:    call inspect
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: align_8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -2032
-; RV64-NEXT:    .cfi_def_cfa_offset 2032
-; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    addi sp, sp, -2048
-; RV64-NEXT:    addi sp, sp, -48
-; RV64-NEXT:    .cfi_def_cfa_offset 4128
-; RV64-NEXT:    addi a0, sp, 15
-; RV64-NEXT:    lui a1, 1
-; RV64-NEXT:    addiw a1, a1, 16
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    call inspect
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    addi sp, sp, 64
-; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    ret
+; RV32ZBA-LABEL: align_8:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    addi sp, sp, -2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32ZBA-NEXT:    .cfi_offset ra, -4
+; RV32ZBA-NEXT:    addi sp, sp, -2048
+; RV32ZBA-NEXT:    addi sp, sp, -32
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 4112
+; RV32ZBA-NEXT:    addi a0, sp, 7
+; RV32ZBA-NEXT:    li a1, 513
+; RV32ZBA-NEXT:    sh3add a1, a1, sp
+; RV32ZBA-NEXT:    call inspect
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    addi sp, sp, 48
+; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    ret
+;
+; RV64I-LABEL: align_8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    addi sp, sp, -2048
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    .cfi_def_cfa_offset 4128
+; RV64I-NEXT:    addi a0, sp, 15
+; RV64I-NEXT:    lui a1, 1
+; RV64I-NEXT:    addiw a1, a1, 16
+; RV64I-NEXT:    add a1, sp, a1
+; RV64I-NEXT:    call inspect
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: align_8:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    addi sp, sp, -2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64ZBA-NEXT:    .cfi_offset ra, -8
+; RV64ZBA-NEXT:    addi sp, sp, -2048
+; RV64ZBA-NEXT:    addi sp, sp, -48
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 4128
+; RV64ZBA-NEXT:    addi a0, sp, 15
+; RV64ZBA-NEXT:    li a1, 514
+; RV64ZBA-NEXT:    sh3add a1, a1, sp
+; RV64ZBA-NEXT:    call inspect
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    addi sp, sp, 64
+; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    ret
   %p2 = alloca i8, align 8
   %p1 = alloca [4097 x i8], align 1
   call void (...) @inspect(ptr %p1, ptr %p2)
@@ -115,45 +199,83 @@ define void @align_8() {
 }
 
 define void @align_4() {
-; RV32-LABEL: align_4:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -2032
-; RV32-NEXT:    .cfi_def_cfa_offset 2032
-; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    addi sp, sp, -2048
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 4112
-; RV32-NEXT:    addi a0, sp, 7
-; RV32-NEXT:    lui a1, 1
-; RV32-NEXT:    addi a1, a1, 8
-; RV32-NEXT:    add a1, sp, a1
-; RV32-NEXT:    call inspect
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    addi sp, sp, 48
-; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    ret
+; RV32I-LABEL: align_4:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    addi sp, sp, -2048
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    .cfi_def_cfa_offset 4112
+; RV32I-NEXT:    addi a0, sp, 7
+; RV32I-NEXT:    lui a1, 1
+; RV32I-NEXT:    addi a1, a1, 8
+; RV32I-NEXT:    add a1, sp, a1
+; RV32I-NEXT:    call inspect
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: align_4:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -2032
-; RV64-NEXT:    .cfi_def_cfa_offset 2032
-; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    addi sp, sp, -2048
-; RV64-NEXT:    addi sp, sp, -48
-; RV64-NEXT:    .cfi_def_cfa_offset 4128
-; RV64-NEXT:    addi a0, sp, 19
-; RV64-NEXT:    lui a1, 1
-; RV64-NEXT:    addiw a1, a1, 20
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    call inspect
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    addi sp, sp, 64
-; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    ret
+; RV32ZBA-LABEL: align_4:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    addi sp, sp, -2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32ZBA-NEXT:    .cfi_offset ra, -4
+; RV32ZBA-NEXT:    addi sp, sp, -2048
+; RV32ZBA-NEXT:    addi sp, sp, -32
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 4112
+; RV32ZBA-NEXT:    addi a0, sp, 7
+; RV32ZBA-NEXT:    li a1, 513
+; RV32ZBA-NEXT:    sh3add a1, a1, sp
+; RV32ZBA-NEXT:    call inspect
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    addi sp, sp, 48
+; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    ret
+;
+; RV64I-LABEL: align_4:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    addi sp, sp, -2048
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    .cfi_def_cfa_offset 4128
+; RV64I-NEXT:    addi a0, sp, 19
+; RV64I-NEXT:    lui a1, 1
+; RV64I-NEXT:    addiw a1, a1, 20
+; RV64I-NEXT:    add a1, sp, a1
+; RV64I-NEXT:    call inspect
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: align_4:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    addi sp, sp, -2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64ZBA-NEXT:    .cfi_offset ra, -8
+; RV64ZBA-NEXT:    addi sp, sp, -2048
+; RV64ZBA-NEXT:    addi sp, sp, -48
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 4128
+; RV64ZBA-NEXT:    addi a0, sp, 19
+; RV64ZBA-NEXT:    li a1, 1029
+; RV64ZBA-NEXT:    sh2add a1, a1, sp
+; RV64ZBA-NEXT:    call inspect
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    addi sp, sp, 64
+; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    ret
   %p2 = alloca i8, align 4
   %p1 = alloca [4097 x i8], align 1
   call void (...) @inspect(ptr %p1, ptr %p2)
@@ -252,8 +374,3 @@ define void @align_1() {
   call void (...) @inspect(ptr %p1, ptr %p2)
   ret void
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; RV32I: {{.*}}
-; RV32ZBA: {{.*}}
-; RV64I: {{.*}}
-; RV64ZBA: {{.*}}
