[llvm] [RISCV] Exploit sh3add/sh2add for stack offsets by shifted 12-bit constants (PR #87950)

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 8 14:52:26 PDT 2024


https://github.com/preames updated https://github.com/llvm/llvm-project/pull/87950

From 64db7387a16d283e618b91f97ad8fe7bf1220ab5 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Fri, 29 Mar 2024 15:56:18 -0700
Subject: [PATCH 1/2] [RISCV] Exploit sh3add/sh2add for stack offsets by
 shifted 12-bit constants

If we're falling back to generic constant materialization into a register
plus an add/sub, we can check whether the constant is a 12-bit value left
shifted by 2 or 3.  If so, we can use a sh2add or sh3add to perform the
shift and the add in a single instruction.
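
As a concrete example, the 4224 byte sp adjustment in the
frame_4kb_offset_128 test below (4224 = 528 << 3, and 528 fits in a signed
12-bit immediate) goes from three instructions to two on RV64:

  # without Zba
  lui   a0, 1              # a0 = 4096
  addiw a0, a0, 128        # a0 = 4224
  sub   sp, sp, a0
  # with Zba
  li     a0, -528
  sh3add sp, a0, sp        # sp = (-528 << 3) + sp = sp - 4224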

This is profitable when the unshifted constant would require two instructions
(LUI/ADDI) to form, and neutral if forming it takes a single uncompressed
instruction.  To avoid a size regression, we must skip the case where c.lui
could form the immediate.  c.lui and c.li are our only single-instruction
compressed immediate sequences, and the c.li cases can't reach this code.
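
For instance, a hypothetical 8192 byte adjustment is deliberately left to
the fallback path even though 8192 = 1024 << 3:

  # low 12 bits are zero, so a single lui suffices and may compress
  lui a0, 2                # compressible to c.lui a0, 2
  add sp, sp, a0           # compressible to c.add sp, a0
  # li a0, 1024 + sh3add would instead be two uncompressed instructions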

Since stacks are aligned to 16 bytes by default, sh3add allows addressing
(aligned) data out to 2^14 bytes (i.e. 16KiB) in at most two instructions
with Zba.
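
(The 2^14 figure follows from the signed 12-bit range of li: -2048 << 3 =
-16384, so any 8-byte-aligned offset within +/-16KiB takes at most a li
plus a sh3add.)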
---
 llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp |  25 ++
 llvm/test/CodeGen/RISCV/prolog-epilogue.ll  | 323 +++++++++++------
 llvm/test/CodeGen/RISCV/stack-offset.ll     | 375 +++++++++++++-------
 3 files changed, 481 insertions(+), 242 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 713260b090e9cf..759334feefc64c 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -248,6 +248,31 @@ void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
     return;
   }
 
+  // Use shNadd if doing so lets us materialize a 12 bit immediate with a single
+  // instruction.  This saves 1 instruction over the full lui/addi+add fallback
+  // path.  We avoid anything which can be done with a single lui as it night
+  // be compressible.  Note that the sh1add case is fully covered by the 2x addi
+  // case just above and is thus omitted.
+  if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {
+    unsigned Opc = 0;
+    if (isShiftedInt<12, 3>(Val)) {
+      Opc = RISCV::SH3ADD;
+      Val = Val >> 3;
+    } else if (isShiftedInt<12, 2>(Val)) {
+      Opc = RISCV::SH2ADD;
+      Val = Val >> 2;
+    }
+    if (Opc) {
+      Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+      TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
+      BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
+          .addReg(ScratchReg, RegState::Kill)
+          .addReg(SrcReg, getKillRegState(KillSrcReg))
+          .setMIFlag(Flag);
+      return;
+    }
+  }
+
   unsigned Opc = RISCV::ADD;
   if (Val < 0) {
     Val = -Val;
diff --git a/llvm/test/CodeGen/RISCV/prolog-epilogue.ll b/llvm/test/CodeGen/RISCV/prolog-epilogue.ll
index 1204499deef5f3..50b236470ae644 100644
--- a/llvm/test/CodeGen/RISCV/prolog-epilogue.ll
+++ b/llvm/test/CodeGen/RISCV/prolog-epilogue.ll
@@ -182,43 +182,77 @@ define void @frame_4kb() {
 }
 
 define void @frame_4kb_offset_128() {
-; RV32-LABEL: frame_4kb_offset_128:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -2032
-; RV32-NEXT:    .cfi_def_cfa_offset 2032
-; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    lui a0, 1
-; RV32-NEXT:    addi a0, a0, 128
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    .cfi_def_cfa_offset 6256
-; RV32-NEXT:    addi a0, sp, 12
-; RV32-NEXT:    call callee
-; RV32-NEXT:    lui a0, 1
-; RV32-NEXT:    addi a0, a0, 128
-; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    ret
+; RV32I-LABEL: frame_4kb_offset_128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    lui a0, 1
+; RV32I-NEXT:    addi a0, a0, 128
+; RV32I-NEXT:    sub sp, sp, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 6256
+; RV32I-NEXT:    addi a0, sp, 12
+; RV32I-NEXT:    call callee
+; RV32I-NEXT:    lui a0, 1
+; RV32I-NEXT:    addi a0, a0, 128
+; RV32I-NEXT:    add sp, sp, a0
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: frame_4kb_offset_128:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -2032
-; RV64-NEXT:    .cfi_def_cfa_offset 2032
-; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    lui a0, 1
-; RV64-NEXT:    addiw a0, a0, 128
-; RV64-NEXT:    sub sp, sp, a0
-; RV64-NEXT:    .cfi_def_cfa_offset 6256
-; RV64-NEXT:    addi a0, sp, 8
-; RV64-NEXT:    call callee
-; RV64-NEXT:    lui a0, 1
-; RV64-NEXT:    addiw a0, a0, 128
-; RV64-NEXT:    add sp, sp, a0
-; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    ret
+; RV32ZBA-LABEL: frame_4kb_offset_128:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    addi sp, sp, -2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32ZBA-NEXT:    .cfi_offset ra, -4
+; RV32ZBA-NEXT:    li a0, -528
+; RV32ZBA-NEXT:    sh3add sp, a0, sp
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 6256
+; RV32ZBA-NEXT:    addi a0, sp, 12
+; RV32ZBA-NEXT:    call callee
+; RV32ZBA-NEXT:    li a0, 528
+; RV32ZBA-NEXT:    sh3add sp, a0, sp
+; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    ret
+;
+; RV64I-LABEL: frame_4kb_offset_128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    lui a0, 1
+; RV64I-NEXT:    addiw a0, a0, 128
+; RV64I-NEXT:    sub sp, sp, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 6256
+; RV64I-NEXT:    addi a0, sp, 8
+; RV64I-NEXT:    call callee
+; RV64I-NEXT:    lui a0, 1
+; RV64I-NEXT:    addiw a0, a0, 128
+; RV64I-NEXT:    add sp, sp, a0
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: frame_4kb_offset_128:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    addi sp, sp, -2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64ZBA-NEXT:    .cfi_offset ra, -8
+; RV64ZBA-NEXT:    li a0, -528
+; RV64ZBA-NEXT:    sh3add sp, a0, sp
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 6256
+; RV64ZBA-NEXT:    addi a0, sp, 8
+; RV64ZBA-NEXT:    call callee
+; RV64ZBA-NEXT:    li a0, 528
+; RV64ZBA-NEXT:    sh3add sp, a0, sp
+; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    ret
   %a = alloca [6240 x i8]
   call void @callee(ptr %a)
   ret void
@@ -266,86 +300,154 @@ define void @frame_8kb() {
 }
 
 define void @frame_8kb_offset_128() {
-; RV32-LABEL: frame_8kb_offset_128:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -2032
-; RV32-NEXT:    .cfi_def_cfa_offset 2032
-; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    lui a0, 2
-; RV32-NEXT:    addi a0, a0, 128
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    .cfi_def_cfa_offset 10352
-; RV32-NEXT:    addi a0, sp, 12
-; RV32-NEXT:    call callee
-; RV32-NEXT:    lui a0, 2
-; RV32-NEXT:    addi a0, a0, 128
-; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    ret
+; RV32I-LABEL: frame_8kb_offset_128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    lui a0, 2
+; RV32I-NEXT:    addi a0, a0, 128
+; RV32I-NEXT:    sub sp, sp, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 10352
+; RV32I-NEXT:    addi a0, sp, 12
+; RV32I-NEXT:    call callee
+; RV32I-NEXT:    lui a0, 2
+; RV32I-NEXT:    addi a0, a0, 128
+; RV32I-NEXT:    add sp, sp, a0
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: frame_8kb_offset_128:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -2032
-; RV64-NEXT:    .cfi_def_cfa_offset 2032
-; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    lui a0, 2
-; RV64-NEXT:    addiw a0, a0, 128
-; RV64-NEXT:    sub sp, sp, a0
-; RV64-NEXT:    .cfi_def_cfa_offset 10352
-; RV64-NEXT:    addi a0, sp, 8
-; RV64-NEXT:    call callee
-; RV64-NEXT:    lui a0, 2
-; RV64-NEXT:    addiw a0, a0, 128
-; RV64-NEXT:    add sp, sp, a0
-; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    ret
+; RV32ZBA-LABEL: frame_8kb_offset_128:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    addi sp, sp, -2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32ZBA-NEXT:    .cfi_offset ra, -4
+; RV32ZBA-NEXT:    li a0, -1040
+; RV32ZBA-NEXT:    sh3add sp, a0, sp
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 10352
+; RV32ZBA-NEXT:    addi a0, sp, 12
+; RV32ZBA-NEXT:    call callee
+; RV32ZBA-NEXT:    li a0, 1040
+; RV32ZBA-NEXT:    sh3add sp, a0, sp
+; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    ret
+;
+; RV64I-LABEL: frame_8kb_offset_128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    lui a0, 2
+; RV64I-NEXT:    addiw a0, a0, 128
+; RV64I-NEXT:    sub sp, sp, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 10352
+; RV64I-NEXT:    addi a0, sp, 8
+; RV64I-NEXT:    call callee
+; RV64I-NEXT:    lui a0, 2
+; RV64I-NEXT:    addiw a0, a0, 128
+; RV64I-NEXT:    add sp, sp, a0
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: frame_8kb_offset_128:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    addi sp, sp, -2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64ZBA-NEXT:    .cfi_offset ra, -8
+; RV64ZBA-NEXT:    li a0, -1040
+; RV64ZBA-NEXT:    sh3add sp, a0, sp
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 10352
+; RV64ZBA-NEXT:    addi a0, sp, 8
+; RV64ZBA-NEXT:    call callee
+; RV64ZBA-NEXT:    li a0, 1040
+; RV64ZBA-NEXT:    sh3add sp, a0, sp
+; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    ret
   %a = alloca [10336 x i8]
   call void @callee(ptr %a)
   ret void
 }
 
 define void @frame_16kb_minus_80() {
-; RV32-LABEL: frame_16kb_minus_80:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -2032
-; RV32-NEXT:    .cfi_def_cfa_offset 2032
-; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    lui a0, 4
-; RV32-NEXT:    addi a0, a0, -80
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    .cfi_def_cfa_offset 18336
-; RV32-NEXT:    addi a0, sp, 12
-; RV32-NEXT:    call callee
-; RV32-NEXT:    lui a0, 4
-; RV32-NEXT:    addi a0, a0, -80
-; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    ret
+; RV32I-LABEL: frame_16kb_minus_80:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    lui a0, 4
+; RV32I-NEXT:    addi a0, a0, -80
+; RV32I-NEXT:    sub sp, sp, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 18336
+; RV32I-NEXT:    addi a0, sp, 12
+; RV32I-NEXT:    call callee
+; RV32I-NEXT:    lui a0, 4
+; RV32I-NEXT:    addi a0, a0, -80
+; RV32I-NEXT:    add sp, sp, a0
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: frame_16kb_minus_80:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -2032
-; RV64-NEXT:    .cfi_def_cfa_offset 2032
-; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    lui a0, 4
-; RV64-NEXT:    addiw a0, a0, -80
-; RV64-NEXT:    sub sp, sp, a0
-; RV64-NEXT:    .cfi_def_cfa_offset 18336
-; RV64-NEXT:    addi a0, sp, 8
-; RV64-NEXT:    call callee
-; RV64-NEXT:    lui a0, 4
-; RV64-NEXT:    addiw a0, a0, -80
-; RV64-NEXT:    add sp, sp, a0
-; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    ret
+; RV32ZBA-LABEL: frame_16kb_minus_80:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    addi sp, sp, -2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32ZBA-NEXT:    .cfi_offset ra, -4
+; RV32ZBA-NEXT:    li a0, -2038
+; RV32ZBA-NEXT:    sh3add sp, a0, sp
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 18336
+; RV32ZBA-NEXT:    addi a0, sp, 12
+; RV32ZBA-NEXT:    call callee
+; RV32ZBA-NEXT:    li a0, 2038
+; RV32ZBA-NEXT:    sh3add sp, a0, sp
+; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    ret
+;
+; RV64I-LABEL: frame_16kb_minus_80:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    lui a0, 4
+; RV64I-NEXT:    addiw a0, a0, -80
+; RV64I-NEXT:    sub sp, sp, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 18336
+; RV64I-NEXT:    addi a0, sp, 8
+; RV64I-NEXT:    call callee
+; RV64I-NEXT:    lui a0, 4
+; RV64I-NEXT:    addiw a0, a0, -80
+; RV64I-NEXT:    add sp, sp, a0
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: frame_16kb_minus_80:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    addi sp, sp, -2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64ZBA-NEXT:    .cfi_offset ra, -8
+; RV64ZBA-NEXT:    li a0, -2038
+; RV64ZBA-NEXT:    sh3add sp, a0, sp
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 18336
+; RV64ZBA-NEXT:    addi a0, sp, 8
+; RV64ZBA-NEXT:    call callee
+; RV64ZBA-NEXT:    li a0, 2038
+; RV64ZBA-NEXT:    sh3add sp, a0, sp
+; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    ret
   %a = alloca [18320 x i8]
   call void @callee(ptr %a)
   ret void
@@ -430,8 +532,3 @@ define void @frame_32kb() {
   call void @callee(ptr %a)
   ret void
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; RV32I: {{.*}}
-; RV32ZBA: {{.*}}
-; RV64I: {{.*}}
-; RV64ZBA: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/stack-offset.ll b/llvm/test/CodeGen/RISCV/stack-offset.ll
index 6a24e5dcdbc3dc..cc81fd62eba9da 100644
--- a/llvm/test/CodeGen/RISCV/stack-offset.ll
+++ b/llvm/test/CodeGen/RISCV/stack-offset.ll
@@ -11,55 +11,101 @@
 declare void @inspect(...)
 
 define void @test() {
-; RV32-LABEL: test:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -2032
-; RV32-NEXT:    .cfi_def_cfa_offset 2032
-; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    addi sp, sp, -2048
-; RV32-NEXT:    addi sp, sp, -1120
-; RV32-NEXT:    .cfi_def_cfa_offset 5200
-; RV32-NEXT:    addi a0, sp, 12
-; RV32-NEXT:    addi a1, sp, 2047
-; RV32-NEXT:    addi a1, a1, 13
-; RV32-NEXT:    lui a2, 1
-; RV32-NEXT:    addi a2, a2, 12
-; RV32-NEXT:    add a2, sp, a2
-; RV32-NEXT:    lui a3, 1
-; RV32-NEXT:    addi a3, a3, 1036
-; RV32-NEXT:    add a3, sp, a3
-; RV32-NEXT:    call inspect
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    addi sp, sp, 1136
-; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    ret
+; RV32I-LABEL: test:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    addi sp, sp, -2048
+; RV32I-NEXT:    addi sp, sp, -1120
+; RV32I-NEXT:    .cfi_def_cfa_offset 5200
+; RV32I-NEXT:    addi a0, sp, 12
+; RV32I-NEXT:    addi a1, sp, 2047
+; RV32I-NEXT:    addi a1, a1, 13
+; RV32I-NEXT:    lui a2, 1
+; RV32I-NEXT:    addi a2, a2, 12
+; RV32I-NEXT:    add a2, sp, a2
+; RV32I-NEXT:    lui a3, 1
+; RV32I-NEXT:    addi a3, a3, 1036
+; RV32I-NEXT:    add a3, sp, a3
+; RV32I-NEXT:    call inspect
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    addi sp, sp, 1136
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: test:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -2032
-; RV64-NEXT:    .cfi_def_cfa_offset 2032
-; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    addi sp, sp, -2048
-; RV64-NEXT:    addi sp, sp, -1120
-; RV64-NEXT:    .cfi_def_cfa_offset 5200
-; RV64-NEXT:    addi a0, sp, 8
-; RV64-NEXT:    addi a1, sp, 2047
-; RV64-NEXT:    addi a1, a1, 9
-; RV64-NEXT:    lui a2, 1
-; RV64-NEXT:    addiw a2, a2, 8
-; RV64-NEXT:    add a2, sp, a2
-; RV64-NEXT:    lui a3, 1
-; RV64-NEXT:    addiw a3, a3, 1032
-; RV64-NEXT:    add a3, sp, a3
-; RV64-NEXT:    call inspect
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    addi sp, sp, 1136
-; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    ret
+; RV32ZBA-LABEL: test:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    addi sp, sp, -2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32ZBA-NEXT:    .cfi_offset ra, -4
+; RV32ZBA-NEXT:    addi sp, sp, -2048
+; RV32ZBA-NEXT:    addi sp, sp, -1120
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 5200
+; RV32ZBA-NEXT:    addi a0, sp, 12
+; RV32ZBA-NEXT:    addi a1, sp, 2047
+; RV32ZBA-NEXT:    addi a1, a1, 13
+; RV32ZBA-NEXT:    li a2, 1027
+; RV32ZBA-NEXT:    sh2add a2, a2, sp
+; RV32ZBA-NEXT:    li a3, 1283
+; RV32ZBA-NEXT:    sh2add a3, a3, sp
+; RV32ZBA-NEXT:    call inspect
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    addi sp, sp, 1136
+; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    ret
+;
+; RV64I-LABEL: test:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    addi sp, sp, -2048
+; RV64I-NEXT:    addi sp, sp, -1120
+; RV64I-NEXT:    .cfi_def_cfa_offset 5200
+; RV64I-NEXT:    addi a0, sp, 8
+; RV64I-NEXT:    addi a1, sp, 2047
+; RV64I-NEXT:    addi a1, a1, 9
+; RV64I-NEXT:    lui a2, 1
+; RV64I-NEXT:    addiw a2, a2, 8
+; RV64I-NEXT:    add a2, sp, a2
+; RV64I-NEXT:    lui a3, 1
+; RV64I-NEXT:    addiw a3, a3, 1032
+; RV64I-NEXT:    add a3, sp, a3
+; RV64I-NEXT:    call inspect
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    addi sp, sp, 1136
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: test:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    addi sp, sp, -2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64ZBA-NEXT:    .cfi_offset ra, -8
+; RV64ZBA-NEXT:    addi sp, sp, -2048
+; RV64ZBA-NEXT:    addi sp, sp, -1120
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 5200
+; RV64ZBA-NEXT:    addi a0, sp, 8
+; RV64ZBA-NEXT:    addi a1, sp, 2047
+; RV64ZBA-NEXT:    addi a1, a1, 9
+; RV64ZBA-NEXT:    li a2, 513
+; RV64ZBA-NEXT:    sh3add a2, a2, sp
+; RV64ZBA-NEXT:    li a3, 641
+; RV64ZBA-NEXT:    sh3add a3, a3, sp
+; RV64ZBA-NEXT:    call inspect
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    addi sp, sp, 1136
+; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    ret
   %p4 = alloca [64 x i8], align 1
   %p3 = alloca [1024 x i8], align 1
   %p2 = alloca [2048 x i8], align 1
@@ -69,45 +115,83 @@ define void @test() {
 }
 
 define void @align_8() {
-; RV32-LABEL: align_8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -2032
-; RV32-NEXT:    .cfi_def_cfa_offset 2032
-; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    addi sp, sp, -2048
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 4112
-; RV32-NEXT:    addi a0, sp, 7
-; RV32-NEXT:    lui a1, 1
-; RV32-NEXT:    addi a1, a1, 8
-; RV32-NEXT:    add a1, sp, a1
-; RV32-NEXT:    call inspect
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    addi sp, sp, 48
-; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    ret
+; RV32I-LABEL: align_8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    addi sp, sp, -2048
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    .cfi_def_cfa_offset 4112
+; RV32I-NEXT:    addi a0, sp, 7
+; RV32I-NEXT:    lui a1, 1
+; RV32I-NEXT:    addi a1, a1, 8
+; RV32I-NEXT:    add a1, sp, a1
+; RV32I-NEXT:    call inspect
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: align_8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -2032
-; RV64-NEXT:    .cfi_def_cfa_offset 2032
-; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    addi sp, sp, -2048
-; RV64-NEXT:    addi sp, sp, -48
-; RV64-NEXT:    .cfi_def_cfa_offset 4128
-; RV64-NEXT:    addi a0, sp, 15
-; RV64-NEXT:    lui a1, 1
-; RV64-NEXT:    addiw a1, a1, 16
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    call inspect
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    addi sp, sp, 64
-; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    ret
+; RV32ZBA-LABEL: align_8:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    addi sp, sp, -2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32ZBA-NEXT:    .cfi_offset ra, -4
+; RV32ZBA-NEXT:    addi sp, sp, -2048
+; RV32ZBA-NEXT:    addi sp, sp, -32
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 4112
+; RV32ZBA-NEXT:    addi a0, sp, 7
+; RV32ZBA-NEXT:    li a1, 513
+; RV32ZBA-NEXT:    sh3add a1, a1, sp
+; RV32ZBA-NEXT:    call inspect
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    addi sp, sp, 48
+; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    ret
+;
+; RV64I-LABEL: align_8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    addi sp, sp, -2048
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    .cfi_def_cfa_offset 4128
+; RV64I-NEXT:    addi a0, sp, 15
+; RV64I-NEXT:    lui a1, 1
+; RV64I-NEXT:    addiw a1, a1, 16
+; RV64I-NEXT:    add a1, sp, a1
+; RV64I-NEXT:    call inspect
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: align_8:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    addi sp, sp, -2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64ZBA-NEXT:    .cfi_offset ra, -8
+; RV64ZBA-NEXT:    addi sp, sp, -2048
+; RV64ZBA-NEXT:    addi sp, sp, -48
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 4128
+; RV64ZBA-NEXT:    addi a0, sp, 15
+; RV64ZBA-NEXT:    li a1, 514
+; RV64ZBA-NEXT:    sh3add a1, a1, sp
+; RV64ZBA-NEXT:    call inspect
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    addi sp, sp, 64
+; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    ret
   %p2 = alloca i8, align 8
   %p1 = alloca [4097 x i8], align 1
   call void (...) @inspect(ptr %p1, ptr %p2)
@@ -115,45 +199,83 @@ define void @align_8() {
 }
 
 define void @align_4() {
-; RV32-LABEL: align_4:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -2032
-; RV32-NEXT:    .cfi_def_cfa_offset 2032
-; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    addi sp, sp, -2048
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    .cfi_def_cfa_offset 4112
-; RV32-NEXT:    addi a0, sp, 7
-; RV32-NEXT:    lui a1, 1
-; RV32-NEXT:    addi a1, a1, 8
-; RV32-NEXT:    add a1, sp, a1
-; RV32-NEXT:    call inspect
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    addi sp, sp, 48
-; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 2032
-; RV32-NEXT:    ret
+; RV32I-LABEL: align_4:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -2032
+; RV32I-NEXT:    .cfi_def_cfa_offset 2032
+; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    addi sp, sp, -2048
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    .cfi_def_cfa_offset 4112
+; RV32I-NEXT:    addi a0, sp, 7
+; RV32I-NEXT:    lui a1, 1
+; RV32I-NEXT:    addi a1, a1, 8
+; RV32I-NEXT:    add a1, sp, a1
+; RV32I-NEXT:    call inspect
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 2032
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: align_4:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -2032
-; RV64-NEXT:    .cfi_def_cfa_offset 2032
-; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
-; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    addi sp, sp, -2048
-; RV64-NEXT:    addi sp, sp, -48
-; RV64-NEXT:    .cfi_def_cfa_offset 4128
-; RV64-NEXT:    addi a0, sp, 19
-; RV64-NEXT:    lui a1, 1
-; RV64-NEXT:    addiw a1, a1, 20
-; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    call inspect
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    addi sp, sp, 64
-; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 2032
-; RV64-NEXT:    ret
+; RV32ZBA-LABEL: align_4:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    addi sp, sp, -2032
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32ZBA-NEXT:    .cfi_offset ra, -4
+; RV32ZBA-NEXT:    addi sp, sp, -2048
+; RV32ZBA-NEXT:    addi sp, sp, -32
+; RV32ZBA-NEXT:    .cfi_def_cfa_offset 4112
+; RV32ZBA-NEXT:    addi a0, sp, 7
+; RV32ZBA-NEXT:    li a1, 513
+; RV32ZBA-NEXT:    sh3add a1, a1, sp
+; RV32ZBA-NEXT:    call inspect
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    addi sp, sp, 48
+; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT:    addi sp, sp, 2032
+; RV32ZBA-NEXT:    ret
+;
+; RV64I-LABEL: align_4:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -2032
+; RV64I-NEXT:    .cfi_def_cfa_offset 2032
+; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    addi sp, sp, -2048
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    .cfi_def_cfa_offset 4128
+; RV64I-NEXT:    addi a0, sp, 19
+; RV64I-NEXT:    lui a1, 1
+; RV64I-NEXT:    addiw a1, a1, 20
+; RV64I-NEXT:    add a1, sp, a1
+; RV64I-NEXT:    call inspect
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    addi sp, sp, 64
+; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 2032
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: align_4:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    addi sp, sp, -2032
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
+; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64ZBA-NEXT:    .cfi_offset ra, -8
+; RV64ZBA-NEXT:    addi sp, sp, -2048
+; RV64ZBA-NEXT:    addi sp, sp, -48
+; RV64ZBA-NEXT:    .cfi_def_cfa_offset 4128
+; RV64ZBA-NEXT:    addi a0, sp, 19
+; RV64ZBA-NEXT:    li a1, 1029
+; RV64ZBA-NEXT:    sh2add a1, a1, sp
+; RV64ZBA-NEXT:    call inspect
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    addi sp, sp, 64
+; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64ZBA-NEXT:    addi sp, sp, 2032
+; RV64ZBA-NEXT:    ret
   %p2 = alloca i8, align 4
   %p1 = alloca [4097 x i8], align 1
   call void (...) @inspect(ptr %p1, ptr %p2)
@@ -252,8 +374,3 @@ define void @align_1() {
   call void (...) @inspect(ptr %p1, ptr %p2)
   ret void
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; RV32I: {{.*}}
-; RV32ZBA: {{.*}}
-; RV64I: {{.*}}
-; RV64ZBA: {{.*}}

From a6b962d231c15222106703f505201c4778a821b7 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Mon, 8 Apr 2024 14:49:01 -0700
Subject: [PATCH 2/2] Fix typo

---
 llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 759334feefc64c..426a8b66b2e1e6 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -250,7 +250,7 @@ void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
 
   // Use shNadd if doing so lets us materialize a 12 bit immediate with a single
   // instruction.  This saves 1 instruction over the full lui/addi+add fallback
-  // path.  We avoid anything which can be done with a single lui as it night
+  // path.  We avoid anything which can be done with a single lui as it might
   // be compressible.  Note that the sh1add case is fully covered by the 2x addi
   // case just above and is thus omitted.
   if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {


