[llvm] [RISCV] Use ABI align in varargs tests in push-pop-popret.ll. NFC (PR #74423)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 4 23:14:55 PST 2023


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/74423

The explicit 'align 4' caused the va_list pointer slot to be underaligned on RV64, where pointers have an 8-byte ABI alignment.
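For illustration, a minimal IR sketch (not taken from the test file; the function names are hypothetical) of why the explicit alignment matters, assuming RV64 with default target features, where a pointer's ABI alignment is 8 bytes:

define ptr @underaligned(ptr %va) {
  ; 'align 4' underaligns the 8-byte pointer access on RV64, so the backend
  ; splits it into two 32-bit halves (the lwu/lwu/slli/or sequence removed
  ; from the checks below).
  %p = load ptr, ptr %va, align 4
  ret ptr %p
}

define ptr @abi_aligned(ptr %va) {
  ; With no explicit alignment the load defaults to the ABI alignment (8),
  ; so it can be lowered as a single 8-byte access.
  %p = load ptr, ptr %va
  ret ptr %p
}

Using the ABI alignment in the test keeps the IR behavior the same while letting the RV64 backend use plain 8-byte loads and stores for the va_list pointer.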

From 362629556332751372cfbb289fc8346009af0e39 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 4 Dec 2023 23:13:39 -0800
Subject: [PATCH] [RISCV] Use ABI align in varargs tests in push-pop-popret.ll.
 NFC

The explicit 'align 4' caused the pointers to be underaligned on RV64.
---
 llvm/test/CodeGen/RISCV/push-pop-popret.ll | 50 ++++++----------------
 1 file changed, 13 insertions(+), 37 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/push-pop-popret.ll b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
index 776944b177636..454d85166435f 100644
--- a/llvm/test/CodeGen/RISCV/push-pop-popret.ll
+++ b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
@@ -1015,24 +1015,16 @@ define i32 @varargs(ptr %fmt, ...) nounwind {
 ; RV64IZCMP-LABEL: varargs:
 ; RV64IZCMP:       # %bb.0:
 ; RV64IZCMP-NEXT:    addi sp, sp, -80
+; RV64IZCMP-NEXT:    sd a1, 24(sp)
 ; RV64IZCMP-NEXT:    sd a7, 72(sp)
 ; RV64IZCMP-NEXT:    sd a6, 64(sp)
 ; RV64IZCMP-NEXT:    sd a5, 56(sp)
 ; RV64IZCMP-NEXT:    sd a4, 48(sp)
 ; RV64IZCMP-NEXT:    sd a3, 40(sp)
 ; RV64IZCMP-NEXT:    sd a2, 32(sp)
-; RV64IZCMP-NEXT:    sd a1, 24(sp)
-; RV64IZCMP-NEXT:    addi a0, sp, 24
+; RV64IZCMP-NEXT:    addi a0, sp, 28
 ; RV64IZCMP-NEXT:    sd a0, 8(sp)
-; RV64IZCMP-NEXT:    lwu a0, 12(sp)
-; RV64IZCMP-NEXT:    lwu a1, 8(sp)
-; RV64IZCMP-NEXT:    slli a0, a0, 32
-; RV64IZCMP-NEXT:    or a0, a0, a1
-; RV64IZCMP-NEXT:    addi a1, a0, 4
-; RV64IZCMP-NEXT:    sw a1, 8(sp)
-; RV64IZCMP-NEXT:    srli a1, a1, 32
-; RV64IZCMP-NEXT:    sw a1, 12(sp)
-; RV64IZCMP-NEXT:    lw a0, 0(a0)
+; RV64IZCMP-NEXT:    lw a0, 24(sp)
 ; RV64IZCMP-NEXT:    addi sp, sp, 80
 ; RV64IZCMP-NEXT:    ret
 ;
@@ -1055,24 +1047,16 @@ define i32 @varargs(ptr %fmt, ...) nounwind {
 ; RV64IZCMP-SR-LABEL: varargs:
 ; RV64IZCMP-SR:       # %bb.0:
 ; RV64IZCMP-SR-NEXT:    addi sp, sp, -80
+; RV64IZCMP-SR-NEXT:    sd a1, 24(sp)
 ; RV64IZCMP-SR-NEXT:    sd a7, 72(sp)
 ; RV64IZCMP-SR-NEXT:    sd a6, 64(sp)
 ; RV64IZCMP-SR-NEXT:    sd a5, 56(sp)
 ; RV64IZCMP-SR-NEXT:    sd a4, 48(sp)
 ; RV64IZCMP-SR-NEXT:    sd a3, 40(sp)
 ; RV64IZCMP-SR-NEXT:    sd a2, 32(sp)
-; RV64IZCMP-SR-NEXT:    sd a1, 24(sp)
-; RV64IZCMP-SR-NEXT:    addi a0, sp, 24
+; RV64IZCMP-SR-NEXT:    addi a0, sp, 28
 ; RV64IZCMP-SR-NEXT:    sd a0, 8(sp)
-; RV64IZCMP-SR-NEXT:    lwu a0, 12(sp)
-; RV64IZCMP-SR-NEXT:    lwu a1, 8(sp)
-; RV64IZCMP-SR-NEXT:    slli a0, a0, 32
-; RV64IZCMP-SR-NEXT:    or a0, a0, a1
-; RV64IZCMP-SR-NEXT:    addi a1, a0, 4
-; RV64IZCMP-SR-NEXT:    sw a1, 8(sp)
-; RV64IZCMP-SR-NEXT:    srli a1, a1, 32
-; RV64IZCMP-SR-NEXT:    sw a1, 12(sp)
-; RV64IZCMP-SR-NEXT:    lw a0, 0(a0)
+; RV64IZCMP-SR-NEXT:    lw a0, 24(sp)
 ; RV64IZCMP-SR-NEXT:    addi sp, sp, 80
 ; RV64IZCMP-SR-NEXT:    ret
 ;
@@ -1095,32 +1079,24 @@ define i32 @varargs(ptr %fmt, ...) nounwind {
 ; RV64I-LABEL: varargs:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -80
+; RV64I-NEXT:    sd a1, 24(sp)
 ; RV64I-NEXT:    sd a7, 72(sp)
 ; RV64I-NEXT:    sd a6, 64(sp)
 ; RV64I-NEXT:    sd a5, 56(sp)
 ; RV64I-NEXT:    sd a4, 48(sp)
 ; RV64I-NEXT:    sd a3, 40(sp)
 ; RV64I-NEXT:    sd a2, 32(sp)
-; RV64I-NEXT:    sd a1, 24(sp)
-; RV64I-NEXT:    addi a0, sp, 24
+; RV64I-NEXT:    addi a0, sp, 28
 ; RV64I-NEXT:    sd a0, 8(sp)
-; RV64I-NEXT:    lwu a0, 12(sp)
-; RV64I-NEXT:    lwu a1, 8(sp)
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    addi a1, a0, 4
-; RV64I-NEXT:    sw a1, 8(sp)
-; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    sw a1, 12(sp)
-; RV64I-NEXT:    lw a0, 0(a0)
+; RV64I-NEXT:    lw a0, 24(sp)
 ; RV64I-NEXT:    addi sp, sp, 80
 ; RV64I-NEXT:    ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
-  %argp.cur = load ptr, ptr %va, align 4
+  %argp.cur = load ptr, ptr %va
   %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
-  store ptr %argp.next, ptr %va, align 4
-  %1 = load i32, ptr %argp.cur, align 4
+  store ptr %argp.next, ptr %va
+  %1 = load i32, ptr %argp.cur
   call void @llvm.va_end(ptr %va)
   ret i32 %1
 }


