[llvm] [RISCV] Use iXLen for ptr<->int casts in vararg.ll (PR #74426)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 4 23:32:20 PST 2023


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/74426

Also use ABI alignment for ptr sized objects.

This makes the code more sane and avoids loading only part of what was stored by vastart on RV64.
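
For reference, here is a minimal standalone sketch (not part of the patch) of the pattern the updated test follows: the sed substitution in each RUN line rewrites iXLen to i32 or i64 before the IR reaches llc, so the ptr<->int casts always operate at pointer width and the full pointer stored by va_start is read back. The file name, function name, and CHECK line below are hypothetical.

    ; Hypothetical example, not taken from vararg.ll.
    ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -verify-machineinstrs | FileCheck %s
    ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -verify-machineinstrs | FileCheck %s

    ; Round a pointer up to an 8-byte boundary using pointer-width (iXLen)
    ; integer arithmetic, mirroring the casts used in va2 in the patch below.
    ; CHECK-LABEL: align_up:
    define ptr @align_up(ptr %p) {
      %i = ptrtoint ptr %p to iXLen  ; iXLen becomes i32 on RV32, i64 on RV64
      %a = add iXLen %i, 7
      %m = and iXLen %a, -8
      %q = inttoptr iXLen %m to ptr
      ret ptr %q
    }

In the real test the same idea is applied to va2, and the explicit align 4 on the ptr load/store is dropped so the va_list slot uses the ABI alignment for a pointer-sized object.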

From dbc3c9a4ff26524d29dff637a0760e02634405ab Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 4 Dec 2023 23:29:57 -0800
Subject: [PATCH] [RISCV] Use iXLen for ptr<->int casts in vararg.ll

Also use ABI alignment for ptr sized objects.

This makes the code more sane and avoids loading only part of what
was stored by vastart on RV64.
---
 llvm/test/CodeGen/RISCV/vararg.ll | 63 ++++++++++++-------------------
 1 file changed, 24 insertions(+), 39 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index 59aa1d9ae2893..dd41aaab79308 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -1,25 +1,25 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -verify-machineinstrs \
 ; RUN:   | FileCheck -check-prefix=ILP32-ILP32F-FPELIM %s
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs -frame-pointer=all < %s \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -verify-machineinstrs -frame-pointer=all \
 ; RUN:   | FileCheck -check-prefix=ILP32-ILP32F-WITHFP %s
-; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs \
 ; RUN:   | FileCheck -check-prefix=RV32D-ILP32-ILP32F-ILP32D-FPELIM %s
-; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi ilp32f \
-; RUN:     -verify-machineinstrs < %s \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d -target-abi ilp32f \
+; RUN:     -verify-machineinstrs \
 ; RUN:   | FileCheck -check-prefix=RV32D-ILP32-ILP32F-ILP32D-FPELIM %s
-; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi ilp32d \
-; RUN:     -verify-machineinstrs < %s \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d -target-abi ilp32d \
+; RUN:     -verify-machineinstrs \
 ; RUN:   | FileCheck -check-prefix=RV32D-ILP32-ILP32F-ILP32D-FPELIM %s
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -verify-machineinstrs \
 ; RUN:   | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s
-; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi lp64f \
-; RUN:     -verify-machineinstrs < %s \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d -target-abi lp64f \
+; RUN:     -verify-machineinstrs \
 ; RUN:   | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s
-; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi lp64d \
-; RUN:     -verify-machineinstrs < %s \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d -target-abi lp64d \
+; RUN:     -verify-machineinstrs \
 ; RUN:   | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs -frame-pointer=all < %s \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -verify-machineinstrs -frame-pointer=all \
 ; RUN:   | FileCheck -check-prefix=LP64-LP64F-LP64D-WITHFP %s
 
 ; The same vararg calling convention is used for ilp32/ilp32f/ilp32d and for
@@ -556,6 +556,7 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-LABEL: va2:
 ; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    mv a0, a1
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a7, 72(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a6, 64(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a5, 56(sp)
@@ -563,17 +564,8 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 40(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 32(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, sp, 24
-; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    lw a0, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, a0, 7
-; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a0, 32
-; LP64-LP64F-LP64D-FPELIM-NEXT:    srli a1, a1, 32
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a1, a1, 8
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a1, sp, 39
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    srliw a0, a0, 3
-; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a0, a0, 3
-; LP64-LP64F-LP64D-FPELIM-NEXT:    ld a0, 0(a0)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 80
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ret
 ;
@@ -583,6 +575,7 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 32
+; LP64-LP64F-LP64D-WITHFP-NEXT:    mv a0, a1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a7, 56(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a6, 48(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a5, 40(s0)
@@ -590,30 +583,22 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, 24(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, 16(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, s0, 8
-; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    lw a0, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, a0, 7
-; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a0, 32
-; LP64-LP64F-LP64D-WITHFP-NEXT:    srli a1, a1, 32
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, a1, 8
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, s0, 23
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    srliw a0, a0, 3
-; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a0, a0, 3
-; LP64-LP64F-LP64D-WITHFP-NEXT:    ld a0, 0(a0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ret
   %va = alloca ptr
   call void @llvm.va_start(ptr %va)
-  %argp.cur = load i32, ptr %va, align 4
-  %1 = add i32 %argp.cur, 7
-  %2 = and i32 %1, -8
-  %argp.cur.aligned = inttoptr i32 %1 to ptr
+  %argp.cur = load ptr, ptr %va
+  %ptrint = ptrtoint ptr %argp.cur to iXLen
+  %1 = add iXLen %ptrint, 7
+  %2 = and iXLen %1, -8
+  %argp.cur.aligned = inttoptr iXLen %1 to ptr
   %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
-  store ptr %argp.next, ptr %va, align 4
-  %3 = inttoptr i32 %2 to ptr
+  store ptr %argp.next, ptr %va
+  %3 = inttoptr iXLen %2 to ptr
   %4 = load double, ptr %3, align 8
   %5 = bitcast double %4 to i64
   call void @llvm.va_end(ptr %va)


