[llvm] 4e9794f - [NFC][RISCV] Use -O0 in trampoline test for easier code observation (#142332)

via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 2 12:14:07 PDT 2025


Author: Jesse Huang
Date: 2025-06-03T03:14:05+08:00
New Revision: 4e9794fdbe266ce8e473c97f7ddc7b604780e5a0

URL: https://github.com/llvm/llvm-project/commit/4e9794fdbe266ce8e473c97f7ddc7b604780e5a0
DIFF: https://github.com/llvm/llvm-project/commit/4e9794fdbe266ce8e473c97f7ddc7b604780e5a0.diff

LOG: [NFC][RISCV] Use -O0 in trampoline test for easier code observation (#142332)

At the default optimization level, part of the trampoline setup is folded into a
load from the constant pool, so the lit test cannot capture the actual value being
stored. Disabling optimization keeps the values materialized from immediates,
making any change to them observable in the checks.
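
For reference, the difference is visible in the hunks below (excerpted from this
diff): without -O0 the trampoline constants come from a constant-pool load,

    ; RV64-NEXT:    lui a3, %hi(.LCPI0_0)
    ; RV64-NEXT:    ld a3, %lo(.LCPI0_0)(a3)

whereas at -O0 each constant is built from immediates that FileCheck can match
directly, e.g.

    ; RV64-NEXT:    lui a0, 40
    ; RV64-NEXT:    addiw a0, a0, 103
    ; RV64-NEXT:    sw a0, 36(sp)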

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rv64-trampoline.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rv64-trampoline.ll b/llvm/test/CodeGen/RISCV/rv64-trampoline.ll
index 1ec4d8ddd1d84..6970324bb184e 100644
--- a/llvm/test/CodeGen/RISCV/rv64-trampoline.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-trampoline.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: llc -O0 -mtriple=riscv64 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV64 %s
-; RUN: llc -mtriple=riscv64-unknown-linux-gnu -verify-machineinstrs < %s \
+; RUN: llc -O0 -mtriple=riscv64-unknown-linux-gnu -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV64-LINUX %s
 
 declare void @llvm.init.trampoline(ptr, ptr, ptr)
@@ -13,30 +13,30 @@ define i64 @test0(i64 %n, ptr %p) nounwind {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -64
 ; RV64-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT:    mv s0, a0
+; RV64-NEXT:    sd a0, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    lui a0, %hi(f)
 ; RV64-NEXT:    addi a0, a0, %lo(f)
-; RV64-NEXT:    li a2, 919
-; RV64-NEXT:    lui a3, %hi(.LCPI0_0)
-; RV64-NEXT:    sd a0, 32(sp)
+; RV64-NEXT:    sd a0, 48(sp)
+; RV64-NEXT:    sd a1, 40(sp)
+; RV64-NEXT:    li a0, 919
+; RV64-NEXT:    sw a0, 24(sp)
+; RV64-NEXT:    lui a0, 40
+; RV64-NEXT:    addiw a0, a0, 103
+; RV64-NEXT:    sw a0, 36(sp)
+; RV64-NEXT:    lui a0, 4155
+; RV64-NEXT:    addiw a0, a0, 899
+; RV64-NEXT:    sw a0, 32(sp)
 ; RV64-NEXT:    lui a0, 6203
-; RV64-NEXT:    ld a3, %lo(.LCPI0_0)(a3)
-; RV64-NEXT:    addi a0, a0, 643
-; RV64-NEXT:    sw a2, 8(sp)
-; RV64-NEXT:    sw a0, 12(sp)
-; RV64-NEXT:    sd a3, 16(sp)
-; RV64-NEXT:    sd a1, 24(sp)
-; RV64-NEXT:    addi a1, sp, 24
-; RV64-NEXT:    addi a0, sp, 8
-; RV64-NEXT:    addi s1, sp, 8
+; RV64-NEXT:    addiw a0, a0, 643
+; RV64-NEXT:    sw a0, 28(sp)
+; RV64-NEXT:    addi a1, sp, 40
+; RV64-NEXT:    addi a0, sp, 24
+; RV64-NEXT:    sd a0, 16(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    call __clear_cache
-; RV64-NEXT:    mv a0, s0
-; RV64-NEXT:    jalr s1
+; RV64-NEXT:    ld a0, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld a1, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT:    jalr a1
 ; RV64-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 64
 ; RV64-NEXT:    ret
 ;
@@ -44,31 +44,31 @@ define i64 @test0(i64 %n, ptr %p) nounwind {
 ; RV64-LINUX:       # %bb.0:
 ; RV64-LINUX-NEXT:    addi sp, sp, -64
 ; RV64-LINUX-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-LINUX-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-LINUX-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-LINUX-NEXT:    mv s0, a0
+; RV64-LINUX-NEXT:    sd a0, 8(sp) # 8-byte Folded Spill
 ; RV64-LINUX-NEXT:    lui a0, %hi(f)
 ; RV64-LINUX-NEXT:    addi a0, a0, %lo(f)
-; RV64-LINUX-NEXT:    li a2, 919
-; RV64-LINUX-NEXT:    lui a3, %hi(.LCPI0_0)
-; RV64-LINUX-NEXT:    sd a0, 32(sp)
+; RV64-LINUX-NEXT:    sd a0, 48(sp)
+; RV64-LINUX-NEXT:    sd a1, 40(sp)
+; RV64-LINUX-NEXT:    li a0, 919
+; RV64-LINUX-NEXT:    sw a0, 24(sp)
+; RV64-LINUX-NEXT:    lui a0, 40
+; RV64-LINUX-NEXT:    addiw a0, a0, 103
+; RV64-LINUX-NEXT:    sw a0, 36(sp)
+; RV64-LINUX-NEXT:    lui a0, 4155
+; RV64-LINUX-NEXT:    addiw a0, a0, 899
+; RV64-LINUX-NEXT:    sw a0, 32(sp)
 ; RV64-LINUX-NEXT:    lui a0, 6203
-; RV64-LINUX-NEXT:    ld a3, %lo(.LCPI0_0)(a3)
-; RV64-LINUX-NEXT:    addi a0, a0, 643
-; RV64-LINUX-NEXT:    sw a2, 8(sp)
-; RV64-LINUX-NEXT:    sw a0, 12(sp)
-; RV64-LINUX-NEXT:    sd a3, 16(sp)
-; RV64-LINUX-NEXT:    sd a1, 24(sp)
-; RV64-LINUX-NEXT:    addi a1, sp, 24
-; RV64-LINUX-NEXT:    addi a0, sp, 8
-; RV64-LINUX-NEXT:    addi s1, sp, 8
+; RV64-LINUX-NEXT:    addiw a0, a0, 643
+; RV64-LINUX-NEXT:    sw a0, 28(sp)
+; RV64-LINUX-NEXT:    addi a1, sp, 40
+; RV64-LINUX-NEXT:    addi a0, sp, 24
+; RV64-LINUX-NEXT:    sd a0, 16(sp) # 8-byte Folded Spill
 ; RV64-LINUX-NEXT:    li a2, 0
 ; RV64-LINUX-NEXT:    call __riscv_flush_icache
-; RV64-LINUX-NEXT:    mv a0, s0
-; RV64-LINUX-NEXT:    jalr s1
+; RV64-LINUX-NEXT:    ld a0, 8(sp) # 8-byte Folded Reload
+; RV64-LINUX-NEXT:    ld a1, 16(sp) # 8-byte Folded Reload
+; RV64-LINUX-NEXT:    jalr a1
 ; RV64-LINUX-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-LINUX-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-LINUX-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
 ; RV64-LINUX-NEXT:    addi sp, sp, 64
 ; RV64-LINUX-NEXT:    ret
   %alloca = alloca [32 x i8], align 8


        


More information about the llvm-commits mailing list