[llvm] 2dccf11 - [RISCV] Remove gp and tp from callee saved register lists. (#76483)

via llvm-commits <llvm-commits at lists.llvm.org>
Wed Dec 27 21:36:07 PST 2023


Author: Craig Topper
Date: 2023-12-27T21:36:03-08:00
New Revision: 2dccf11b92ee97091fb08b3f3b385d2b870e41b7

URL: https://github.com/llvm/llvm-project/commit/2dccf11b92ee97091fb08b3f3b385d2b870e41b7
DIFF: https://github.com/llvm/llvm-project/commit/2dccf11b92ee97091fb08b3f3b385d2b870e41b7.diff

LOG: [RISCV] Remove gp and tp from callee saved register lists. (#76483)

This appears to match gcc's behavior: the RISC-V psABI reserves gp (x3) and
tp (x4) as platform registers rather than ordinary callee-saved registers,
so a function should not save and restore them.

Resolves
https://discourse.llvm.org/t/risc-v-calling-convention-implementation-in-clang-tp-and-gp-registers/75757
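
To make the effect concrete, here is a minimal sketch (the function name and
llc invocation are illustrative, not taken from this patch). A function whose
inline asm clobbers gp used to get a gp spill/reload wrapped around the asm;
with this change the compiler leaves gp alone:

    ; clobber-gp.ll -- try with: llc -mtriple=riscv64 clobber-gp.ll -o -
    define void @clobber_gp() nounwind {
      ; Previously gp was spilled to the stack before this asm and
      ; reloaded after it; now no spill/reload is emitted.
      call void asm sideeffect "", "~{gp}"()
      ret void
    }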

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVCallingConv.td
    llvm/test/CodeGen/RISCV/inline-asm-abi-names.ll
    llvm/test/CodeGen/RISCV/stack-folding.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.td b/llvm/lib/Target/RISCV/RISCVCallingConv.td
index 130a6ecc143dd8..3dd0b372382865 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.td
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.td
@@ -14,7 +14,7 @@
 // RISCVISelLowering.cpp (CC_RISCV).
 
 def CSR_ILP32_LP64
-    : CalleeSavedRegs<(add X1, X3, X4, X8, X9, (sequence "X%u", 18, 27))>;
+    : CalleeSavedRegs<(add X1, X8, X9, (sequence "X%u", 18, 27))>;
 
 def CSR_ILP32F_LP64F
     : CalleeSavedRegs<(add CSR_ILP32_LP64,
@@ -29,7 +29,7 @@ def CSR_NoRegs : CalleeSavedRegs<(add)>;
 
 // Interrupt handler needs to save/restore all registers that are used,
 // both Caller and Callee saved registers.
-def CSR_Interrupt : CalleeSavedRegs<(add X1, (sequence "X%u", 3, 31))>;
+def CSR_Interrupt : CalleeSavedRegs<(add X1, (sequence "X%u", 5, 31))>;
 
 // Same as CSR_Interrupt, but including all 32-bit FP registers.
 def CSR_XLEN_F32_Interrupt: CalleeSavedRegs<(add CSR_Interrupt,

diff --git a/llvm/test/CodeGen/RISCV/inline-asm-abi-names.ll b/llvm/test/CodeGen/RISCV/inline-asm-abi-names.ll
index 8bfce389497b3f..e235372b0587a5 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-abi-names.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-abi-names.ll
@@ -156,26 +156,18 @@ define i32 @explicit_register_sp(i32 %a) nounwind {
 define i32 @explicit_register_x3(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x3:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw gp, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv gp, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, gp, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw gp, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x3:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd gp, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv gp, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, gp, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld gp, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x3}"(i32 %a)
   ret i32 %1
@@ -185,26 +177,18 @@ define i32 @explicit_register_x3(i32 %a) nounwind {
 define i32 @explicit_register_gp(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_gp:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw gp, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv gp, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, gp, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw gp, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_gp:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd gp, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv gp, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, gp, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld gp, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{gp}"(i32 %a)
   ret i32 %1
@@ -214,26 +198,18 @@ define i32 @explicit_register_gp(i32 %a) nounwind {
 define i32 @explicit_register_x4(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_x4:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw tp, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv tp, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, tp, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw tp, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_x4:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd tp, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv tp, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, tp, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld tp, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{x4}"(i32 %a)
   ret i32 %1
@@ -243,26 +219,18 @@ define i32 @explicit_register_x4(i32 %a) nounwind {
 define i32 @explicit_register_tp(i32 %a) nounwind {
 ; RV32I-LABEL: explicit_register_tp:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw tp, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv tp, a0
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, tp, 0
 ; RV32I-NEXT:    #NO_APP
-; RV32I-NEXT:    lw tp, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: explicit_register_tp:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd tp, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv tp, a0
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, tp, 0
 ; RV64I-NEXT:    #NO_APP
-; RV64I-NEXT:    ld tp, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "addi $0, $1, 0", "=r,{tp}"(i32 %a)
   ret i32 %1

diff --git a/llvm/test/CodeGen/RISCV/stack-folding.ll b/llvm/test/CodeGen/RISCV/stack-folding.ll
index 01f866f612166e..8373a745e45cba 100644
--- a/llvm/test/CodeGen/RISCV/stack-folding.ll
+++ b/llvm/test/CodeGen/RISCV/stack-folding.ll
@@ -7,10 +7,8 @@
 define i1 @test_sext_w(i64 %x, i32 %y) nounwind {
 ; CHECK-LABEL: test_sext_w:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -144
-; CHECK-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd gp, 128(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd tp, 120(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    addi sp, sp, -128
+; CHECK-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
@@ -36,9 +34,7 @@ define i1 @test_sext_w(i64 %x, i32 %y) nounwind {
 ; CHECK-NEXT:    lw a0, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    slti a0, a0, 0
 ; CHECK-NEXT:  .LBB0_3: # %falsebb
-; CHECK-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld gp, 128(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld tp, 120(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
@@ -51,7 +47,7 @@ define i1 @test_sext_w(i64 %x, i32 %y) nounwind {
 ; CHECK-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    addi sp, sp, 144
+; CHECK-NEXT:    addi sp, sp, 128
 ; CHECK-NEXT:    ret
   tail call void asm sideeffect "", "~{x1},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"()
   %a = icmp eq i64 %x, 0
@@ -67,10 +63,8 @@ falsebb:
 define i64 @test_sext_b(i64 %x, i8 %y) nounwind {
 ; RV64I-LABEL: test_sext_b:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -144
-; RV64I-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd gp, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd tp, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    addi sp, sp, -128
+; RV64I-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
@@ -97,9 +91,7 @@ define i64 @test_sext_b(i64 %x, i8 %y) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:  .LBB1_3: # %falsebb
-; RV64I-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld gp, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld tp, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
@@ -112,15 +104,13 @@ define i64 @test_sext_b(i64 %x, i8 %y) nounwind {
 ; RV64I-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 144
+; RV64I-NEXT:    addi sp, sp, 128
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZB-LABEL: test_sext_b:
 ; RV64ZB:       # %bb.0:
-; RV64ZB-NEXT:    addi sp, sp, -144
-; RV64ZB-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
-; RV64ZB-NEXT:    sd gp, 128(sp) # 8-byte Folded Spill
-; RV64ZB-NEXT:    sd tp, 120(sp) # 8-byte Folded Spill
+; RV64ZB-NEXT:    addi sp, sp, -128
+; RV64ZB-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; RV64ZB-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
 ; RV64ZB-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
 ; RV64ZB-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
@@ -145,9 +135,7 @@ define i64 @test_sext_b(i64 %x, i8 %y) nounwind {
 ; RV64ZB-NEXT:  .LBB1_2: # %truebb
 ; RV64ZB-NEXT:    lb a0, 8(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:  .LBB1_3: # %falsebb
-; RV64ZB-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
-; RV64ZB-NEXT:    ld gp, 128(sp) # 8-byte Folded Reload
-; RV64ZB-NEXT:    ld tp, 120(sp) # 8-byte Folded Reload
+; RV64ZB-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
@@ -160,7 +148,7 @@ define i64 @test_sext_b(i64 %x, i8 %y) nounwind {
 ; RV64ZB-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
-; RV64ZB-NEXT:    addi sp, sp, 144
+; RV64ZB-NEXT:    addi sp, sp, 128
 ; RV64ZB-NEXT:    ret
   tail call void asm sideeffect "", "~{x1},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"()
   %a = icmp eq i64 %x, 0
@@ -176,10 +164,8 @@ falsebb:
 define i64 @test_sext_h(i64 %x, i16 %y) nounwind {
 ; RV64I-LABEL: test_sext_h:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -144
-; RV64I-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd gp, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd tp, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    addi sp, sp, -128
+; RV64I-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
@@ -206,9 +192,7 @@ define i64 @test_sext_h(i64 %x, i16 %y) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
 ; RV64I-NEXT:  .LBB2_3: # %falsebb
-; RV64I-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld gp, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld tp, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
@@ -221,15 +205,13 @@ define i64 @test_sext_h(i64 %x, i16 %y) nounwind {
 ; RV64I-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 144
+; RV64I-NEXT:    addi sp, sp, 128
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZB-LABEL: test_sext_h:
 ; RV64ZB:       # %bb.0:
-; RV64ZB-NEXT:    addi sp, sp, -144
-; RV64ZB-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
-; RV64ZB-NEXT:    sd gp, 128(sp) # 8-byte Folded Spill
-; RV64ZB-NEXT:    sd tp, 120(sp) # 8-byte Folded Spill
+; RV64ZB-NEXT:    addi sp, sp, -128
+; RV64ZB-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; RV64ZB-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
 ; RV64ZB-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
 ; RV64ZB-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
@@ -254,9 +236,7 @@ define i64 @test_sext_h(i64 %x, i16 %y) nounwind {
 ; RV64ZB-NEXT:  .LBB2_2: # %truebb
 ; RV64ZB-NEXT:    lh a0, 8(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:  .LBB2_3: # %falsebb
-; RV64ZB-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
-; RV64ZB-NEXT:    ld gp, 128(sp) # 8-byte Folded Reload
-; RV64ZB-NEXT:    ld tp, 120(sp) # 8-byte Folded Reload
+; RV64ZB-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
@@ -269,7 +249,7 @@ define i64 @test_sext_h(i64 %x, i16 %y) nounwind {
 ; RV64ZB-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
-; RV64ZB-NEXT:    addi sp, sp, 144
+; RV64ZB-NEXT:    addi sp, sp, 128
 ; RV64ZB-NEXT:    ret
   tail call void asm sideeffect "", "~{x1},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"()
   %a = icmp eq i64 %x, 0
@@ -285,10 +265,8 @@ falsebb:
 define i64 @test_zext_b(i64 %x, i8 %y) nounwind {
 ; CHECK-LABEL: test_zext_b:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -144
-; CHECK-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd gp, 128(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd tp, 120(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    addi sp, sp, -128
+; CHECK-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
@@ -313,9 +291,7 @@ define i64 @test_zext_b(i64 %x, i8 %y) nounwind {
 ; CHECK-NEXT:  .LBB3_2: # %truebb
 ; CHECK-NEXT:    lbu a0, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:  .LBB3_3: # %falsebb
-; CHECK-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld gp, 128(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld tp, 120(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
@@ -328,7 +304,7 @@ define i64 @test_zext_b(i64 %x, i8 %y) nounwind {
 ; CHECK-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    addi sp, sp, 144
+; CHECK-NEXT:    addi sp, sp, 128
 ; CHECK-NEXT:    ret
   tail call void asm sideeffect "", "~{x1},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"()
   %a = icmp eq i64 %x, 0
@@ -344,10 +320,8 @@ falsebb:
 define i64 @test_zext_h(i64 %x, i16 %y) nounwind {
 ; RV64I-LABEL: test_zext_h:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -144
-; RV64I-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd gp, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd tp, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    addi sp, sp, -128
+; RV64I-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
@@ -374,9 +348,7 @@ define i64 @test_zext_h(i64 %x, i16 %y) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:  .LBB4_3: # %falsebb
-; RV64I-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld gp, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld tp, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
@@ -389,15 +361,13 @@ define i64 @test_zext_h(i64 %x, i16 %y) nounwind {
 ; RV64I-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 144
+; RV64I-NEXT:    addi sp, sp, 128
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZB-LABEL: test_zext_h:
 ; RV64ZB:       # %bb.0:
-; RV64ZB-NEXT:    addi sp, sp, -144
-; RV64ZB-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
-; RV64ZB-NEXT:    sd gp, 128(sp) # 8-byte Folded Spill
-; RV64ZB-NEXT:    sd tp, 120(sp) # 8-byte Folded Spill
+; RV64ZB-NEXT:    addi sp, sp, -128
+; RV64ZB-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; RV64ZB-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
 ; RV64ZB-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
 ; RV64ZB-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
@@ -422,9 +392,7 @@ define i64 @test_zext_h(i64 %x, i16 %y) nounwind {
 ; RV64ZB-NEXT:  .LBB4_2: # %truebb
 ; RV64ZB-NEXT:    lhu a0, 8(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:  .LBB4_3: # %falsebb
-; RV64ZB-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
-; RV64ZB-NEXT:    ld gp, 128(sp) # 8-byte Folded Reload
-; RV64ZB-NEXT:    ld tp, 120(sp) # 8-byte Folded Reload
+; RV64ZB-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
@@ -437,7 +405,7 @@ define i64 @test_zext_h(i64 %x, i16 %y) nounwind {
 ; RV64ZB-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
-; RV64ZB-NEXT:    addi sp, sp, 144
+; RV64ZB-NEXT:    addi sp, sp, 128
 ; RV64ZB-NEXT:    ret
   tail call void asm sideeffect "", "~{x1},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"()
   %a = icmp eq i64 %x, 0
@@ -453,10 +421,8 @@ falsebb:
 define i64 @test_zext_w(i64 %x, i32 %y) nounwind {
 ; RV64I-LABEL: test_zext_w:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -144
-; RV64I-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd gp, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd tp, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    addi sp, sp, -128
+; RV64I-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
@@ -483,9 +449,7 @@ define i64 @test_zext_w(i64 %x, i32 %y) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:  .LBB5_3: # %falsebb
-; RV64I-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld gp, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld tp, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
@@ -498,15 +462,13 @@ define i64 @test_zext_w(i64 %x, i32 %y) nounwind {
 ; RV64I-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 144
+; RV64I-NEXT:    addi sp, sp, 128
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZB-LABEL: test_zext_w:
 ; RV64ZB:       # %bb.0:
-; RV64ZB-NEXT:    addi sp, sp, -144
-; RV64ZB-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
-; RV64ZB-NEXT:    sd gp, 128(sp) # 8-byte Folded Spill
-; RV64ZB-NEXT:    sd tp, 120(sp) # 8-byte Folded Spill
+; RV64ZB-NEXT:    addi sp, sp, -128
+; RV64ZB-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
 ; RV64ZB-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
 ; RV64ZB-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
 ; RV64ZB-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
@@ -531,9 +493,7 @@ define i64 @test_zext_w(i64 %x, i32 %y) nounwind {
 ; RV64ZB-NEXT:  .LBB5_2: # %truebb
 ; RV64ZB-NEXT:    lwu a0, 8(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:  .LBB5_3: # %falsebb
-; RV64ZB-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
-; RV64ZB-NEXT:    ld gp, 128(sp) # 8-byte Folded Reload
-; RV64ZB-NEXT:    ld tp, 120(sp) # 8-byte Folded Reload
+; RV64ZB-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
@@ -546,7 +506,7 @@ define i64 @test_zext_w(i64 %x, i32 %y) nounwind {
 ; RV64ZB-NEXT:    ld s9, 40(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s10, 32(sp) # 8-byte Folded Reload
 ; RV64ZB-NEXT:    ld s11, 24(sp) # 8-byte Folded Reload
-; RV64ZB-NEXT:    addi sp, sp, 144
+; RV64ZB-NEXT:    addi sp, sp, 128
 ; RV64ZB-NEXT:    ret
   tail call void asm sideeffect "", "~{x1},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"()
   %a = icmp eq i64 %x, 0

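A practical consequence, sketched here as an illustration rather than as part
of the commit: with gp and tp no longer callee saved, inline asm that writes
either register must now preserve the old value itself, e.g. by saving and
restoring it inside the asm string:

    ; Illustrative only: the asm preserves gp manually, since the
    ; compiler no longer inserts a spill/reload around the clobber.
    define i32 @use_gp_scratch(i32 %a) nounwind {
      %r = call i32 asm sideeffect "mv t0, gp\0Amv gp, $1\0Aaddi $0, gp, 0\0Amv gp, t0", "=r,r,~{gp},~{x5}"(i32 %a)
      ret i32 %r
    }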
