[llvm] 47b1f83 - [RISCV] Implement isUsedByReturnOnly TargetLowering hook in order to tailcall more libcalls

Alex Bradbury via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 10 02:51:54 PDT 2022


Author: Alex Bradbury
Date: 2022-08-10T10:50:29+01:00
New Revision: 47b1f8362aa409b591b5e9c8a9ff0e3e1d76ed9c

URL: https://github.com/llvm/llvm-project/commit/47b1f8362aa409b591b5e9c8a9ff0e3e1d76ed9c
DIFF: https://github.com/llvm/llvm-project/commit/47b1f8362aa409b591b5e9c8a9ff0e3e1d76ed9c.diff

LOG: [RISCV] Implement isUsedByReturnOnly TargetLowering hook in order to tailcall more libcalls

Prior to this patch, libcalls inserted by the SelectionDAG legalizer
could never be tailcalled. The eligibility of libcalls for tail calling
is partly determined by checking TargetLowering::isInTailCallPosition
and comparing the return type of the libcall and the caller.
isInTailCallPosition in turn calls TargetLowering::isUsedByReturnOnly
(which always returns false if not implemented by the target).

This patch provides a minimal implementation of
TargetLowering::isUsedByReturnOnly - enough to support tail calling
libcalls on hard float ABIs. Soft-float ABIs are left for a follow on
patch. libcall-tail-calls.ll also shows missed opportunities to tail
call integer libcalls, but this is due to issues outside of
the isUsedByReturnOnly hook.

Differential Revision: https://reviews.llvm.org/D131087

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/test/CodeGen/RISCV/div.ll
    llvm/test/CodeGen/RISCV/double-frem.ll
    llvm/test/CodeGen/RISCV/double-intrinsics.ll
    llvm/test/CodeGen/RISCV/double-round-conv.ll
    llvm/test/CodeGen/RISCV/float-intrinsics.ll
    llvm/test/CodeGen/RISCV/float-round-conv.ll
    llvm/test/CodeGen/RISCV/fp16-promote.ll
    llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
    llvm/test/CodeGen/RISCV/mul.ll
    llvm/test/CodeGen/RISCV/rem.ll
    llvm/test/CodeGen/RISCV/srem-lkk.ll
    llvm/test/CodeGen/RISCV/urem-lkk.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 2593365ae55c1..b8aea31ae0bca 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -11812,6 +11812,40 @@ void RISCVTargetLowering::validateCCReservedRegs(
         F, "Argument register required, but has been reserved."});
 }
 
+// Check if the result of the node is only used as a return value, as
+// otherwise we can't perform a tail-call.
+bool RISCVTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
+  if (N->getNumValues() != 1)
+    return false;
+  if (!N->hasNUsesOfValue(1, 0))
+    return false;
+
+  SDNode *Copy = *N->use_begin();
+  // TODO: Handle additional opcodes in order to support tail-calling libcalls
+  // with soft float ABIs.
+  if (Copy->getOpcode() != ISD::CopyToReg) {
+    return false;
+  }
+
+  // If the ISD::CopyToReg has a glue operand, we conservatively assume it
+  // isn't safe to perform a tail call.
+  if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() == MVT::Glue)
+    return false;
+
+  // The copy must be used by a RISCVISD::RET_FLAG, and nothing else.
+  bool HasRet = false;
+  for (SDNode *Node : Copy->uses()) {
+    if (Node->getOpcode() != RISCVISD::RET_FLAG)
+      return false;
+    HasRet = true;
+  }
+  if (!HasRet)
+    return false;
+
+  Chain = Copy->getOperand(0);
+  return true;
+}
+
 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
   return CI->isTailCall();
 }

diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 2f5d4d5954e69..5a1acde4a5b26 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -528,6 +528,7 @@ class RISCVTargetLowering : public TargetLowering {
 
   bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                          Type *Ty) const override;
+  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
   bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
   bool shouldConsiderGEPOffsetSplit() const override { return true; }
 

diff  --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll
index 52233933eaf77..4c62bdb9f4fe5 100644
--- a/llvm/test/CodeGen/RISCV/div.ll
+++ b/llvm/test/CodeGen/RISCV/div.ll
@@ -11,12 +11,7 @@
 define i32 @udiv(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: udiv:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __udivsi3 at plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __udivsi3 at plt
 ;
 ; RV32IM-LABEL: udiv:
 ; RV32IM:       # %bb.0:
@@ -47,13 +42,8 @@ define i32 @udiv(i32 %a, i32 %b) nounwind {
 define i32 @udiv_constant(i32 %a) nounwind {
 ; RV32I-LABEL: udiv_constant:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 5
-; RV32I-NEXT:    call __udivsi3 at plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __udivsi3 at plt
 ;
 ; RV32IM-LABEL: udiv_constant:
 ; RV32IM:       # %bb.0:
@@ -115,14 +105,9 @@ define i32 @udiv_pow2(i32 %a) nounwind {
 define i32 @udiv_constant_lhs(i32 %a) nounwind {
 ; RV32I-LABEL: udiv_constant_lhs:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    li a0, 10
-; RV32I-NEXT:    call __udivsi3 at plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __udivsi3 at plt
 ;
 ; RV32IM-LABEL: udiv_constant_lhs:
 ; RV32IM:       # %bb.0:
@@ -172,12 +157,7 @@ define i64 @udiv64(i64 %a, i64 %b) nounwind {
 ;
 ; RV64I-LABEL: udiv64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __udivdi3 at plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __udivdi3 at plt
 ;
 ; RV64IM-LABEL: udiv64:
 ; RV64IM:       # %bb.0:
@@ -212,13 +192,8 @@ define i64 @udiv64_constant(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: udiv64_constant:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __udivdi3 at plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __udivdi3 at plt
 ;
 ; RV64IM-LABEL: udiv64_constant:
 ; RV64IM:       # %bb.0:
@@ -260,14 +235,9 @@ define i64 @udiv64_constant_lhs(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: udiv64_constant_lhs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __udivdi3 at plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __udivdi3 at plt
 ;
 ; RV64IM-LABEL: udiv64_constant_lhs:
 ; RV64IM:       # %bb.0:
@@ -596,12 +566,7 @@ define i16 @udiv16_constant_lhs(i16 %a) nounwind {
 define i32 @sdiv(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sdiv:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __divsi3 at plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __divsi3 at plt
 ;
 ; RV32IM-LABEL: sdiv:
 ; RV32IM:       # %bb.0:
@@ -630,13 +595,8 @@ define i32 @sdiv(i32 %a, i32 %b) nounwind {
 define i32 @sdiv_constant(i32 %a) nounwind {
 ; RV32I-LABEL: sdiv_constant:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 5
-; RV32I-NEXT:    call __divsi3 at plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __divsi3 at plt
 ;
 ; RV32IM-LABEL: sdiv_constant:
 ; RV32IM:       # %bb.0:
@@ -748,14 +708,9 @@ define i32 @sdiv_pow2_2(i32 %a) nounwind {
 define i32 @sdiv_constant_lhs(i32 %a) nounwind {
 ; RV32I-LABEL: sdiv_constant_lhs:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    li a0, -10
-; RV32I-NEXT:    call __divsi3 at plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __divsi3 at plt
 ;
 ; RV32IM-LABEL: sdiv_constant_lhs:
 ; RV32IM:       # %bb.0:
@@ -804,12 +759,7 @@ define i64 @sdiv64(i64 %a, i64 %b) nounwind {
 ;
 ; RV64I-LABEL: sdiv64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __divdi3 at plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __divdi3 at plt
 ;
 ; RV64IM-LABEL: sdiv64:
 ; RV64IM:       # %bb.0:
@@ -844,13 +794,8 @@ define i64 @sdiv64_constant(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: sdiv64_constant:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    call __divdi3 at plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __divdi3 at plt
 ;
 ; RV64IM-LABEL: sdiv64_constant:
 ; RV64IM:       # %bb.0:
@@ -894,14 +839,9 @@ define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: sdiv64_constant_lhs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __divdi3 at plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __divdi3 at plt
 ;
 ; RV64IM-LABEL: sdiv64_constant_lhs:
 ; RV64IM:       # %bb.0:
@@ -942,14 +882,9 @@ define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
 ;
 ; RV64I-LABEL: sdiv64_sext_operands:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sext.w a1, a1
-; RV64I-NEXT:    call __divdi3 at plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __divdi3 at plt
 ;
 ; RV64IM-LABEL: sdiv64_sext_operands:
 ; RV64IM:       # %bb.0:

diff  --git a/llvm/test/CodeGen/RISCV/double-frem.ll b/llvm/test/CodeGen/RISCV/double-frem.ll
index d3724eb1b4418..555793ec291c0 100644
--- a/llvm/test/CodeGen/RISCV/double-frem.ll
+++ b/llvm/test/CodeGen/RISCV/double-frem.ll
@@ -7,21 +7,11 @@
 define double @frem_f64(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: frem_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call fmod at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
+; RV32IFD-NEXT:    tail fmod at plt
 ;
 ; RV64IFD-LABEL: frem_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call fmod at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:    tail fmod at plt
   %1 = frem double %a, %b
   ret double %1
 }

diff  --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index 374d1308714a9..b66d156abbee7 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -44,12 +44,7 @@ declare double @llvm.powi.f64.i32(double, i32)
 define double @powi_f64(double %a, i32 %b) nounwind {
 ; RV32IFD-LABEL: powi_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call __powidf2 at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
+; RV32IFD-NEXT:    tail __powidf2 at plt
 ;
 ; RV64IFD-LABEL: powi_f64:
 ; RV64IFD:       # %bb.0:
@@ -86,23 +81,9 @@ define double @powi_f64(double %a, i32 %b) nounwind {
 declare double @llvm.sin.f64(double)
 
 define double @sin_f64(double %a) nounwind {
-; RV32IFD-LABEL: sin_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call sin at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: sin_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call sin at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: sin_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail sin at plt
 ;
 ; RV32I-LABEL: sin_f64:
 ; RV32I:       # %bb.0:
@@ -128,23 +109,9 @@ define double @sin_f64(double %a) nounwind {
 declare double @llvm.cos.f64(double)
 
 define double @cos_f64(double %a) nounwind {
-; RV32IFD-LABEL: cos_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call cos at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: cos_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call cos at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: cos_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail cos at plt
 ;
 ; RV32I-LABEL: cos_f64:
 ; RV32I:       # %bb.0:
@@ -262,23 +229,9 @@ define double @sincos_f64(double %a) nounwind {
 declare double @llvm.pow.f64(double, double)
 
 define double @pow_f64(double %a, double %b) nounwind {
-; RV32IFD-LABEL: pow_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call pow at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: pow_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call pow at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: pow_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail pow at plt
 ;
 ; RV32I-LABEL: pow_f64:
 ; RV32I:       # %bb.0:
@@ -304,23 +257,9 @@ define double @pow_f64(double %a, double %b) nounwind {
 declare double @llvm.exp.f64(double)
 
 define double @exp_f64(double %a) nounwind {
-; RV32IFD-LABEL: exp_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call exp at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: exp_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call exp at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: exp_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail exp at plt
 ;
 ; RV32I-LABEL: exp_f64:
 ; RV32I:       # %bb.0:
@@ -346,23 +285,9 @@ define double @exp_f64(double %a) nounwind {
 declare double @llvm.exp2.f64(double)
 
 define double @exp2_f64(double %a) nounwind {
-; RV32IFD-LABEL: exp2_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call exp2 at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: exp2_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call exp2 at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: exp2_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail exp2 at plt
 ;
 ; RV32I-LABEL: exp2_f64:
 ; RV32I:       # %bb.0:
@@ -388,23 +313,9 @@ define double @exp2_f64(double %a) nounwind {
 declare double @llvm.log.f64(double)
 
 define double @log_f64(double %a) nounwind {
-; RV32IFD-LABEL: log_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call log at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: log_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call log at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: log_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail log at plt
 ;
 ; RV32I-LABEL: log_f64:
 ; RV32I:       # %bb.0:
@@ -430,23 +341,9 @@ define double @log_f64(double %a) nounwind {
 declare double @llvm.log10.f64(double)
 
 define double @log10_f64(double %a) nounwind {
-; RV32IFD-LABEL: log10_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call log10 at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: log10_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call log10 at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: log10_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail log10 at plt
 ;
 ; RV32I-LABEL: log10_f64:
 ; RV32I:       # %bb.0:
@@ -472,23 +369,9 @@ define double @log10_f64(double %a) nounwind {
 declare double @llvm.log2.f64(double)
 
 define double @log2_f64(double %a) nounwind {
-; RV32IFD-LABEL: log2_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call log2 at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: log2_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call log2 at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: log2_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail log2 at plt
 ;
 ; RV32I-LABEL: log2_f64:
 ; RV32I:       # %bb.0:
@@ -713,23 +596,9 @@ define double @copysign_f64(double %a, double %b) nounwind {
 declare double @llvm.floor.f64(double)
 
 define double @floor_f64(double %a) nounwind {
-; RV32IFD-LABEL: floor_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call floor at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: floor_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call floor at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: floor_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail floor at plt
 ;
 ; RV32I-LABEL: floor_f64:
 ; RV32I:       # %bb.0:
@@ -755,23 +624,9 @@ define double @floor_f64(double %a) nounwind {
 declare double @llvm.ceil.f64(double)
 
 define double @ceil_f64(double %a) nounwind {
-; RV32IFD-LABEL: ceil_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call ceil at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: ceil_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call ceil at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: ceil_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail ceil at plt
 ;
 ; RV32I-LABEL: ceil_f64:
 ; RV32I:       # %bb.0:
@@ -797,23 +652,9 @@ define double @ceil_f64(double %a) nounwind {
 declare double @llvm.trunc.f64(double)
 
 define double @trunc_f64(double %a) nounwind {
-; RV32IFD-LABEL: trunc_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call trunc at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: trunc_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call trunc at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: trunc_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail trunc at plt
 ;
 ; RV32I-LABEL: trunc_f64:
 ; RV32I:       # %bb.0:
@@ -839,23 +680,9 @@ define double @trunc_f64(double %a) nounwind {
 declare double @llvm.rint.f64(double)
 
 define double @rint_f64(double %a) nounwind {
-; RV32IFD-LABEL: rint_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call rint at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: rint_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call rint at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: rint_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail rint at plt
 ;
 ; RV32I-LABEL: rint_f64:
 ; RV32I:       # %bb.0:
@@ -881,23 +708,9 @@ define double @rint_f64(double %a) nounwind {
 declare double @llvm.nearbyint.f64(double)
 
 define double @nearbyint_f64(double %a) nounwind {
-; RV32IFD-LABEL: nearbyint_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call nearbyint at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: nearbyint_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call nearbyint at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: nearbyint_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail nearbyint at plt
 ;
 ; RV32I-LABEL: nearbyint_f64:
 ; RV32I:       # %bb.0:
@@ -923,23 +736,9 @@ define double @nearbyint_f64(double %a) nounwind {
 declare double @llvm.round.f64(double)
 
 define double @round_f64(double %a) nounwind {
-; RV32IFD-LABEL: round_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call round at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: round_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call round at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: round_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail round at plt
 ;
 ; RV32I-LABEL: round_f64:
 ; RV32I:       # %bb.0:
@@ -965,23 +764,9 @@ define double @round_f64(double %a) nounwind {
 declare double @llvm.roundeven.f64(double)
 
 define double @roundeven_f64(double %a) nounwind {
-; RV32IFD-LABEL: roundeven_f64:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call roundeven at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: roundeven_f64:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call roundeven at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: roundeven_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail roundeven at plt
 ;
 ; RV32I-LABEL: roundeven_f64:
 ; RV32I:       # %bb.0:

diff  --git a/llvm/test/CodeGen/RISCV/double-round-conv.ll b/llvm/test/CodeGen/RISCV/double-round-conv.ll
index 7363f3a441934..16b40eabad821 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv.ll
@@ -625,131 +625,41 @@ define i64 @test_roundeven_ui64(double %x) {
 }
 
 define double @test_floor_double(double %x) {
-; RV32IFD-LABEL: test_floor_double:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call floor at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: test_floor_double:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    .cfi_offset ra, -8
-; RV64IFD-NEXT:    call floor at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: test_floor_double:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail floor at plt
   %a = call double @llvm.floor.f64(double %x)
   ret double %a
 }
 
 define double @test_ceil_double(double %x) {
-; RV32IFD-LABEL: test_ceil_double:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call ceil at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: test_ceil_double:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    .cfi_offset ra, -8
-; RV64IFD-NEXT:    call ceil at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: test_ceil_double:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail ceil at plt
   %a = call double @llvm.ceil.f64(double %x)
   ret double %a
 }
 
 define double @test_trunc_double(double %x) {
-; RV32IFD-LABEL: test_trunc_double:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call trunc at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: test_trunc_double:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    .cfi_offset ra, -8
-; RV64IFD-NEXT:    call trunc at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: test_trunc_double:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail trunc at plt
   %a = call double @llvm.trunc.f64(double %x)
   ret double %a
 }
 
 define double @test_round_double(double %x) {
-; RV32IFD-LABEL: test_round_double:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call round at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: test_round_double:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    .cfi_offset ra, -8
-; RV64IFD-NEXT:    call round at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: test_round_double:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail round at plt
   %a = call double @llvm.round.f64(double %x)
   ret double %a
 }
 
 define double @test_roundeven_double(double %x) {
-; RV32IFD-LABEL: test_roundeven_double:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    .cfi_offset ra, -4
-; RV32IFD-NEXT:    call roundeven at plt
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: test_roundeven_double:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    .cfi_offset ra, -8
-; RV64IFD-NEXT:    call roundeven at plt
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: test_roundeven_double:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    tail roundeven at plt
   %a = call double @llvm.roundeven.f64(double %x)
   ret double %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
index 88012f604d644..dfc2b5d72baae 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -55,12 +55,7 @@ declare float @llvm.powi.f32.i32(float, i32)
 define float @powi_f32(float %a, i32 %b) nounwind {
 ; RV32IF-LABEL: powi_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call __powisf2 at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail __powisf2 at plt
 ;
 ; RV64IF-LABEL: powi_f32:
 ; RV64IF:       # %bb.0:
@@ -99,21 +94,11 @@ declare float @llvm.sin.f32(float)
 define float @sin_f32(float %a) nounwind {
 ; RV32IF-LABEL: sin_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call sinf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail sinf at plt
 ;
 ; RV64IF-LABEL: sin_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call sinf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail sinf at plt
 ;
 ; RV32I-LABEL: sin_f32:
 ; RV32I:       # %bb.0:
@@ -141,21 +126,11 @@ declare float @llvm.cos.f32(float)
 define float @cos_f32(float %a) nounwind {
 ; RV32IF-LABEL: cos_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call cosf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail cosf at plt
 ;
 ; RV64IF-LABEL: cos_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call cosf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail cosf at plt
 ;
 ; RV32I-LABEL: cos_f32:
 ; RV32I:       # %bb.0:
@@ -248,21 +223,11 @@ declare float @llvm.pow.f32(float, float)
 define float @pow_f32(float %a, float %b) nounwind {
 ; RV32IF-LABEL: pow_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call powf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail powf at plt
 ;
 ; RV64IF-LABEL: pow_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call powf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail powf at plt
 ;
 ; RV32I-LABEL: pow_f32:
 ; RV32I:       # %bb.0:
@@ -290,21 +255,11 @@ declare float @llvm.exp.f32(float)
 define float @exp_f32(float %a) nounwind {
 ; RV32IF-LABEL: exp_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call expf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail expf at plt
 ;
 ; RV64IF-LABEL: exp_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call expf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail expf at plt
 ;
 ; RV32I-LABEL: exp_f32:
 ; RV32I:       # %bb.0:
@@ -332,21 +287,11 @@ declare float @llvm.exp2.f32(float)
 define float @exp2_f32(float %a) nounwind {
 ; RV32IF-LABEL: exp2_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call exp2f at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail exp2f at plt
 ;
 ; RV64IF-LABEL: exp2_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call exp2f at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail exp2f at plt
 ;
 ; RV32I-LABEL: exp2_f32:
 ; RV32I:       # %bb.0:
@@ -374,21 +319,11 @@ declare float @llvm.log.f32(float)
 define float @log_f32(float %a) nounwind {
 ; RV32IF-LABEL: log_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call logf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail logf at plt
 ;
 ; RV64IF-LABEL: log_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call logf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail logf at plt
 ;
 ; RV32I-LABEL: log_f32:
 ; RV32I:       # %bb.0:
@@ -416,21 +351,11 @@ declare float @llvm.log10.f32(float)
 define float @log10_f32(float %a) nounwind {
 ; RV32IF-LABEL: log10_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call log10f at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail log10f at plt
 ;
 ; RV64IF-LABEL: log10_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call log10f at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail log10f at plt
 ;
 ; RV32I-LABEL: log10_f32:
 ; RV32I:       # %bb.0:
@@ -458,21 +383,11 @@ declare float @llvm.log2.f32(float)
 define float @log2_f32(float %a) nounwind {
 ; RV32IF-LABEL: log2_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call log2f at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail log2f at plt
 ;
 ; RV64IF-LABEL: log2_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call log2f at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail log2f at plt
 ;
 ; RV32I-LABEL: log2_f32:
 ; RV32I:       # %bb.0:
@@ -725,21 +640,11 @@ declare float @llvm.floor.f32(float)
 define float @floor_f32(float %a) nounwind {
 ; RV32IF-LABEL: floor_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call floorf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail floorf at plt
 ;
 ; RV64IF-LABEL: floor_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call floorf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail floorf at plt
 ;
 ; RV32I-LABEL: floor_f32:
 ; RV32I:       # %bb.0:
@@ -767,21 +672,11 @@ declare float @llvm.ceil.f32(float)
 define float @ceil_f32(float %a) nounwind {
 ; RV32IF-LABEL: ceil_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call ceilf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail ceilf at plt
 ;
 ; RV64IF-LABEL: ceil_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call ceilf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail ceilf at plt
 ;
 ; RV32I-LABEL: ceil_f32:
 ; RV32I:       # %bb.0:
@@ -809,21 +704,11 @@ declare float @llvm.trunc.f32(float)
 define float @trunc_f32(float %a) nounwind {
 ; RV32IF-LABEL: trunc_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call truncf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail truncf at plt
 ;
 ; RV64IF-LABEL: trunc_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call truncf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail truncf at plt
 ;
 ; RV32I-LABEL: trunc_f32:
 ; RV32I:       # %bb.0:
@@ -851,21 +736,11 @@ declare float @llvm.rint.f32(float)
 define float @rint_f32(float %a) nounwind {
 ; RV32IF-LABEL: rint_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call rintf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail rintf at plt
 ;
 ; RV64IF-LABEL: rint_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call rintf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail rintf at plt
 ;
 ; RV32I-LABEL: rint_f32:
 ; RV32I:       # %bb.0:
@@ -893,21 +768,11 @@ declare float @llvm.nearbyint.f32(float)
 define float @nearbyint_f32(float %a) nounwind {
 ; RV32IF-LABEL: nearbyint_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call nearbyintf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail nearbyintf at plt
 ;
 ; RV64IF-LABEL: nearbyint_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call nearbyintf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail nearbyintf at plt
 ;
 ; RV32I-LABEL: nearbyint_f32:
 ; RV32I:       # %bb.0:
@@ -935,21 +800,11 @@ declare float @llvm.round.f32(float)
 define float @round_f32(float %a) nounwind {
 ; RV32IF-LABEL: round_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call roundf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail roundf at plt
 ;
 ; RV64IF-LABEL: round_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call roundf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail roundf at plt
 ;
 ; RV32I-LABEL: round_f32:
 ; RV32I:       # %bb.0:
@@ -977,21 +832,11 @@ declare float @llvm.roundeven.f32(float)
 define float @roundeven_f32(float %a) nounwind {
 ; RV32IF-LABEL: roundeven_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    call roundevenf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail roundevenf at plt
 ;
 ; RV64IF-LABEL: roundeven_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    call roundevenf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail roundevenf at plt
 ;
 ; RV32I-LABEL: roundeven_f32:
 ; RV32I:       # %bb.0:

diff  --git a/llvm/test/CodeGen/RISCV/float-round-conv.ll b/llvm/test/CodeGen/RISCV/float-round-conv.ll
index 52f6cde83d656..c2b7224e0f556 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv.ll
@@ -698,25 +698,11 @@ define float @test_floor_float(float %x) {
 ; RV64IFD-NEXT:    ret
 ; RV32IF-LABEL: test_floor_float:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    .cfi_def_cfa_offset 16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call floorf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail floorf at plt
 ;
 ; RV64IF-LABEL: test_floor_float:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    .cfi_def_cfa_offset 16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call floorf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail floorf at plt
   %a = call float @llvm.floor.f32(float %x)
   ret float %a
 }
@@ -745,25 +731,11 @@ define float @test_ceil_float(float %x) {
 ; RV64IFD-NEXT:    ret
 ; RV32IF-LABEL: test_ceil_float:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    .cfi_def_cfa_offset 16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call ceilf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail ceilf at plt
 ;
 ; RV64IF-LABEL: test_ceil_float:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    .cfi_def_cfa_offset 16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call ceilf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail ceilf at plt
   %a = call float @llvm.ceil.f32(float %x)
   ret float %a
 }
@@ -792,25 +764,11 @@ define float @test_trunc_float(float %x) {
 ; RV64IFD-NEXT:    ret
 ; RV32IF-LABEL: test_trunc_float:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    .cfi_def_cfa_offset 16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call truncf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail truncf at plt
 ;
 ; RV64IF-LABEL: test_trunc_float:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    .cfi_def_cfa_offset 16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call truncf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail truncf at plt
   %a = call float @llvm.trunc.f32(float %x)
   ret float %a
 }
@@ -839,25 +797,11 @@ define float @test_round_float(float %x) {
 ; RV64IFD-NEXT:    ret
 ; RV32IF-LABEL: test_round_float:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    .cfi_def_cfa_offset 16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call roundf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail roundf at plt
 ;
 ; RV64IF-LABEL: test_round_float:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    .cfi_def_cfa_offset 16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call roundf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail roundf at plt
   %a = call float @llvm.round.f32(float %x)
   ret float %a
 }
@@ -886,25 +830,11 @@ define float @test_roundeven_float(float %x) {
 ; RV64IFD-NEXT:    ret
 ; RV32IF-LABEL: test_roundeven_float:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    .cfi_def_cfa_offset 16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    .cfi_offset ra, -4
-; RV32IF-NEXT:    call roundevenf at plt
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    tail roundevenf at plt
 ;
 ; RV64IF-LABEL: test_roundeven_float:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -16
-; RV64IF-NEXT:    .cfi_def_cfa_offset 16
-; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    .cfi_offset ra, -8
-; RV64IF-NEXT:    call roundevenf at plt
-; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 16
-; RV64IF-NEXT:    ret
+; RV64IF-NEXT:    tail roundevenf at plt
   %a = call float @llvm.roundeven.f32(float %x)
   ret float %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/fp16-promote.ll b/llvm/test/CodeGen/RISCV/fp16-promote.ll
index 08fa21fbf0dee..bc29eb47e0ba4 100644
--- a/llvm/test/CodeGen/RISCV/fp16-promote.ll
+++ b/llvm/test/CodeGen/RISCV/fp16-promote.ll
@@ -15,13 +15,8 @@ define void @test_load_store(half* %p, half* %q) nounwind {
 define float @test_fpextend_float(half* %p) nounwind {
 ; CHECK-LABEL: test_fpextend_float:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    lhu a0, 0(a0)
-; CHECK-NEXT:    call __extendhfsf2 at plt
-; CHECK-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+; CHECK-NEXT:    tail __extendhfsf2 at plt
   %a = load half, half* %p
   %r = fpext half %a to float
   ret float %r

diff  --git a/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll b/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
index ab4c3d1799250..cc82d65a98a8e 100644
--- a/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
+++ b/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
@@ -106,12 +106,7 @@ define i64 @mul64(i64 %a, i64 %b) nounwind {
 ;
 ; RV64-ALL-LABEL: mul64:
 ; RV64-ALL:       # %bb.0:
-; RV64-ALL-NEXT:    addi sp, sp, -16
-; RV64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-ALL-NEXT:    call __muldi3 at plt
-; RV64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64-ALL-NEXT:    addi sp, sp, 16
-; RV64-ALL-NEXT:    ret
+; RV64-ALL-NEXT:    tail __muldi3 at plt
   %1 = mul i64 %a, %b
   ret i64 %1
 }
@@ -241,23 +236,27 @@ define half @sin_f16(half %a) nounwind {
 declare float @llvm.sin.f32(float)
 
 define float @sin_f32(float %a) nounwind {
-; RV32-ALL-LABEL: sin_f32:
-; RV32-ALL:       # %bb.0:
-; RV32-ALL-NEXT:    addi sp, sp, -16
-; RV32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT:    call sinf at plt
-; RV32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-ALL-NEXT:    addi sp, sp, 16
-; RV32-ALL-NEXT:    ret
+; F-ABI-ALL-LABEL: sin_f32:
+; F-ABI-ALL:       # %bb.0:
+; F-ABI-ALL-NEXT:    tail sinf at plt
 ;
-; RV64-ALL-LABEL: sin_f32:
-; RV64-ALL:       # %bb.0:
-; RV64-ALL-NEXT:    addi sp, sp, -16
-; RV64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-ALL-NEXT:    call sinf at plt
-; RV64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64-ALL-NEXT:    addi sp, sp, 16
-; RV64-ALL-NEXT:    ret
+; RV32-ILP32-ALL-LABEL: sin_f32:
+; RV32-ILP32-ALL:       # %bb.0:
+; RV32-ILP32-ALL-NEXT:    addi sp, sp, -16
+; RV32-ILP32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ILP32-ALL-NEXT:    call sinf at plt
+; RV32-ILP32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ILP32-ALL-NEXT:    addi sp, sp, 16
+; RV32-ILP32-ALL-NEXT:    ret
+;
+; RV64-LP64-ALL-LABEL: sin_f32:
+; RV64-LP64-ALL:       # %bb.0:
+; RV64-LP64-ALL-NEXT:    addi sp, sp, -16
+; RV64-LP64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-LP64-ALL-NEXT:    call sinf at plt
+; RV64-LP64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-LP64-ALL-NEXT:    addi sp, sp, 16
+; RV64-LP64-ALL-NEXT:    ret
   %1 = call float @llvm.sin.f32(float %a)
   ret float %1
 }
@@ -265,14 +264,22 @@ define float @sin_f32(float %a) nounwind {
 declare float @llvm.powi.f32.i32(float, i32)
 
 define float @powi_f32(float %a, i32 %b) nounwind {
-; RV32-ALL-LABEL: powi_f32:
-; RV32-ALL:       # %bb.0:
-; RV32-ALL-NEXT:    addi sp, sp, -16
-; RV32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT:    call __powisf2 at plt
-; RV32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-ALL-NEXT:    addi sp, sp, 16
-; RV32-ALL-NEXT:    ret
+; RV32IFD-ILP32D-LABEL: powi_f32:
+; RV32IFD-ILP32D:       # %bb.0:
+; RV32IFD-ILP32D-NEXT:    tail __powisf2 at plt
+;
+; RV32IF-ILP32F-LABEL: powi_f32:
+; RV32IF-ILP32F:       # %bb.0:
+; RV32IF-ILP32F-NEXT:    tail __powisf2 at plt
+;
+; RV32-ILP32-ALL-LABEL: powi_f32:
+; RV32-ILP32-ALL:       # %bb.0:
+; RV32-ILP32-ALL-NEXT:    addi sp, sp, -16
+; RV32-ILP32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ILP32-ALL-NEXT:    call __powisf2 at plt
+; RV32-ILP32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ILP32-ALL-NEXT:    addi sp, sp, 16
+; RV32-ILP32-ALL-NEXT:    ret
 ;
 ; RV64IFD-LP64D-LABEL: powi_f32:
 ; RV64IFD-LP64D:       # %bb.0:
@@ -352,23 +359,45 @@ define i64 @llround_f32(float %a) nounwind {
 declare double @llvm.sin.f64(double)
 
 define double @sin_f64(double %a) nounwind {
-; RV32-ALL-LABEL: sin_f64:
-; RV32-ALL:       # %bb.0:
-; RV32-ALL-NEXT:    addi sp, sp, -16
-; RV32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT:    call sin at plt
-; RV32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-ALL-NEXT:    addi sp, sp, 16
-; RV32-ALL-NEXT:    ret
+; D-ABI-ALL-LABEL: sin_f64:
+; D-ABI-ALL:       # %bb.0:
+; D-ABI-ALL-NEXT:    tail sin at plt
 ;
-; RV64-ALL-LABEL: sin_f64:
-; RV64-ALL:       # %bb.0:
-; RV64-ALL-NEXT:    addi sp, sp, -16
-; RV64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-ALL-NEXT:    call sin at plt
-; RV64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64-ALL-NEXT:    addi sp, sp, 16
-; RV64-ALL-NEXT:    ret
+; RV32IF-ILP32F-LABEL: sin_f64:
+; RV32IF-ILP32F:       # %bb.0:
+; RV32IF-ILP32F-NEXT:    addi sp, sp, -16
+; RV32IF-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-ILP32F-NEXT:    call sin at plt
+; RV32IF-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-ILP32F-NEXT:    addi sp, sp, 16
+; RV32IF-ILP32F-NEXT:    ret
+;
+; RV32-ILP32-ALL-LABEL: sin_f64:
+; RV32-ILP32-ALL:       # %bb.0:
+; RV32-ILP32-ALL-NEXT:    addi sp, sp, -16
+; RV32-ILP32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ILP32-ALL-NEXT:    call sin at plt
+; RV32-ILP32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ILP32-ALL-NEXT:    addi sp, sp, 16
+; RV32-ILP32-ALL-NEXT:    ret
+;
+; RV64IF-LP64F-LABEL: sin_f64:
+; RV64IF-LP64F:       # %bb.0:
+; RV64IF-LP64F-NEXT:    addi sp, sp, -16
+; RV64IF-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-LP64F-NEXT:    call sin at plt
+; RV64IF-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-LP64F-NEXT:    addi sp, sp, 16
+; RV64IF-LP64F-NEXT:    ret
+;
+; RV64-LP64-ALL-LABEL: sin_f64:
+; RV64-LP64-ALL:       # %bb.0:
+; RV64-LP64-ALL-NEXT:    addi sp, sp, -16
+; RV64-LP64-ALL-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-LP64-ALL-NEXT:    call sin at plt
+; RV64-LP64-ALL-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-LP64-ALL-NEXT:    addi sp, sp, 16
+; RV64-LP64-ALL-NEXT:    ret
   %1 = call double @llvm.sin.f64(double %a)
   ret double %1
 }
@@ -376,14 +405,27 @@ define double @sin_f64(double %a) nounwind {
 declare double @llvm.powi.f64.i32(double, i32)
 
 define double @powi_f64(double %a, i32 %b) nounwind {
-; RV32-ALL-LABEL: powi_f64:
-; RV32-ALL:       # %bb.0:
-; RV32-ALL-NEXT:    addi sp, sp, -16
-; RV32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT:    call __powidf2 at plt
-; RV32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-ALL-NEXT:    addi sp, sp, 16
-; RV32-ALL-NEXT:    ret
+; RV32IFD-ILP32D-LABEL: powi_f64:
+; RV32IFD-ILP32D:       # %bb.0:
+; RV32IFD-ILP32D-NEXT:    tail __powidf2 at plt
+;
+; RV32IF-ILP32F-LABEL: powi_f64:
+; RV32IF-ILP32F:       # %bb.0:
+; RV32IF-ILP32F-NEXT:    addi sp, sp, -16
+; RV32IF-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-ILP32F-NEXT:    call __powidf2 at plt
+; RV32IF-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-ILP32F-NEXT:    addi sp, sp, 16
+; RV32IF-ILP32F-NEXT:    ret
+;
+; RV32-ILP32-ALL-LABEL: powi_f64:
+; RV32-ILP32-ALL:       # %bb.0:
+; RV32-ILP32-ALL-NEXT:    addi sp, sp, -16
+; RV32-ILP32-ALL-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ILP32-ALL-NEXT:    call __powidf2 at plt
+; RV32-ILP32-ALL-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ILP32-ALL-NEXT:    addi sp, sp, 16
+; RV32-ILP32-ALL-NEXT:    ret
 ;
 ; RV64IFD-LP64D-LABEL: powi_f64:
 ; RV64IFD-LP64D:       # %bb.0:
@@ -561,6 +603,3 @@ define i64 @atomicrmw_nand_i64_seq_cst(i64* %a, i64 %b) nounwind {
 }
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; ALL: {{.*}}
-; D-ABI-ALL: {{.*}}
-; F-ABI-ALL: {{.*}}
-; RV32-ILP32-ALL: {{.*}}

diff  --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index ba6fcc4122be9..3923c4340d30e 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -150,12 +150,7 @@ define i64 @mul64(i64 %a, i64 %b) nounwind {
 ;
 ; RV64I-LABEL: mul64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __muldi3 at plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __muldi3 at plt
 ;
 ; RV64IM-LABEL: mul64:
 ; RV64IM:       # %bb.0:
@@ -755,13 +750,8 @@ define i64 @muli64_m65(i64 %a) nounwind {
 define i32 @muli32_p384(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p384:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 384
-; RV32I-NEXT:    call __mulsi3 at plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __mulsi3 at plt
 ;
 ; RV32IM-LABEL: muli32_p384:
 ; RV32IM:       # %bb.0:
@@ -791,13 +781,8 @@ define i32 @muli32_p384(i32 %a) nounwind {
 define i32 @muli32_p12288(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p12288:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lui a1, 3
-; RV32I-NEXT:    call __mulsi3 at plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __mulsi3 at plt
 ;
 ; RV32IM-LABEL: muli32_p12288:
 ; RV32IM:       # %bb.0:
@@ -923,14 +908,9 @@ define i32 @muli32_m3840(i32 %a) nounwind {
 define i32 @muli32_m4352(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_m4352:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lui a1, 1048575
 ; RV32I-NEXT:    addi a1, a1, -256
-; RV32I-NEXT:    call __mulsi3 at plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __mulsi3 at plt
 ;
 ; RV32IM-LABEL: muli32_m4352:
 ; RV32IM:       # %bb.0:
@@ -1074,14 +1054,9 @@ define i64 @muli64_m4352(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: muli64_m4352:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lui a1, 1048575
 ; RV64I-NEXT:    addiw a1, a1, -256
-; RV64I-NEXT:    call __muldi3 at plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __muldi3 at plt
 ;
 ; RV64IM-LABEL: muli64_m4352:
 ; RV64IM:       # %bb.0:

diff  --git a/llvm/test/CodeGen/RISCV/rem.ll b/llvm/test/CodeGen/RISCV/rem.ll
index 9da7044d8de59..3d5fd3fd43317 100644
--- a/llvm/test/CodeGen/RISCV/rem.ll
+++ b/llvm/test/CodeGen/RISCV/rem.ll
@@ -11,12 +11,7 @@
 define i32 @urem(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: urem:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __umodsi3 at plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __umodsi3 at plt
 ;
 ; RV32IM-LABEL: urem:
 ; RV32IM:       # %bb.0:
@@ -47,14 +42,9 @@ define i32 @urem(i32 %a, i32 %b) nounwind {
 define i32 @urem_constant_lhs(i32 %a) nounwind {
 ; RV32I-LABEL: urem_constant_lhs:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    li a0, 10
-; RV32I-NEXT:    call __umodsi3 at plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __umodsi3 at plt
 ;
 ; RV32IM-LABEL: urem_constant_lhs:
 ; RV32IM:       # %bb.0:
@@ -86,12 +76,7 @@ define i32 @urem_constant_lhs(i32 %a) nounwind {
 define i32 @srem(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: srem:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call __modsi3 at plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __modsi3 at plt
 ;
 ; RV32IM-LABEL: srem:
 ; RV32IM:       # %bb.0:
@@ -204,14 +189,9 @@ define i32 @srem_pow2_2(i32 %a) nounwind {
 define i32 @srem_constant_lhs(i32 %a) nounwind {
 ; RV32I-LABEL: srem_constant_lhs:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    li a0, -10
-; RV32I-NEXT:    call __modsi3 at plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __modsi3 at plt
 ;
 ; RV32IM-LABEL: srem_constant_lhs:
 ; RV32IM:       # %bb.0:
@@ -260,12 +240,7 @@ define i64 @urem64(i64 %a, i64 %b) nounwind {
 ;
 ; RV64I-LABEL: urem64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __umoddi3 at plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __umoddi3 at plt
 ;
 ; RV64IM-LABEL: urem64:
 ; RV64IM:       # %bb.0:
@@ -304,14 +279,9 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: urem64_constant_lhs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    li a0, 10
-; RV64I-NEXT:    call __umoddi3@plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __umoddi3@plt
 ;
 ; RV64IM-LABEL: urem64_constant_lhs:
 ; RV64IM:       # %bb.0:
@@ -343,12 +313,7 @@ define i64 @srem64(i64 %a, i64 %b) nounwind {
 ;
 ; RV64I-LABEL: srem64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call __moddi3@plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __moddi3@plt
 ;
 ; RV64IM-LABEL: srem64:
 ; RV64IM:       # %bb.0:
@@ -387,14 +352,9 @@ define i64 @srem64_constant_lhs(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: srem64_constant_lhs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    li a0, -10
-; RV64I-NEXT:    call __moddi3@plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __moddi3@plt
 ;
 ; RV64IM-LABEL: srem64_constant_lhs:
 ; RV64IM:       # %bb.0:

diff  --git a/llvm/test/CodeGen/RISCV/srem-lkk.ll b/llvm/test/CodeGen/RISCV/srem-lkk.ll
index a1a9a06d42b18..c28a92603e739 100644
--- a/llvm/test/CodeGen/RISCV/srem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-lkk.ll
@@ -11,13 +11,8 @@
 define i32 @fold_srem_positive_odd(i32 %x) nounwind {
 ; RV32I-LABEL: fold_srem_positive_odd:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 95
-; RV32I-NEXT:    call __modsi3@plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __modsi3@plt
 ;
 ; RV32IM-LABEL: fold_srem_positive_odd:
 ; RV32IM:       # %bb.0:
@@ -67,13 +62,8 @@ define i32 @fold_srem_positive_odd(i32 %x) nounwind {
 define i32 @fold_srem_positive_even(i32 %x) nounwind {
 ; RV32I-LABEL: fold_srem_positive_even:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 1060
-; RV32I-NEXT:    call __modsi3@plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __modsi3@plt
 ;
 ; RV32IM-LABEL: fold_srem_positive_even:
 ; RV32IM:       # %bb.0:
@@ -120,13 +110,8 @@ define i32 @fold_srem_positive_even(i32 %x) nounwind {
 define i32 @fold_srem_negative_odd(i32 %x) nounwind {
 ; RV32I-LABEL: fold_srem_negative_odd:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, -723
-; RV32I-NEXT:    call __modsi3@plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __modsi3@plt
 ;
 ; RV32IM-LABEL: fold_srem_negative_odd:
 ; RV32IM:       # %bb.0:
@@ -173,14 +158,9 @@ define i32 @fold_srem_negative_odd(i32 %x) nounwind {
 define i32 @fold_srem_negative_even(i32 %x) nounwind {
 ; RV32I-LABEL: fold_srem_negative_even:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lui a1, 1048570
 ; RV32I-NEXT:    addi a1, a1, 1595
-; RV32I-NEXT:    call __modsi3@plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __modsi3@plt
 ;
 ; RV32IM-LABEL: fold_srem_negative_even:
 ; RV32IM:       # %bb.0:
@@ -429,13 +409,8 @@ define i64 @dont_fold_srem_i64(i64 %x) nounwind {
 ;
 ; RV64I-LABEL: dont_fold_srem_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 98
-; RV64I-NEXT:    call __moddi3@plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __moddi3@plt
 ;
 ; RV64IM-LABEL: dont_fold_srem_i64:
 ; RV64IM:       # %bb.0:

diff  --git a/llvm/test/CodeGen/RISCV/urem-lkk.ll b/llvm/test/CodeGen/RISCV/urem-lkk.ll
index 35f4d0bbb30ef..715deddbe19e5 100644
--- a/llvm/test/CodeGen/RISCV/urem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-lkk.ll
@@ -11,13 +11,8 @@
 define i32 @fold_urem_positive_odd(i32 %x) nounwind {
 ; RV32I-LABEL: fold_urem_positive_odd:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 95
-; RV32I-NEXT:    call __umodsi3@plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __umodsi3@plt
 ;
 ; RV32IM-LABEL: fold_urem_positive_odd:
 ; RV32IM:       # %bb.0:
@@ -69,13 +64,8 @@ define i32 @fold_urem_positive_odd(i32 %x) nounwind {
 define i32 @fold_urem_positive_even(i32 %x) nounwind {
 ; RV32I-LABEL: fold_urem_positive_even:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    li a1, 1060
-; RV32I-NEXT:    call __umodsi3@plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    tail __umodsi3@plt
 ;
 ; RV32IM-LABEL: fold_urem_positive_even:
 ; RV32IM:       # %bb.0:
@@ -254,13 +244,8 @@ define i64 @dont_fold_urem_i64(i64 %x) nounwind {
 ;
 ; RV64I-LABEL: dont_fold_urem_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li a1, 98
-; RV64I-NEXT:    call __umoddi3@plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
+; RV64I-NEXT:    tail __umoddi3@plt
 ;
 ; RV64IM-LABEL: dont_fold_urem_i64:
 ; RV64IM:       # %bb.0:


        


More information about the llvm-commits mailing list