[llvm] 644899a - [RISCV][GISel] Port portions of float-intrinsics.ll and double-intrinsics.ll. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 18 13:15:38 PDT 2024


Author: Craig Topper
Date: 2024-09-18T13:13:34-07:00
New Revision: 644899addd8fd789c93e9a0f0727d37eb1b29c55

URL: https://github.com/llvm/llvm-project/commit/644899addd8fd789c93e9a0f0727d37eb1b29c55
DIFF: https://github.com/llvm/llvm-project/commit/644899addd8fd789c93e9a0f0727d37eb1b29c55.diff

LOG: [RISCV][GISel] Port portions of float-intrinsics.ll and double-intrinsics.ll. NFC

Remove the legalizer test for the same intrinsics as it is no longer
interesting with end-to-end tests.

Added: 
    llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
    llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll

Modified: 
    

Removed: 
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-fp-libcall.mir


################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
new file mode 100644
index 00000000000000..ad461f8f24b917
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
@@ -0,0 +1,264 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel -mattr=+d \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d \
+; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -mattr=+d \
+; RUN:   -verify-machineinstrs -target-abi=lp64d \
+; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
+
+declare double @llvm.sqrt.f64(double)
+
+define double @sqrt_f64(double %a) nounwind {
+; CHECKIFD-LABEL: sqrt_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    fsqrt.d fa0, fa0
+; CHECKIFD-NEXT:    ret
+  %1 = call double @llvm.sqrt.f64(double %a)
+  ret double %1
+}
+
+declare double @llvm.fma.f64(double, double, double)
+
+define double @fma_f64(double %a, double %b, double %c) nounwind {
+; CHECKIFD-LABEL: fma_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
+; CHECKIFD-NEXT:    ret
+  %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
+  ret double %1
+}
+
+declare double @llvm.fmuladd.f64(double, double, double)
+
+define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
+; CHECKIFD-LABEL: fmuladd_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
+; CHECKIFD-NEXT:    ret
+  %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
+  ret double %1
+}
+
+declare double @llvm.fabs.f64(double)
+
+define double @fabs_f64(double %a) nounwind {
+; CHECKIFD-LABEL: fabs_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    fabs.d fa0, fa0
+; CHECKIFD-NEXT:    ret
+  %1 = call double @llvm.fabs.f64(double %a)
+  ret double %1
+}
+
+declare double @llvm.minnum.f64(double, double)
+
+define double @minnum_f64(double %a, double %b) nounwind {
+; CHECKIFD-LABEL: minnum_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    fmin.d fa0, fa0, fa1
+; CHECKIFD-NEXT:    ret
+  %1 = call double @llvm.minnum.f64(double %a, double %b)
+  ret double %1
+}
+
+declare double @llvm.maxnum.f64(double, double)
+
+define double @maxnum_f64(double %a, double %b) nounwind {
+; CHECKIFD-LABEL: maxnum_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    fmax.d fa0, fa0, fa1
+; CHECKIFD-NEXT:    ret
+  %1 = call double @llvm.maxnum.f64(double %a, double %b)
+  ret double %1
+}
+
+declare double @llvm.copysign.f64(double, double)
+
+define double @copysign_f64(double %a, double %b) nounwind {
+; CHECKIFD-LABEL: copysign_f64:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    fsgnj.d fa0, fa0, fa1
+; CHECKIFD-NEXT:    ret
+  %1 = call double @llvm.copysign.f64(double %a, double %b)
+  ret double %1
+}
+
+declare double @llvm.floor.f64(double)
+
+define double @floor_f64(double %a) nounwind {
+; RV32IFD-LABEL: floor_f64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call floor
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: floor_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call floor
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+  %1 = call double @llvm.floor.f64(double %a)
+  ret double %1
+}
+
+declare double @llvm.ceil.f64(double)
+
+define double @ceil_f64(double %a) nounwind {
+; RV32IFD-LABEL: ceil_f64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call ceil
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: ceil_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call ceil
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+  %1 = call double @llvm.ceil.f64(double %a)
+  ret double %1
+}
+
+declare double @llvm.trunc.f64(double)
+
+define double @trunc_f64(double %a) nounwind {
+; RV32IFD-LABEL: trunc_f64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call trunc
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: trunc_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call trunc
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+  %1 = call double @llvm.trunc.f64(double %a)
+  ret double %1
+}
+
+declare double @llvm.rint.f64(double)
+
+define double @rint_f64(double %a) nounwind {
+; RV32IFD-LABEL: rint_f64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call rint
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: rint_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call rint
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+  %1 = call double @llvm.rint.f64(double %a)
+  ret double %1
+}
+
+declare double @llvm.nearbyint.f64(double)
+
+define double @nearbyint_f64(double %a) nounwind {
+; RV32IFD-LABEL: nearbyint_f64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call nearbyint
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: nearbyint_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call nearbyint
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+  %1 = call double @llvm.nearbyint.f64(double %a)
+  ret double %1
+}
+
+declare double @llvm.round.f64(double)
+
+define double @round_f64(double %a) nounwind {
+; RV32IFD-LABEL: round_f64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call round
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: round_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call round
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+  %1 = call double @llvm.round.f64(double %a)
+  ret double %1
+}
+
+declare double @llvm.roundeven.f64(double)
+
+define double @roundeven_f64(double %a) nounwind {
+; RV32IFD-LABEL: roundeven_f64:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    call roundeven
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: roundeven_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    call roundeven
+; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+  %1 = call double @llvm.roundeven.f64(double %a)
+  ret double %1
+}
+
+declare i1 @llvm.is.fpclass.f64(double, i32)
+define i1 @isnan_d_fpclass(double %x) {
+; CHECKIFD-LABEL: isnan_d_fpclass:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    fclass.d a0, fa0
+; CHECKIFD-NEXT:    andi a0, a0, 768
+; CHECKIFD-NEXT:    snez a0, a0
+; CHECKIFD-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f64(double %x, i32 3)  ; nan
+  ret i1 %1
+}

diff  --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll
new file mode 100644
index 00000000000000..39a5beb317ab91
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll
@@ -0,0 +1,441 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel -mattr=+f \
+; RUN:   -verify-machineinstrs -target-abi=ilp32f \
+; RUN:   | FileCheck -check-prefix=RV32IF %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel -mattr=+d \
+; RUN:   -verify-machineinstrs -target-abi=ilp32f \
+; RUN:   | FileCheck -check-prefix=RV32IF %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -mattr=+f \
+; RUN:   -verify-machineinstrs -target-abi=lp64f \
+; RUN:   | FileCheck -check-prefix=RV64IF %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel  -mattr=+d \
+; RUN:   -verify-machineinstrs -target-abi=lp64d \
+; RUN:   | FileCheck -check-prefix=RV64IF %s
+
+define float @sqrt_f32(float %a) nounwind {
+; RV32IF-LABEL: sqrt_f32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fsqrt.s fa0, fa0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: sqrt_f32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fsqrt.s fa0, fa0
+; RV64IF-NEXT:    ret
+  %1 = call float @llvm.sqrt.f32(float %a)
+  ret float %1
+}
+
+define float @fma_f32(float %a, float %b, float %c) nounwind {
+; RV32IF-LABEL: fma_f32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fma_f32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
+; RV64IF-NEXT:    ret
+  %1 = call float @llvm.fma.f32(float %a, float %b, float %c)
+  ret float %1
+}
+
+define float @fmuladd_f32(float %a, float %b, float %c) nounwind {
+; RV32IF-LABEL: fmuladd_f32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fmuladd_f32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
+; RV64IF-NEXT:    ret
+  %1 = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
+  ret float %1
+}
+
+define float @fabs_f32(float %a) nounwind {
+; RV32IF-LABEL: fabs_f32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fabs.s fa0, fa0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fabs_f32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fabs.s fa0, fa0
+; RV64IF-NEXT:    ret
+  %1 = call float @llvm.fabs.f32(float %a)
+  ret float %1
+}
+
+define float @minnum_f32(float %a, float %b) nounwind {
+; RV32IF-LABEL: minnum_f32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fmin.s fa0, fa0, fa1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: minnum_f32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fmin.s fa0, fa0, fa1
+; RV64IF-NEXT:    ret
+  %1 = call float @llvm.minnum.f32(float %a, float %b)
+  ret float %1
+}
+
+define float @maxnum_f32(float %a, float %b) nounwind {
+; RV32IF-LABEL: maxnum_f32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fmax.s fa0, fa0, fa1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: maxnum_f32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fmax.s fa0, fa0, fa1
+; RV64IF-NEXT:    ret
+  %1 = call float @llvm.maxnum.f32(float %a, float %b)
+  ret float %1
+}
+
+define float @copysign_f32(float %a, float %b) nounwind {
+; RV32IF-LABEL: copysign_f32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fsgnj.s fa0, fa0, fa1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: copysign_f32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fsgnj.s fa0, fa0, fa1
+; RV64IF-NEXT:    ret
+  %1 = call float @llvm.copysign.f32(float %a, float %b)
+  ret float %1
+}
+
+define float @ceil_f32(float %a) nounwind {
+; RV32IF-LABEL: ceil_f32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call ceilf
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: ceil_f32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call ceilf
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    ret
+  %1 = call float @llvm.ceil.f32(float %a)
+  ret float %1
+}
+
+define float @trunc_f32(float %a) nounwind {
+; RV32IF-LABEL: trunc_f32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call truncf
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: trunc_f32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call truncf
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    ret
+  %1 = call float @llvm.trunc.f32(float %a)
+  ret float %1
+}
+
+define float @rint_f32(float %a) nounwind {
+; RV32IF-LABEL: rint_f32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call rintf
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: rint_f32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call rintf
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    ret
+  %1 = call float @llvm.rint.f32(float %a)
+  ret float %1
+}
+
+define float @nearbyint_f32(float %a) nounwind {
+; RV32IF-LABEL: nearbyint_f32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call nearbyintf
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: nearbyint_f32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call nearbyintf
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    ret
+  %1 = call float @llvm.nearbyint.f32(float %a)
+  ret float %1
+}
+
+define float @round_f32(float %a) nounwind {
+; RV32IF-LABEL: round_f32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call roundf
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: round_f32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call roundf
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    ret
+  %1 = call float @llvm.round.f32(float %a)
+  ret float %1
+}
+
+define float @roundeven_f32(float %a) nounwind {
+; RV32IF-LABEL: roundeven_f32:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    call roundevenf
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: roundeven_f32:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    call roundevenf
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    ret
+  %1 = call float @llvm.roundeven.f32(float %a)
+  ret float %1
+}
+
+define i1 @fpclass(float %x) {
+; RV32IF-LABEL: fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 927
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 927
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+  %cmp = call i1 @llvm.is.fpclass.f32(float %x, i32 639)
+  ret i1 %cmp
+}
+
+define i1 @isnan_fpclass(float %x) {
+; RV32IF-LABEL: isnan_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 768
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isnan_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 768
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 3)  ; nan
+  ret i1 %1
+}
+
+define i1 @isqnan_fpclass(float %x) {
+; RV32IF-LABEL: isqnan_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 512
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isqnan_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 512
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 2)  ; qnan
+  ret i1 %1
+}
+
+define i1 @issnan_fpclass(float %x) {
+; RV32IF-LABEL: issnan_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 256
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: issnan_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 256
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 1)  ; snan
+  ret i1 %1
+}
+
+define i1 @isinf_fpclass(float %x) {
+; RV32IF-LABEL: isinf_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 129
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isinf_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 129
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 516)  ; 0x204 = "inf"
+  ret i1 %1
+}
+
+define i1 @isposinf_fpclass(float %x) {
+; RV32IF-LABEL: isposinf_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 128
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isposinf_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 128
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 512)  ; 0x200 = "+inf"
+  ret i1 %1
+}
+
+define i1 @isneginf_fpclass(float %x) {
+; RV32IF-LABEL: isneginf_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 1
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isneginf_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 1
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 4)  ; "-inf"
+  ret i1 %1
+}
+
+define i1 @isfinite_fpclass(float %x) {
+; RV32IF-LABEL: isfinite_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 126
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isfinite_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 126
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 504)  ; 0x1f8 = "finite"
+  ret i1 %1
+}
+
+define i1 @isposfinite_fpclass(float %x) {
+; RV32IF-LABEL: isposfinite_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 112
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isposfinite_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 112
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 448)  ; 0x1c0 = "+finite"
+  ret i1 %1
+}
+
+define i1 @isnegfinite_fpclass(float %x) {
+; RV32IF-LABEL: isnegfinite_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 14
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isnegfinite_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 14
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 56)  ; 0x38 = "-finite"
+  ret i1 %1
+}
+
+define i1 @isnotfinite_fpclass(float %x) {
+; RV32IF-LABEL: isnotfinite_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 897
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isnotfinite_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 897
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 519)  ; 0x207 = "inf|nan"
+  ret i1 %1
+}

diff  --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-fp-libcall.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-fp-libcall.mir
deleted file mode 100644
index 3b4f6a065d9736..00000000000000
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-fp-libcall.mir
+++ /dev/null
@@ -1,328 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=riscv32 -mattr=+d -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s
-# RUN: llc -mtriple=riscv64 -mattr=+d -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s
-
----
-name:            ceil_f32
-body:             |
-  bb.1:
-    liveins: $f10_f
-
-    ; CHECK-LABEL: name: ceil_f32
-    ; CHECK: liveins: $f10_f
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
-    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: $f10_f = COPY [[COPY]](s32)
-    ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &ceilf, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_f, implicit-def $f10_f
-    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
-    ; CHECK-NEXT: $f10_f = COPY [[COPY1]](s32)
-    ; CHECK-NEXT: PseudoRET implicit $f10_f
-    %0:_(s32) = COPY $f10_f
-    %1:_(s32) = G_FCEIL %0
-    $f10_f = COPY %1(s32)
-    PseudoRET implicit $f10_f
-
-...
----
-name:            floor_f32
-body:             |
-  bb.1:
-    liveins: $f10_f
-
-    ; CHECK-LABEL: name: floor_f32
-    ; CHECK: liveins: $f10_f
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
-    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: $f10_f = COPY [[COPY]](s32)
-    ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &floorf, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_f, implicit-def $f10_f
-    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
-    ; CHECK-NEXT: $f10_f = COPY [[COPY1]](s32)
-    ; CHECK-NEXT: PseudoRET implicit $f10_f
-    %0:_(s32) = COPY $f10_f
-    %1:_(s32) = G_FFLOOR %0
-    $f10_f = COPY %1(s32)
-    PseudoRET implicit $f10_f
-
-...
----
-name:            trunc_f32
-body:             |
-  bb.1:
-    liveins: $f10_f
-
-    ; CHECK-LABEL: name: trunc_f32
-    ; CHECK: liveins: $f10_f
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
-    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: $f10_f = COPY [[COPY]](s32)
-    ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &truncf, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_f, implicit-def $f10_f
-    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
-    ; CHECK-NEXT: $f10_f = COPY [[COPY1]](s32)
-    ; CHECK-NEXT: PseudoRET implicit $f10_f
-    %0:_(s32) = COPY $f10_f
-    %1:_(s32) = G_INTRINSIC_TRUNC %0
-    $f10_f = COPY %1(s32)
-    PseudoRET implicit $f10_f
-
-...
----
-name:            rint_f32
-body:             |
-  bb.1:
-    liveins: $f10_f
-
-    ; CHECK-LABEL: name: rint_f32
-    ; CHECK: liveins: $f10_f
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
-    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: $f10_f = COPY [[COPY]](s32)
-    ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &rintf, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_f, implicit-def $f10_f
-    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
-    ; CHECK-NEXT: $f10_f = COPY [[COPY1]](s32)
-    ; CHECK-NEXT: PseudoRET implicit $f10_f
-    %0:_(s32) = COPY $f10_f
-    %1:_(s32) = G_FRINT %0
-    $f10_f = COPY %1(s32)
-    PseudoRET implicit $f10_f
-
-...
----
-name:            nearbyint_f32
-body:             |
-  bb.1:
-    liveins: $f10_f
-
-    ; CHECK-LABEL: name: nearbyint_f32
-    ; CHECK: liveins: $f10_f
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
-    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: $f10_f = COPY [[COPY]](s32)
-    ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &nearbyintf, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_f, implicit-def $f10_f
-    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
-    ; CHECK-NEXT: $f10_f = COPY [[COPY1]](s32)
-    ; CHECK-NEXT: PseudoRET implicit $f10_f
-    %0:_(s32) = COPY $f10_f
-    %1:_(s32) = G_FNEARBYINT %0
-    $f10_f = COPY %1(s32)
-    PseudoRET implicit $f10_f
-
-...
----
-name:            round_f32
-body:             |
-  bb.1:
-    liveins: $f10_f
-
-    ; CHECK-LABEL: name: round_f32
-    ; CHECK: liveins: $f10_f
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
-    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: $f10_f = COPY [[COPY]](s32)
-    ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &roundf, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_f, implicit-def $f10_f
-    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
-    ; CHECK-NEXT: $f10_f = COPY [[COPY1]](s32)
-    ; CHECK-NEXT: PseudoRET implicit $f10_f
-    %0:_(s32) = COPY $f10_f
-    %1:_(s32) = G_INTRINSIC_ROUND %0
-    $f10_f = COPY %1(s32)
-    PseudoRET implicit $f10_f
-
-...
----
-name:            roundeven_f32
-body:             |
-  bb.1:
-    liveins: $f10_f
-
-    ; CHECK-LABEL: name: roundeven_f32
-    ; CHECK: liveins: $f10_f
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
-    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: $f10_f = COPY [[COPY]](s32)
-    ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &roundevenf, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_f, implicit-def $f10_f
-    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
-    ; CHECK-NEXT: $f10_f = COPY [[COPY1]](s32)
-    ; CHECK-NEXT: PseudoRET implicit $f10_f
-    %0:_(s32) = COPY $f10_f
-    %1:_(s32) = G_INTRINSIC_ROUNDEVEN %0
-    $f10_f = COPY %1(s32)
-    PseudoRET implicit $f10_f
-
-...
----
-name:            ceil_f64
-body:             |
-  bb.1:
-    liveins: $f10_d
-
-    ; CHECK-LABEL: name: ceil_f64
-    ; CHECK: liveins: $f10_d
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
-    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: $f10_d = COPY [[COPY]](s64)
-    ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &ceil, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit-def $f10_d
-    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $f10_d
-    ; CHECK-NEXT: $f10_d = COPY [[COPY1]](s64)
-    ; CHECK-NEXT: PseudoRET implicit $f10_d
-    %0:_(s64) = COPY $f10_d
-    %1:_(s64) = G_FCEIL %0
-    $f10_d = COPY %1(s64)
-    PseudoRET implicit $f10_d
-
-...
----
-name:            floor_f64
-body:             |
-  bb.1:
-    liveins: $f10_d
-
-    ; CHECK-LABEL: name: floor_f64
-    ; CHECK: liveins: $f10_d
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
-    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: $f10_d = COPY [[COPY]](s64)
-    ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &floor, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit-def $f10_d
-    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $f10_d
-    ; CHECK-NEXT: $f10_d = COPY [[COPY1]](s64)
-    ; CHECK-NEXT: PseudoRET implicit $f10_d
-    %0:_(s64) = COPY $f10_d
-    %1:_(s64) = G_FFLOOR %0
-    $f10_d = COPY %1(s64)
-    PseudoRET implicit $f10_d
-
-...
----
-name:            trunc_f64
-body:             |
-  bb.1:
-    liveins: $f10_d
-
-    ; CHECK-LABEL: name: trunc_f64
-    ; CHECK: liveins: $f10_d
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
-    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: $f10_d = COPY [[COPY]](s64)
-    ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &trunc, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit-def $f10_d
-    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $f10_d
-    ; CHECK-NEXT: $f10_d = COPY [[COPY1]](s64)
-    ; CHECK-NEXT: PseudoRET implicit $f10_d
-    %0:_(s64) = COPY $f10_d
-    %1:_(s64) = G_INTRINSIC_TRUNC %0
-    $f10_d = COPY %1(s64)
-    PseudoRET implicit $f10_d
-
-...
----
-name:            rint_f64
-body:             |
-  bb.1:
-    liveins: $f10_d
-
-    ; CHECK-LABEL: name: rint_f64
-    ; CHECK: liveins: $f10_d
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
-    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: $f10_d = COPY [[COPY]](s64)
-    ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &rint, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit-def $f10_d
-    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $f10_d
-    ; CHECK-NEXT: $f10_d = COPY [[COPY1]](s64)
-    ; CHECK-NEXT: PseudoRET implicit $f10_d
-    %0:_(s64) = COPY $f10_d
-    %1:_(s64) = G_FRINT %0
-    $f10_d = COPY %1(s64)
-    PseudoRET implicit $f10_d
-
-...
----
-name:            nearbyint_f64
-body:             |
-  bb.1:
-    liveins: $f10_d
-
-    ; CHECK-LABEL: name: nearbyint_f64
-    ; CHECK: liveins: $f10_d
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
-    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: $f10_d = COPY [[COPY]](s64)
-    ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &nearbyint, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit-def $f10_d
-    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $f10_d
-    ; CHECK-NEXT: $f10_d = COPY [[COPY1]](s64)
-    ; CHECK-NEXT: PseudoRET implicit $f10_d
-    %0:_(s64) = COPY $f10_d
-    %1:_(s64) = G_FNEARBYINT %0
-    $f10_d = COPY %1(s64)
-    PseudoRET implicit $f10_d
-
-...
----
-name:            round_f64
-body:             |
-  bb.1:
-    liveins: $f10_d
-
-    ; CHECK-LABEL: name: round_f64
-    ; CHECK: liveins: $f10_d
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
-    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: $f10_d = COPY [[COPY]](s64)
-    ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &round, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit-def $f10_d
-    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $f10_d
-    ; CHECK-NEXT: $f10_d = COPY [[COPY1]](s64)
-    ; CHECK-NEXT: PseudoRET implicit $f10_d
-    %0:_(s64) = COPY $f10_d
-    %1:_(s64) = G_INTRINSIC_ROUND %0
-    $f10_d = COPY %1(s64)
-    PseudoRET implicit $f10_d
-
-...
----
-name:            roundeven_f64
-body:             |
-  bb.1:
-    liveins: $f10_d
-
-    ; CHECK-LABEL: name: roundeven_f64
-    ; CHECK: liveins: $f10_d
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
-    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: $f10_d = COPY [[COPY]](s64)
-    ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &roundeven, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit-def $f10_d
-    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $f10_d
-    ; CHECK-NEXT: $f10_d = COPY [[COPY1]](s64)
-    ; CHECK-NEXT: PseudoRET implicit $f10_d
-    %0:_(s64) = COPY $f10_d
-    %1:_(s64) = G_INTRINSIC_ROUNDEVEN %0
-    $f10_d = COPY %1(s64)
-    PseudoRET implicit $f10_d
-
-...


        


More information about the llvm-commits mailing list