[llvm] b271184 - [RISCV] Use FP ABI on some of the FP tests to reduce the number of CHECK lines. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 10 09:08:47 PST 2022


Author: Craig Topper
Date: 2022-01-10T09:08:29-08:00
New Revision: b271184f07ea20ab05a40c0806bab46572e41405

URL: https://github.com/llvm/llvm-project/commit/b271184f07ea20ab05a40c0806bab46572e41405
DIFF: https://github.com/llvm/llvm-project/commit/b271184f07ea20ab05a40c0806bab46572e41405.diff

LOG: [RISCV] Use FP ABI on some of the FP tests to reduce the number of CHECK lines. NFC

These tests are interested in the FP instructions being used, not
the conversions needed to pass the arguments/returns in GPRs.

Reviewed By: asb

Differential Revision: https://reviews.llvm.org/D116869

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/double-arith-strict.ll
    llvm/test/CodeGen/RISCV/double-arith.ll
    llvm/test/CodeGen/RISCV/double-br-fcmp.ll
    llvm/test/CodeGen/RISCV/double-convert-strict.ll
    llvm/test/CodeGen/RISCV/double-convert.ll
    llvm/test/CodeGen/RISCV/double-fcmp.ll
    llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
    llvm/test/CodeGen/RISCV/double-intrinsics.ll
    llvm/test/CodeGen/RISCV/double-select-fcmp.ll
    llvm/test/CodeGen/RISCV/float-arith-strict.ll
    llvm/test/CodeGen/RISCV/float-arith.ll
    llvm/test/CodeGen/RISCV/float-br-fcmp.ll
    llvm/test/CodeGen/RISCV/float-convert-strict.ll
    llvm/test/CodeGen/RISCV/float-convert.ll
    llvm/test/CodeGen/RISCV/float-fcmp.ll
    llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
    llvm/test/CodeGen/RISCV/float-intrinsics.ll
    llvm/test/CodeGen/RISCV/float-select-fcmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/double-arith-strict.ll b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
index 80d01567def54..9f207cde78f61 100644
--- a/llvm/test/CodeGen/RISCV/double-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
@@ -1,8 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
-; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32IFD %s
+; RUN:   -disable-strictnode-mutation -target-abi=ilp32d \
+; RUN:   | FileCheck -check-prefix=RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
-; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64IFD %s
+; RUN:   -disable-strictnode-mutation -target-abi=lp64d \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -11,26 +13,12 @@
 define double @fadd_d(double %a, double %b) nounwind strictfp {
 ; RV32IFD-LABEL: fadd_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fadd.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fadd_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fadd.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fadd_d:
@@ -58,26 +46,12 @@ declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata,
 define double @fsub_d(double %a, double %b) nounwind strictfp {
 ; RV32IFD-LABEL: fsub_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fsub.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fsub.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fsub_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fsub.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fsub.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fsub_d:
@@ -105,26 +79,12 @@ declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata,
 define double @fmul_d(double %a, double %b) nounwind strictfp {
 ; RV32IFD-LABEL: fmul_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fmul.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fmul.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmul_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fmul.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmul.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmul_d:
@@ -152,26 +112,12 @@ declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata,
 define double @fdiv_d(double %a, double %b) nounwind strictfp {
 ; RV32IFD-LABEL: fdiv_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fdiv.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fdiv.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fdiv_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fdiv.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fdiv.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fdiv_d:
@@ -199,22 +145,12 @@ declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata,
 define double @fsqrt_d(double %a) nounwind strictfp {
 ; RV32IFD-LABEL: fsqrt_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fsqrt.d ft0, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fsqrt.d fa0, fa0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fsqrt_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fsqrt.d ft0, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fsqrt.d fa0, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fsqrt_d:
@@ -324,30 +260,12 @@ declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadat
 define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32IFD-LABEL: fmadd_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmadd_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a2
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmadd_d:
@@ -375,34 +293,16 @@ declare double @llvm.experimental.constrained.fma.f64(double, double, double, me
 define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32IFD-LABEL: fmsub_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fcvt.d.w ft3, zero
-; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV32IFD-NEXT:    fmsub.d ft0, ft1, ft0, ft2
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w ft0, zero
+; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV32IFD-NEXT:    fmsub.d fa0, fa0, fa1, ft0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmsub_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fmv.d.x ft2, a2
-; RV64IFD-NEXT:    fmv.d.x ft3, zero
-; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV64IFD-NEXT:    fmsub.d ft0, ft1, ft0, ft2
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, zero
+; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV64IFD-NEXT:    fmsub.d fa0, fa0, fa1, ft0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmsub_d:
@@ -469,36 +369,18 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
 define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32IFD-LABEL: fnmadd_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fcvt.d.w ft3, zero
-; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV32IFD-NEXT:    fadd.d ft1, ft1, ft3
-; RV32IFD-NEXT:    fnmadd.d ft0, ft2, ft0, ft1
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w ft0, zero
+; RV32IFD-NEXT:    fadd.d ft1, fa0, ft0
+; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV32IFD-NEXT:    fnmadd.d fa0, ft1, fa1, ft0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmadd_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a2
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmv.d.x ft3, zero
-; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV64IFD-NEXT:    fadd.d ft1, ft1, ft3
-; RV64IFD-NEXT:    fnmadd.d ft0, ft2, ft0, ft1
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, zero
+; RV64IFD-NEXT:    fadd.d ft1, fa0, ft0
+; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV64IFD-NEXT:    fnmadd.d fa0, ft1, fa1, ft0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_d:
@@ -583,36 +465,18 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
 define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
 ; RV32IFD-LABEL: fnmadd_d_2:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fcvt.d.w ft3, zero
-; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV32IFD-NEXT:    fadd.d ft1, ft1, ft3
-; RV32IFD-NEXT:    fnmadd.d ft0, ft2, ft0, ft1
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w ft0, zero
+; RV32IFD-NEXT:    fadd.d ft1, fa1, ft0
+; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV32IFD-NEXT:    fnmadd.d fa0, ft1, fa0, ft0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmadd_d_2:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a2
-; RV64IFD-NEXT:    fmv.d.x ft2, a1
-; RV64IFD-NEXT:    fmv.d.x ft3, zero
-; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV64IFD-NEXT:    fadd.d ft1, ft1, ft3
-; RV64IFD-NEXT:    fnmadd.d ft0, ft2, ft0, ft1
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, zero
+; RV64IFD-NEXT:    fadd.d ft1, fa1, ft0
+; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV64IFD-NEXT:    fnmadd.d fa0, ft1, fa0, ft0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_d_2:
@@ -698,34 +562,16 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
 define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32IFD-LABEL: fnmsub_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fcvt.d.w ft3, zero
-; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV32IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w ft0, zero
+; RV32IFD-NEXT:    fadd.d ft0, fa0, ft0
+; RV32IFD-NEXT:    fnmsub.d fa0, ft0, fa1, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmsub_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a2
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmv.d.x ft3, zero
-; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV64IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, zero
+; RV64IFD-NEXT:    fadd.d ft0, fa0, ft0
+; RV64IFD-NEXT:    fnmsub.d fa0, ft0, fa1, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_d:
@@ -788,34 +634,16 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp {
 define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
 ; RV32IFD-LABEL: fnmsub_d_2:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fcvt.d.w ft3, zero
-; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV32IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w ft0, zero
+; RV32IFD-NEXT:    fadd.d ft0, fa1, ft0
+; RV32IFD-NEXT:    fnmsub.d fa0, ft0, fa0, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmsub_d_2:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a2
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fmv.d.x ft2, a1
-; RV64IFD-NEXT:    fmv.d.x ft3, zero
-; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV64IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, zero
+; RV64IFD-NEXT:    fadd.d ft0, fa1, ft0
+; RV64IFD-NEXT:    fnmsub.d fa0, ft0, fa0, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_d_2:

diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index e789b07589772..e112e29401d4a 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN:   -target-abi=ilp32d | FileCheck -check-prefix=RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV64IFD %s
+; RUN:   -target-abi=lp64d | FileCheck -check-prefix=RV64IFD %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -16,26 +16,12 @@
 define double @fadd_d(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fadd_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fadd.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fadd_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fadd.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fadd_d:
@@ -62,26 +48,12 @@ define double @fadd_d(double %a, double %b) nounwind {
 define double @fsub_d(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fsub_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fsub.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fsub.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fsub_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fsub.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fsub.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fsub_d:
@@ -108,26 +80,12 @@ define double @fsub_d(double %a, double %b) nounwind {
 define double @fmul_d(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fmul_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fmul.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fmul.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmul_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fmul.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmul.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmul_d:
@@ -154,26 +112,12 @@ define double @fmul_d(double %a, double %b) nounwind {
 define double @fdiv_d(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fdiv_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fdiv.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fdiv.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fdiv_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fdiv.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fdiv.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fdiv_d:
@@ -202,22 +146,12 @@ declare double @llvm.sqrt.f64(double)
 define double @fsqrt_d(double %a) nounwind {
 ; RV32IFD-LABEL: fsqrt_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fsqrt.d ft0, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fsqrt.d fa0, fa0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fsqrt_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fsqrt.d ft0, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fsqrt.d fa0, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fsqrt_d:
@@ -246,26 +180,12 @@ declare double @llvm.copysign.f64(double, double)
 define double @fsgnj_d(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fsgnj_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fsgnj.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fsgnj.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fsgnj_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fsgnj.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fsgnj_d:
@@ -295,20 +215,14 @@ define double @fsgnj_d(double %a, double %b) nounwind {
 define i32 @fneg_d(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fneg_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fadd.d ft0, ft0, ft0
+; RV32IFD-NEXT:    fadd.d ft0, fa0, fa0
 ; RV32IFD-NEXT:    fneg.d ft1, ft0
 ; RV32IFD-NEXT:    feq.d a0, ft0, ft1
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fneg_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fadd.d ft0, ft0, ft0
+; RV64IFD-NEXT:    fadd.d ft0, fa0, fa0
 ; RV64IFD-NEXT:    fneg.d ft1, ft0
 ; RV64IFD-NEXT:    feq.d a0, ft0, ft1
 ; RV64IFD-NEXT:    ret
@@ -356,29 +270,12 @@ define double @fsgnjn_d(double %a, double %b) nounwind {
 ;
 ; RV32IFD-LABEL: fsgnjn_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fsgnjn.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fsgnjn.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fsgnjn_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    li a2, -1
-; RV64IFD-NEXT:    slli a2, a2, 63
-; RV64IFD-NEXT:    xor a1, a1, a2
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fsgnjn.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fsgnjn_d:
@@ -413,30 +310,16 @@ declare double @llvm.fabs.f64(double)
 define double @fabs_d(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fabs_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV32IFD-NEXT:    fadd.d ft0, fa0, fa1
 ; RV32IFD-NEXT:    fabs.d ft1, ft0
-; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fadd.d fa0, ft1, ft0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fabs_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fadd.d ft0, fa0, fa1
 ; RV64IFD-NEXT:    fabs.d ft1, ft0
-; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fadd.d fa0, ft1, ft0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fabs_d:
@@ -478,26 +361,12 @@ declare double @llvm.minnum.f64(double, double)
 define double @fmin_d(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fmin_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fmin.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fmin.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmin_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmin.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmin_d:
@@ -526,26 +395,12 @@ declare double @llvm.maxnum.f64(double, double)
 define double @fmax_d(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fmax_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fmax.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fmax.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmax_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fmax.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmax.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmax_d:
@@ -574,30 +429,12 @@ declare double @llvm.fma.f64(double, double, double)
 define double @fmadd_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fmadd_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmadd_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a2
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmadd_d:
@@ -624,34 +461,16 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind {
 define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fmsub_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fcvt.d.w ft3, zero
-; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV32IFD-NEXT:    fmsub.d ft0, ft1, ft0, ft2
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w ft0, zero
+; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV32IFD-NEXT:    fmsub.d fa0, fa0, fa1, ft0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmsub_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fmv.d.x ft2, a2
-; RV64IFD-NEXT:    fmv.d.x ft3, zero
-; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV64IFD-NEXT:    fmsub.d ft0, ft1, ft0, ft2
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, zero
+; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV64IFD-NEXT:    fmsub.d fa0, fa0, fa1, ft0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmsub_d:
@@ -718,36 +537,18 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fnmadd_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fcvt.d.w ft3, zero
-; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV32IFD-NEXT:    fadd.d ft1, ft1, ft3
-; RV32IFD-NEXT:    fnmadd.d ft0, ft2, ft0, ft1
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w ft0, zero
+; RV32IFD-NEXT:    fadd.d ft1, fa0, ft0
+; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV32IFD-NEXT:    fnmadd.d fa0, ft1, fa1, ft0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmadd_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a2
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmv.d.x ft3, zero
-; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV64IFD-NEXT:    fadd.d ft1, ft1, ft3
-; RV64IFD-NEXT:    fnmadd.d ft0, ft2, ft0, ft1
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, zero
+; RV64IFD-NEXT:    fadd.d ft1, fa0, ft0
+; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV64IFD-NEXT:    fnmadd.d fa0, ft1, fa1, ft0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_d:
@@ -832,36 +633,18 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fnmadd_d_2:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fcvt.d.w ft3, zero
-; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV32IFD-NEXT:    fadd.d ft1, ft1, ft3
-; RV32IFD-NEXT:    fnmadd.d ft0, ft2, ft0, ft1
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w ft0, zero
+; RV32IFD-NEXT:    fadd.d ft1, fa1, ft0
+; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV32IFD-NEXT:    fnmadd.d fa0, ft1, fa0, ft0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmadd_d_2:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a2
-; RV64IFD-NEXT:    fmv.d.x ft2, a1
-; RV64IFD-NEXT:    fmv.d.x ft3, zero
-; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV64IFD-NEXT:    fadd.d ft1, ft1, ft3
-; RV64IFD-NEXT:    fnmadd.d ft0, ft2, ft0, ft1
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, zero
+; RV64IFD-NEXT:    fadd.d ft1, fa1, ft0
+; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV64IFD-NEXT:    fnmadd.d fa0, ft1, fa0, ft0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_d_2:
@@ -947,34 +730,16 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fnmsub_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fcvt.d.w ft3, zero
-; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV32IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w ft0, zero
+; RV32IFD-NEXT:    fadd.d ft0, fa0, ft0
+; RV32IFD-NEXT:    fnmsub.d fa0, ft0, fa1, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmsub_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a2
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmv.d.x ft3, zero
-; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV64IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, zero
+; RV64IFD-NEXT:    fadd.d ft0, fa0, ft0
+; RV64IFD-NEXT:    fnmsub.d fa0, ft0, fa1, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_d:
@@ -1037,34 +802,16 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fnmsub_d_2:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fcvt.d.w ft3, zero
-; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV32IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w ft0, zero
+; RV32IFD-NEXT:    fadd.d ft0, fa1, ft0
+; RV32IFD-NEXT:    fnmsub.d fa0, ft0, fa0, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmsub_d_2:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a2
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fmv.d.x ft2, a1
-; RV64IFD-NEXT:    fmv.d.x ft3, zero
-; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV64IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, zero
+; RV64IFD-NEXT:    fadd.d ft0, fa1, ft0
+; RV64IFD-NEXT:    fnmsub.d fa0, ft0, fa0, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_d_2:
@@ -1131,30 +878,12 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
 define double @fmadd_d_contract(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fmadd_d_contract:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmadd_d_contract:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a2
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmadd_d_contract:
@@ -1196,34 +925,16 @@ define double @fmadd_d_contract(double %a, double %b, double %c) nounwind {
 define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fmsub_d_contract:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fcvt.d.w ft3, zero
-; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV32IFD-NEXT:    fmsub.d ft0, ft1, ft0, ft2
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w ft0, zero
+; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV32IFD-NEXT:    fmsub.d fa0, fa0, fa1, ft0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmsub_d_contract:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fmv.d.x ft2, a2
-; RV64IFD-NEXT:    fmv.d.x ft3, zero
-; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV64IFD-NEXT:    fmsub.d ft0, ft1, ft0, ft2
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, zero
+; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV64IFD-NEXT:    fmsub.d fa0, fa0, fa1, ft0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmsub_d_contract:
@@ -1298,38 +1009,20 @@ define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
 define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fnmadd_d_contract:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fcvt.d.w ft3, zero
-; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV32IFD-NEXT:    fadd.d ft1, ft1, ft3
-; RV32IFD-NEXT:    fadd.d ft0, ft0, ft3
-; RV32IFD-NEXT:    fnmadd.d ft0, ft2, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w ft0, zero
+; RV32IFD-NEXT:    fadd.d ft1, fa0, ft0
+; RV32IFD-NEXT:    fadd.d ft2, fa1, ft0
+; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV32IFD-NEXT:    fnmadd.d fa0, ft1, ft2, ft0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmadd_d_contract:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a2
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmv.d.x ft3, zero
-; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV64IFD-NEXT:    fadd.d ft1, ft1, ft3
-; RV64IFD-NEXT:    fadd.d ft0, ft0, ft3
-; RV64IFD-NEXT:    fnmadd.d ft0, ft2, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, zero
+; RV64IFD-NEXT:    fadd.d ft1, fa0, ft0
+; RV64IFD-NEXT:    fadd.d ft2, fa1, ft0
+; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
+; RV64IFD-NEXT:    fnmadd.d fa0, ft1, ft2, ft0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_d_contract:
@@ -1433,36 +1126,18 @@ define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
 define double @fnmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fnmsub_d_contract:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fcvt.d.w ft3, zero
-; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV32IFD-NEXT:    fadd.d ft1, ft1, ft3
-; RV32IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w ft0, zero
+; RV32IFD-NEXT:    fadd.d ft1, fa0, ft0
+; RV32IFD-NEXT:    fadd.d ft0, fa1, ft0
+; RV32IFD-NEXT:    fnmsub.d fa0, ft1, ft0, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmsub_d_contract:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a2
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmv.d.x ft3, zero
-; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
-; RV64IFD-NEXT:    fadd.d ft1, ft1, ft3
-; RV64IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, zero
+; RV64IFD-NEXT:    fadd.d ft1, fa0, ft0
+; RV64IFD-NEXT:    fadd.d ft0, fa1, ft0
+; RV64IFD-NEXT:    fnmsub.d fa0, ft1, ft0, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_d_contract:

diff  --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
index 957bc37961591..2467280a95f24 100644
--- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN:   -target-abi=ilp32d | FileCheck -check-prefix=RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV64IFD %s
+; RUN:   -target-abi=lp64d | FileCheck -check-prefix=RV64IFD %s
 
 declare void @abort()
 declare void @exit(i32)
@@ -41,28 +41,18 @@ if.else:
 define void @br_fcmp_oeq(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_oeq:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    feq.d a0, ft1, ft0
+; RV32IFD-NEXT:    feq.d a0, fa0, fa1
 ; RV32IFD-NEXT:    bnez a0, .LBB1_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB1_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_oeq:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    feq.d a0, fa0, fa1
 ; RV64IFD-NEXT:    bnez a0, .LBB1_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ret
@@ -85,28 +75,18 @@ if.then:
 define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_oeq_alt:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    feq.d a0, ft1, ft0
+; RV32IFD-NEXT:    feq.d a0, fa0, fa1
 ; RV32IFD-NEXT:    bnez a0, .LBB2_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB2_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_oeq_alt:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    feq.d a0, fa0, fa1
 ; RV64IFD-NEXT:    bnez a0, .LBB2_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ret
@@ -126,28 +106,18 @@ if.else:
 define void @br_fcmp_ogt(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_ogt:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
+; RV32IFD-NEXT:    flt.d a0, fa1, fa0
 ; RV32IFD-NEXT:    bnez a0, .LBB3_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB3_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_ogt:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    flt.d a0, fa1, fa0
 ; RV64IFD-NEXT:    bnez a0, .LBB3_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ret
@@ -167,28 +137,18 @@ if.then:
 define void @br_fcmp_oge(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_oge:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    fle.d a0, ft1, ft0
+; RV32IFD-NEXT:    fle.d a0, fa1, fa0
 ; RV32IFD-NEXT:    bnez a0, .LBB4_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB4_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_oge:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    fle.d a0, fa1, fa0
 ; RV64IFD-NEXT:    bnez a0, .LBB4_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ret
@@ -208,28 +168,18 @@ if.then:
 define void @br_fcmp_olt(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_olt:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
 ; RV32IFD-NEXT:    bnez a0, .LBB5_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB5_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_olt:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
 ; RV64IFD-NEXT:    bnez a0, .LBB5_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ret
@@ -249,28 +199,18 @@ if.then:
 define void @br_fcmp_ole(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_ole:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    fle.d a0, ft1, ft0
+; RV32IFD-NEXT:    fle.d a0, fa0, fa1
 ; RV32IFD-NEXT:    bnez a0, .LBB6_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB6_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_ole:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    fle.d a0, fa0, fa1
 ; RV64IFD-NEXT:    bnez a0, .LBB6_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ret
@@ -290,31 +230,21 @@ if.then:
 define void @br_fcmp_one(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_one:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
-; RV32IFD-NEXT:    flt.d a1, ft0, ft1
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
+; RV32IFD-NEXT:    flt.d a1, fa1, fa0
 ; RV32IFD-NEXT:    or a0, a1, a0
 ; RV32IFD-NEXT:    bnez a0, .LBB7_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB7_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_one:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
-; RV64IFD-NEXT:    flt.d a1, ft0, ft1
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
+; RV64IFD-NEXT:    flt.d a1, fa1, fa0
 ; RV64IFD-NEXT:    or a0, a1, a0
 ; RV64IFD-NEXT:    bnez a0, .LBB7_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
@@ -335,31 +265,21 @@ if.then:
 define void @br_fcmp_ord(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_ord:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    feq.d a0, ft1, ft1
-; RV32IFD-NEXT:    feq.d a1, ft0, ft0
+; RV32IFD-NEXT:    feq.d a0, fa1, fa1
+; RV32IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV32IFD-NEXT:    and a0, a1, a0
 ; RV32IFD-NEXT:    bnez a0, .LBB8_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB8_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_ord:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    feq.d a0, ft1, ft1
-; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    feq.d a0, fa1, fa1
+; RV64IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    bnez a0, .LBB8_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
@@ -380,31 +300,21 @@ if.then:
 define void @br_fcmp_ueq(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_ueq:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
-; RV32IFD-NEXT:    flt.d a1, ft0, ft1
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
+; RV32IFD-NEXT:    flt.d a1, fa1, fa0
 ; RV32IFD-NEXT:    or a0, a1, a0
 ; RV32IFD-NEXT:    beqz a0, .LBB9_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB9_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_ueq:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
-; RV64IFD-NEXT:    flt.d a1, ft0, ft1
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
+; RV64IFD-NEXT:    flt.d a1, fa1, fa0
 ; RV64IFD-NEXT:    or a0, a1, a0
 ; RV64IFD-NEXT:    beqz a0, .LBB9_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
@@ -425,28 +335,18 @@ if.then:
 define void @br_fcmp_ugt(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_ugt:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    fle.d a0, ft1, ft0
+; RV32IFD-NEXT:    fle.d a0, fa0, fa1
 ; RV32IFD-NEXT:    beqz a0, .LBB10_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB10_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_ugt:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    fle.d a0, fa0, fa1
 ; RV64IFD-NEXT:    beqz a0, .LBB10_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ret
@@ -466,28 +366,18 @@ if.then:
 define void @br_fcmp_uge(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_uge:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
 ; RV32IFD-NEXT:    beqz a0, .LBB11_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB11_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_uge:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
 ; RV64IFD-NEXT:    beqz a0, .LBB11_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ret
@@ -507,28 +397,18 @@ if.then:
 define void @br_fcmp_ult(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_ult:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    fle.d a0, ft1, ft0
+; RV32IFD-NEXT:    fle.d a0, fa1, fa0
 ; RV32IFD-NEXT:    beqz a0, .LBB12_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB12_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_ult:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    fle.d a0, fa1, fa0
 ; RV64IFD-NEXT:    beqz a0, .LBB12_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ret
@@ -548,28 +428,18 @@ if.then:
 define void @br_fcmp_ule(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_ule:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
+; RV32IFD-NEXT:    flt.d a0, fa1, fa0
 ; RV32IFD-NEXT:    beqz a0, .LBB13_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB13_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_ule:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    flt.d a0, fa1, fa0
 ; RV64IFD-NEXT:    beqz a0, .LBB13_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ret
@@ -589,28 +459,18 @@ if.then:
 define void @br_fcmp_une(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_une:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    feq.d a0, ft1, ft0
+; RV32IFD-NEXT:    feq.d a0, fa0, fa1
 ; RV32IFD-NEXT:    beqz a0, .LBB14_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB14_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_une:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    feq.d a0, fa0, fa1
 ; RV64IFD-NEXT:    beqz a0, .LBB14_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ret
@@ -631,31 +491,21 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
 ; TODO: sltiu+bne -> beq
 ; RV32IFD-LABEL: br_fcmp_uno:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    feq.d a0, ft1, ft1
-; RV32IFD-NEXT:    feq.d a1, ft0, ft0
+; RV32IFD-NEXT:    feq.d a0, fa1, fa1
+; RV32IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV32IFD-NEXT:    and a0, a1, a0
 ; RV32IFD-NEXT:    beqz a0, .LBB15_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB15_2: # %if.then
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call abort at plt
 ;
 ; RV64IFD-LABEL: br_fcmp_uno:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    feq.d a0, ft1, ft1
-; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    feq.d a0, fa1, fa1
+; RV64IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    beqz a0, .LBB15_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else

diff  --git a/llvm/test/CodeGen/RISCV/double-convert-strict.ll b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
index 55fee362da880..52f7d16d42c0d 100644
--- a/llvm/test/CodeGen/RISCV/double-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
@@ -1,8 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
-; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32IFD %s
+; RUN:   -disable-strictnode-mutation -target-abi=ilp32d \
+; RUN:   | FileCheck -check-prefix=RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
-; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64IFD %s
+; RUN:   -disable-strictnode-mutation -target-abi=lp64d \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -15,20 +17,12 @@
 define float @fcvt_s_d(double %a) nounwind strictfp {
 ; RV32IFD-LABEL: fcvt_s_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.s.d ft0, ft0
-; RV32IFD-NEXT:    fmv.x.w a0, ft0
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.s.d fa0, fa0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_s_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.s.d ft0, ft0
-; RV64IFD-NEXT:    fmv.x.w a0, ft0
+; RV64IFD-NEXT:    fcvt.s.d fa0, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_d:
@@ -56,20 +50,12 @@ declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, m
 define double @fcvt_d_s(float %a) nounwind strictfp {
 ; RV32IFD-LABEL: fcvt_d_s:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fmv.w.x ft0, a0
-; RV32IFD-NEXT:    fcvt.d.s ft0, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.s fa0, fa0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_s:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.w.x ft0, a0
-; RV64IFD-NEXT:    fcvt.d.s ft0, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.s fa0, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_s:
@@ -97,18 +83,12 @@ declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
 define i32 @fcvt_w_d(double %a) nounwind strictfp {
 ; RV32IFD-LABEL: fcvt_w_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_w_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.w.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_w_d:
@@ -138,18 +118,12 @@ declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
 define i32 @fcvt_wu_d(double %a) nounwind strictfp {
 ; RV32IFD-LABEL: fcvt_wu_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_wu_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_d:
@@ -179,23 +153,17 @@ declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
 define i32 @fcvt_wu_d_multiple_use(double %x, i32* %y) nounwind {
 ; RV32IFD-LABEL: fcvt_wu_d_multiple_use:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.wu.d a1, ft0, rtz
+; RV32IFD-NEXT:    fcvt.wu.d a1, fa0, rtz
 ; RV32IFD-NEXT:    li a0, 1
 ; RV32IFD-NEXT:    beqz a1, .LBB4_2
 ; RV32IFD-NEXT:  # %bb.1:
 ; RV32IFD-NEXT:    mv a0, a1
 ; RV32IFD-NEXT:  .LBB4_2:
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_wu_d_multiple_use:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.wu.d a1, ft0, rtz
+; RV64IFD-NEXT:    fcvt.wu.d a1, fa0, rtz
 ; RV64IFD-NEXT:    li a0, 1
 ; RV64IFD-NEXT:    beqz a1, .LBB4_2
 ; RV64IFD-NEXT:  # %bb.1:
@@ -241,18 +209,12 @@ define i32 @fcvt_wu_d_multiple_use(double %x, i32* %y) nounwind {
 define double @fcvt_d_w(i32 %a) nounwind strictfp {
 ; RV32IFD-LABEL: fcvt_d_w:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fcvt.d.w ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_w:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.w ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_w:
@@ -281,20 +243,14 @@ declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, meta
 define double @fcvt_d_w_load(i32* %p) nounwind strictfp {
 ; RV32IFD-LABEL: fcvt_d_w_load:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    lw a0, 0(a0)
-; RV32IFD-NEXT:    fcvt.d.w ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_w_load:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lw a0, 0(a0)
-; RV64IFD-NEXT:    fcvt.d.w ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_w_load:
@@ -324,18 +280,12 @@ define double @fcvt_d_w_load(i32* %p) nounwind strictfp {
 define double @fcvt_d_wu(i32 %a) nounwind strictfp {
 ; RV32IFD-LABEL: fcvt_d_wu:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_wu:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_wu:
@@ -364,20 +314,14 @@ declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, meta
 define double @fcvt_d_wu_load(i32* %p) nounwind strictfp {
 ; RV32IFD-LABEL: fcvt_d_wu_load:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    lw a0, 0(a0)
-; RV32IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_wu_load:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lwu a0, 0(a0)
-; RV64IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_wu_load:
@@ -416,8 +360,7 @@ define i64 @fcvt_l_d(double %a) nounwind strictfp {
 ;
 ; RV64IFD-LABEL: fcvt_l_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_l_d:
@@ -454,8 +397,7 @@ define i64 @fcvt_lu_d(double %a) nounwind strictfp {
 ;
 ; RV64IFD-LABEL: fcvt_lu_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_lu_d:
@@ -492,8 +434,7 @@ define double @fcvt_d_l(i64 %a) nounwind strictfp {
 ;
 ; RV64IFD-LABEL: fcvt_d_l:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.l ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.l fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_l:
@@ -530,8 +471,7 @@ define double @fcvt_d_lu(i64 %a) nounwind strictfp {
 ;
 ; RV64IFD-LABEL: fcvt_d_lu:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.lu ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.lu fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_lu:
@@ -559,18 +499,12 @@ declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, meta
 define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp {
 ; RV32IFD-LABEL: fcvt_d_w_i8:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fcvt.d.w ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_w_i8:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.w ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_w_i8:
@@ -598,18 +532,12 @@ declare double @llvm.experimental.constrained.sitofp.f64.i8(i8, metadata, metada
 define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp {
 ; RV32IFD-LABEL: fcvt_d_wu_i8:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_wu_i8:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_wu_i8:
@@ -637,18 +565,12 @@ declare double @llvm.experimental.constrained.uitofp.f64.i8(i8, metadata, metada
 define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp {
 ; RV32IFD-LABEL: fcvt_d_w_i16:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fcvt.d.w ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_w_i16:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.w ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_w_i16:
@@ -676,18 +598,12 @@ declare double @llvm.experimental.constrained.sitofp.f64.i16(i16, metadata, meta
 define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp {
 ; RV32IFD-LABEL: fcvt_d_wu_i16:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_wu_i16:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_wu_i16:

diff  --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index 59e626df21fd0..63af598ee4653 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN:   -target-abi=ilp32d | FileCheck -check-prefix=RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV64IFD %s
+; RUN:   -target-abi=lp64d | FileCheck -check-prefix=RV64IFD %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -11,20 +11,12 @@
 define float @fcvt_s_d(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_s_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.s.d ft0, ft0
-; RV32IFD-NEXT:    fmv.x.w a0, ft0
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.s.d fa0, fa0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_s_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.s.d ft0, ft0
-; RV64IFD-NEXT:    fmv.x.w a0, ft0
+; RV64IFD-NEXT:    fcvt.s.d fa0, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_d:
@@ -51,20 +43,12 @@ define float @fcvt_s_d(double %a) nounwind {
 define double @fcvt_d_s(float %a) nounwind {
 ; RV32IFD-LABEL: fcvt_d_s:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fmv.w.x ft0, a0
-; RV32IFD-NEXT:    fcvt.d.s ft0, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.s fa0, fa0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_s:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.w.x ft0, a0
-; RV64IFD-NEXT:    fcvt.d.s ft0, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.s fa0, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_s:
@@ -91,18 +75,12 @@ define double @fcvt_d_s(float %a) nounwind {
 define i32 @fcvt_w_d(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_w_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_w_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.w.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_w_d:
@@ -129,31 +107,24 @@ define i32 @fcvt_w_d(double %a) nounwind {
 define i32 @fcvt_w_d_sat(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_w_d_sat:
 ; RV32IFD:       # %bb.0: # %start
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    feq.d a0, ft0, ft0
+; RV32IFD-NEXT:    feq.d a0, fa0, fa0
 ; RV32IFD-NEXT:    bnez a0, .LBB3_2
 ; RV32IFD-NEXT:  # %bb.1: # %start
 ; RV32IFD-NEXT:    li a0, 0
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB3_2:
-; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_w_d_sat:
 ; RV64IFD:       # %bb.0: # %start
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    feq.d a0, fa0, fa0
 ; RV64IFD-NEXT:    bnez a0, .LBB3_2
 ; RV64IFD-NEXT:  # %bb.1: # %start
 ; RV64IFD-NEXT:    li a0, 0
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB3_2:
-; RV64IFD-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.w.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_w_d_sat:
@@ -273,18 +244,12 @@ declare i32 @llvm.fptosi.sat.i32.f64(double)
 define i32 @fcvt_wu_d(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_wu_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_wu_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_d:
@@ -313,23 +278,17 @@ define i32 @fcvt_wu_d(double %a) nounwind {
 define i32 @fcvt_wu_d_multiple_use(double %x, i32* %y) nounwind {
 ; RV32IFD-LABEL: fcvt_wu_d_multiple_use:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.wu.d a1, ft0, rtz
+; RV32IFD-NEXT:    fcvt.wu.d a1, fa0, rtz
 ; RV32IFD-NEXT:    li a0, 1
 ; RV32IFD-NEXT:    beqz a1, .LBB5_2
 ; RV32IFD-NEXT:  # %bb.1:
 ; RV32IFD-NEXT:    mv a0, a1
 ; RV32IFD-NEXT:  .LBB5_2:
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_wu_d_multiple_use:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.wu.d a1, ft0, rtz
+; RV64IFD-NEXT:    fcvt.wu.d a1, fa0, rtz
 ; RV64IFD-NEXT:    li a0, 1
 ; RV64IFD-NEXT:    beqz a1, .LBB5_2
 ; RV64IFD-NEXT:  # %bb.1:
@@ -375,31 +334,24 @@ define i32 @fcvt_wu_d_multiple_use(double %x, i32* %y) nounwind {
 define i32 @fcvt_wu_d_sat(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_wu_d_sat:
 ; RV32IFD:       # %bb.0: # %start
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    feq.d a0, ft0, ft0
+; RV32IFD-NEXT:    feq.d a0, fa0, fa0
 ; RV32IFD-NEXT:    bnez a0, .LBB6_2
 ; RV32IFD-NEXT:  # %bb.1: # %start
 ; RV32IFD-NEXT:    li a0, 0
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB6_2:
-; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_wu_d_sat:
 ; RV64IFD:       # %bb.0: # %start
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    feq.d a0, fa0, fa0
 ; RV64IFD-NEXT:    bnez a0, .LBB6_2
 ; RV64IFD-NEXT:  # %bb.1: # %start
 ; RV64IFD-NEXT:    li a0, 0
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB6_2:
-; RV64IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_d_sat:
@@ -490,18 +442,12 @@ declare i32 @llvm.fptoui.sat.i32.f64(double)
 define double @fcvt_d_w(i32 %a) nounwind {
 ; RV32IFD-LABEL: fcvt_d_w:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fcvt.d.w ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_w:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.w ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_w:
@@ -529,20 +475,14 @@ define double @fcvt_d_w(i32 %a) nounwind {
 define double @fcvt_d_w_load(i32* %p) nounwind {
 ; RV32IFD-LABEL: fcvt_d_w_load:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    lw a0, 0(a0)
-; RV32IFD-NEXT:    fcvt.d.w ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_w_load:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lw a0, 0(a0)
-; RV64IFD-NEXT:    fcvt.d.w ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_w_load:
@@ -572,18 +512,12 @@ define double @fcvt_d_w_load(i32* %p) nounwind {
 define double @fcvt_d_wu(i32 %a) nounwind {
 ; RV32IFD-LABEL: fcvt_d_wu:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_wu:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_wu:
@@ -611,20 +545,14 @@ define double @fcvt_d_wu(i32 %a) nounwind {
 define double @fcvt_d_wu_load(i32* %p) nounwind {
 ; RV32IFD-LABEL: fcvt_d_wu_load:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    lw a0, 0(a0)
-; RV32IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_wu_load:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lwu a0, 0(a0)
-; RV64IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_wu_load:
@@ -663,8 +591,7 @@ define i64 @fcvt_l_d(double %a) nounwind {
 ;
 ; RV64IFD-LABEL: fcvt_l_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_l_d:
@@ -691,69 +618,68 @@ define i64 @fcvt_l_d(double %a) nounwind {
 define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_l_d_sat:
 ; RV32IFD:       # %bb.0: # %start
-; RV32IFD-NEXT:    addi sp, sp, -32
-; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a0, 16(sp)
-; RV32IFD-NEXT:    sw a1, 20(sp)
-; RV32IFD-NEXT:    fld ft0, 16(sp)
-; RV32IFD-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
+; RV32IFD-NEXT:    lui a0, %hi(.LCPI12_0)
+; RV32IFD-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
+; RV32IFD-NEXT:    fmv.d fs0, fa0
+; RV32IFD-NEXT:    fle.d s0, ft0, fa0
 ; RV32IFD-NEXT:    call __fixdfdi at plt
-; RV32IFD-NEXT:    fld ft1, 8(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT:    lui a2, %hi(.LCPI12_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI12_0)(a2)
-; RV32IFD-NEXT:    fle.d a3, ft0, ft1
 ; RV32IFD-NEXT:    mv a2, a0
-; RV32IFD-NEXT:    bnez a3, .LBB12_2
+; RV32IFD-NEXT:    bnez s0, .LBB12_2
 ; RV32IFD-NEXT:  # %bb.1: # %start
 ; RV32IFD-NEXT:    li a2, 0
 ; RV32IFD-NEXT:  .LBB12_2: # %start
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI12_1)
 ; RV32IFD-NEXT:    fld ft0, %lo(.LCPI12_1)(a0)
-; RV32IFD-NEXT:    flt.d a4, ft0, ft1
+; RV32IFD-NEXT:    flt.d a3, ft0, fs0
 ; RV32IFD-NEXT:    li a0, -1
-; RV32IFD-NEXT:    beqz a4, .LBB12_9
+; RV32IFD-NEXT:    beqz a3, .LBB12_9
 ; RV32IFD-NEXT:  # %bb.3: # %start
-; RV32IFD-NEXT:    feq.d a2, ft1, ft1
+; RV32IFD-NEXT:    feq.d a2, fs0, fs0
 ; RV32IFD-NEXT:    beqz a2, .LBB12_10
 ; RV32IFD-NEXT:  .LBB12_4: # %start
-; RV32IFD-NEXT:    lui a5, 524288
-; RV32IFD-NEXT:    beqz a3, .LBB12_11
+; RV32IFD-NEXT:    lui a4, 524288
+; RV32IFD-NEXT:    beqz s0, .LBB12_11
 ; RV32IFD-NEXT:  .LBB12_5: # %start
-; RV32IFD-NEXT:    bnez a4, .LBB12_12
+; RV32IFD-NEXT:    bnez a3, .LBB12_12
 ; RV32IFD-NEXT:  .LBB12_6: # %start
 ; RV32IFD-NEXT:    bnez a2, .LBB12_8
 ; RV32IFD-NEXT:  .LBB12_7: # %start
 ; RV32IFD-NEXT:    li a1, 0
 ; RV32IFD-NEXT:  .LBB12_8: # %start
-; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB12_9: # %start
 ; RV32IFD-NEXT:    mv a0, a2
-; RV32IFD-NEXT:    feq.d a2, ft1, ft1
+; RV32IFD-NEXT:    feq.d a2, fs0, fs0
 ; RV32IFD-NEXT:    bnez a2, .LBB12_4
 ; RV32IFD-NEXT:  .LBB12_10: # %start
 ; RV32IFD-NEXT:    li a0, 0
-; RV32IFD-NEXT:    lui a5, 524288
-; RV32IFD-NEXT:    bnez a3, .LBB12_5
+; RV32IFD-NEXT:    lui a4, 524288
+; RV32IFD-NEXT:    bnez s0, .LBB12_5
 ; RV32IFD-NEXT:  .LBB12_11: # %start
 ; RV32IFD-NEXT:    lui a1, 524288
-; RV32IFD-NEXT:    beqz a4, .LBB12_6
+; RV32IFD-NEXT:    beqz a3, .LBB12_6
 ; RV32IFD-NEXT:  .LBB12_12:
-; RV32IFD-NEXT:    addi a1, a5, -1
+; RV32IFD-NEXT:    addi a1, a4, -1
 ; RV32IFD-NEXT:    beqz a2, .LBB12_7
 ; RV32IFD-NEXT:    j .LBB12_8
 ;
 ; RV64IFD-LABEL: fcvt_l_d_sat:
 ; RV64IFD:       # %bb.0: # %start
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    feq.d a0, fa0, fa0
 ; RV64IFD-NEXT:    bnez a0, .LBB12_2
 ; RV64IFD-NEXT:  # %bb.1: # %start
 ; RV64IFD-NEXT:    li a0, 0
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB12_2:
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_l_d_sat:
@@ -920,8 +846,7 @@ define i64 @fcvt_lu_d(double %a) nounwind {
 ;
 ; RV64IFD-LABEL: fcvt_lu_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_lu_d:
@@ -948,56 +873,55 @@ define i64 @fcvt_lu_d(double %a) nounwind {
 define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_lu_d_sat:
 ; RV32IFD:       # %bb.0: # %start
-; RV32IFD-NEXT:    addi sp, sp, -32
-; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw a0, 16(sp)
-; RV32IFD-NEXT:    sw a1, 20(sp)
-; RV32IFD-NEXT:    fld ft0, 16(sp)
-; RV32IFD-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT:    call __fixunsdfdi at plt
-; RV32IFD-NEXT:    fld ft1, 8(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
+; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fle.d a4, ft0, ft1
+; RV32IFD-NEXT:    fle.d s0, ft0, fa0
+; RV32IFD-NEXT:    call __fixunsdfdi at plt
 ; RV32IFD-NEXT:    mv a3, a0
-; RV32IFD-NEXT:    bnez a4, .LBB14_2
+; RV32IFD-NEXT:    bnez s0, .LBB14_2
 ; RV32IFD-NEXT:  # %bb.1: # %start
 ; RV32IFD-NEXT:    li a3, 0
 ; RV32IFD-NEXT:  .LBB14_2: # %start
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI14_0)
 ; RV32IFD-NEXT:    fld ft0, %lo(.LCPI14_0)(a0)
-; RV32IFD-NEXT:    flt.d a5, ft0, ft1
+; RV32IFD-NEXT:    flt.d a4, ft0, fs0
 ; RV32IFD-NEXT:    li a2, -1
 ; RV32IFD-NEXT:    li a0, -1
-; RV32IFD-NEXT:    beqz a5, .LBB14_7
+; RV32IFD-NEXT:    beqz a4, .LBB14_7
 ; RV32IFD-NEXT:  # %bb.3: # %start
-; RV32IFD-NEXT:    beqz a4, .LBB14_8
+; RV32IFD-NEXT:    beqz s0, .LBB14_8
 ; RV32IFD-NEXT:  .LBB14_4: # %start
-; RV32IFD-NEXT:    bnez a5, .LBB14_6
+; RV32IFD-NEXT:    bnez a4, .LBB14_6
 ; RV32IFD-NEXT:  .LBB14_5: # %start
 ; RV32IFD-NEXT:    mv a2, a1
 ; RV32IFD-NEXT:  .LBB14_6: # %start
 ; RV32IFD-NEXT:    mv a1, a2
-; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB14_7: # %start
 ; RV32IFD-NEXT:    mv a0, a3
-; RV32IFD-NEXT:    bnez a4, .LBB14_4
+; RV32IFD-NEXT:    bnez s0, .LBB14_4
 ; RV32IFD-NEXT:  .LBB14_8: # %start
 ; RV32IFD-NEXT:    li a1, 0
-; RV32IFD-NEXT:    beqz a5, .LBB14_5
+; RV32IFD-NEXT:    beqz a4, .LBB14_5
 ; RV32IFD-NEXT:    j .LBB14_6
 ;
 ; RV64IFD-LABEL: fcvt_lu_d_sat:
 ; RV64IFD:       # %bb.0: # %start
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    feq.d a0, fa0, fa0
 ; RV64IFD-NEXT:    bnez a0, .LBB14_2
 ; RV64IFD-NEXT:  # %bb.1: # %start
 ; RV64IFD-NEXT:    li a0, 0
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB14_2:
-; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_lu_d_sat:
@@ -1119,13 +1043,7 @@ define i64 @fmv_x_d(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fmv_x_d:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    sw a3, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    sw a0, 0(sp)
-; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp)
-; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV32IFD-NEXT:    fadd.d ft0, fa0, fa1
 ; RV32IFD-NEXT:    fsd ft0, 8(sp)
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lw a1, 12(sp)
@@ -1134,9 +1052,7 @@ define i64 @fmv_x_d(double %a, double %b) nounwind {
 ;
 ; RV64IFD-LABEL: fmv_x_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fadd.d ft0, fa0, fa1
 ; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
 ;
@@ -1174,8 +1090,7 @@ define double @fcvt_d_l(i64 %a) nounwind {
 ;
 ; RV64IFD-LABEL: fcvt_d_l:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.l ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.l fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_l:
@@ -1211,8 +1126,7 @@ define double @fcvt_d_lu(i64 %a) nounwind {
 ;
 ; RV64IFD-LABEL: fcvt_d_lu:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.lu ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.lu fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_lu:
@@ -1240,26 +1154,22 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
 ; Ensure fmv.w.x is generated even for a soft double calling convention
 ; RV32IFD-LABEL: fmv_d_x:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -32
-; RV32IFD-NEXT:    sw a3, 20(sp)
-; RV32IFD-NEXT:    sw a2, 16(sp)
-; RV32IFD-NEXT:    sw a1, 28(sp)
-; RV32IFD-NEXT:    sw a0, 24(sp)
-; RV32IFD-NEXT:    fld ft0, 16(sp)
-; RV32IFD-NEXT:    fld ft1, 24(sp)
-; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a3, 4(sp)
+; RV32IFD-NEXT:    sw a2, 0(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    fld ft0, 0(sp)
+; RV32IFD-NEXT:    fld ft1, 8(sp)
+; RV32IFD-NEXT:    fadd.d fa0, ft1, ft0
+; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmv_d_x:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fadd.d fa0, ft0, ft1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmv_d_x:
@@ -1288,18 +1198,12 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
 define double @fcvt_d_w_i8(i8 signext %a) nounwind {
 ; RV32IFD-LABEL: fcvt_d_w_i8:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fcvt.d.w ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_w_i8:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.w ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_w_i8:
@@ -1326,18 +1230,12 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind {
 define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind {
 ; RV32IFD-LABEL: fcvt_d_wu_i8:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_wu_i8:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_wu_i8:
@@ -1364,18 +1262,12 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind {
 define double @fcvt_d_w_i16(i16 signext %a) nounwind {
 ; RV32IFD-LABEL: fcvt_d_w_i16:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fcvt.d.w ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_w_i16:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.w ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.w fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_w_i16:
@@ -1402,18 +1294,12 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind {
 define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind {
 ; RV32IFD-LABEL: fcvt_d_wu_i16:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_wu_i16:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_wu_i16:
@@ -1556,18 +1442,12 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, double* %1) nounwind
 define signext i16 @fcvt_w_s_i16(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_w_s_i16:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_w_s_i16:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_w_s_i16:
@@ -1594,42 +1474,35 @@ define signext i16 @fcvt_w_s_i16(double %a) nounwind {
 define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_w_s_sat_i16:
 ; RV32IFD:       # %bb.0: # %start
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    feq.d a0, ft0, ft0
+; RV32IFD-NEXT:    feq.d a0, fa0, fa0
 ; RV32IFD-NEXT:    bnez a0, .LBB26_2
 ; RV32IFD-NEXT:  # %bb.1: # %start
 ; RV32IFD-NEXT:    li a0, 0
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB26_2:
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI26_0)
-; RV32IFD-NEXT:    fld ft1, %lo(.LCPI26_0)(a0)
+; RV32IFD-NEXT:    fld ft0, %lo(.LCPI26_0)(a0)
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI26_1)
-; RV32IFD-NEXT:    fld ft2, %lo(.LCPI26_1)(a0)
-; RV32IFD-NEXT:    fmax.d ft0, ft0, ft1
-; RV32IFD-NEXT:    fmin.d ft0, ft0, ft2
+; RV32IFD-NEXT:    fld ft1, %lo(.LCPI26_1)(a0)
+; RV32IFD-NEXT:    fmax.d ft0, fa0, ft0
+; RV32IFD-NEXT:    fmin.d ft0, ft0, ft1
 ; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_w_s_sat_i16:
 ; RV64IFD:       # %bb.0: # %start
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    feq.d a0, fa0, fa0
 ; RV64IFD-NEXT:    bnez a0, .LBB26_2
 ; RV64IFD-NEXT:  # %bb.1: # %start
 ; RV64IFD-NEXT:    li a0, 0
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB26_2:
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI26_0)
-; RV64IFD-NEXT:    fld ft1, %lo(.LCPI26_0)(a0)
+; RV64IFD-NEXT:    fld ft0, %lo(.LCPI26_0)(a0)
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI26_1)
-; RV64IFD-NEXT:    fld ft2, %lo(.LCPI26_1)(a0)
-; RV64IFD-NEXT:    fmax.d ft0, ft0, ft1
-; RV64IFD-NEXT:    fmin.d ft0, ft0, ft2
+; RV64IFD-NEXT:    fld ft1, %lo(.LCPI26_1)(a0)
+; RV64IFD-NEXT:    fmax.d ft0, fa0, ft0
+; RV64IFD-NEXT:    fmin.d ft0, ft0, ft1
 ; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
 ; RV64IFD-NEXT:    ret
 ;
@@ -1745,18 +1618,12 @@ declare i16 @llvm.fptosi.sat.i16.f64(double)
 define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_wu_s_i16:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_wu_s_i16:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s_i16:
@@ -1783,26 +1650,20 @@ define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
 define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_wu_s_sat_i16:
 ; RV32IFD:       # %bb.0: # %start
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI28_0)
-; RV32IFD-NEXT:    fld ft1, %lo(.LCPI28_0)(a0)
-; RV32IFD-NEXT:    fcvt.d.w ft2, zero
-; RV32IFD-NEXT:    fmax.d ft0, ft0, ft2
-; RV32IFD-NEXT:    fmin.d ft0, ft0, ft1
+; RV32IFD-NEXT:    fld ft0, %lo(.LCPI28_0)(a0)
+; RV32IFD-NEXT:    fcvt.d.w ft1, zero
+; RV32IFD-NEXT:    fmax.d ft1, fa0, ft1
+; RV32IFD-NEXT:    fmin.d ft0, ft1, ft0
 ; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_wu_s_sat_i16:
 ; RV64IFD:       # %bb.0: # %start
-; RV64IFD-NEXT:    lui a1, %hi(.LCPI28_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI28_0)(a1)
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fmv.d.x ft2, zero
-; RV64IFD-NEXT:    fmax.d ft1, ft1, ft2
+; RV64IFD-NEXT:    lui a0, %hi(.LCPI28_0)
+; RV64IFD-NEXT:    fld ft0, %lo(.LCPI28_0)(a0)
+; RV64IFD-NEXT:    fmv.d.x ft1, zero
+; RV64IFD-NEXT:    fmax.d ft1, fa0, ft1
 ; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
 ; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
 ; RV64IFD-NEXT:    ret
@@ -1899,18 +1760,12 @@ declare i16 @llvm.fptoui.sat.i16.f64(double)
 define signext i8 @fcvt_w_s_i8(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_w_s_i8:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_w_s_i8:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_w_s_i8:
@@ -1937,42 +1792,35 @@ define signext i8 @fcvt_w_s_i8(double %a) nounwind {
 define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_w_s_sat_i8:
 ; RV32IFD:       # %bb.0: # %start
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    feq.d a0, ft0, ft0
+; RV32IFD-NEXT:    feq.d a0, fa0, fa0
 ; RV32IFD-NEXT:    bnez a0, .LBB30_2
 ; RV32IFD-NEXT:  # %bb.1: # %start
 ; RV32IFD-NEXT:    li a0, 0
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB30_2:
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI30_0)
-; RV32IFD-NEXT:    fld ft1, %lo(.LCPI30_0)(a0)
+; RV32IFD-NEXT:    fld ft0, %lo(.LCPI30_0)(a0)
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI30_1)
-; RV32IFD-NEXT:    fld ft2, %lo(.LCPI30_1)(a0)
-; RV32IFD-NEXT:    fmax.d ft0, ft0, ft1
-; RV32IFD-NEXT:    fmin.d ft0, ft0, ft2
+; RV32IFD-NEXT:    fld ft1, %lo(.LCPI30_1)(a0)
+; RV32IFD-NEXT:    fmax.d ft0, fa0, ft0
+; RV32IFD-NEXT:    fmin.d ft0, ft0, ft1
 ; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_w_s_sat_i8:
 ; RV64IFD:       # %bb.0: # %start
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    feq.d a0, fa0, fa0
 ; RV64IFD-NEXT:    bnez a0, .LBB30_2
 ; RV64IFD-NEXT:  # %bb.1: # %start
 ; RV64IFD-NEXT:    li a0, 0
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB30_2:
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI30_0)
-; RV64IFD-NEXT:    fld ft1, %lo(.LCPI30_0)(a0)
+; RV64IFD-NEXT:    fld ft0, %lo(.LCPI30_0)(a0)
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI30_1)
-; RV64IFD-NEXT:    fld ft2, %lo(.LCPI30_1)(a0)
-; RV64IFD-NEXT:    fmax.d ft0, ft0, ft1
-; RV64IFD-NEXT:    fmin.d ft0, ft0, ft2
+; RV64IFD-NEXT:    fld ft1, %lo(.LCPI30_1)(a0)
+; RV64IFD-NEXT:    fmax.d ft0, fa0, ft0
+; RV64IFD-NEXT:    fmin.d ft0, ft0, ft1
 ; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
 ; RV64IFD-NEXT:    ret
 ;
@@ -2088,18 +1936,12 @@ define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind {
 ;
 ; RV32IFD-LABEL: fcvt_wu_s_i8:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_wu_s_i8:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s_i8:
@@ -2128,26 +1970,20 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
 ;
 ; RV32IFD-LABEL: fcvt_wu_s_sat_i8:
 ; RV32IFD:       # %bb.0: # %start
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI32_0)
-; RV32IFD-NEXT:    fld ft1, %lo(.LCPI32_0)(a0)
-; RV32IFD-NEXT:    fcvt.d.w ft2, zero
-; RV32IFD-NEXT:    fmax.d ft0, ft0, ft2
-; RV32IFD-NEXT:    fmin.d ft0, ft0, ft1
+; RV32IFD-NEXT:    fld ft0, %lo(.LCPI32_0)(a0)
+; RV32IFD-NEXT:    fcvt.d.w ft1, zero
+; RV32IFD-NEXT:    fmax.d ft1, fa0, ft1
+; RV32IFD-NEXT:    fmin.d ft0, ft1, ft0
 ; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_wu_s_sat_i8:
 ; RV64IFD:       # %bb.0: # %start
-; RV64IFD-NEXT:    lui a1, %hi(.LCPI32_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI32_0)(a1)
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fmv.d.x ft2, zero
-; RV64IFD-NEXT:    fmax.d ft1, ft1, ft2
+; RV64IFD-NEXT:    lui a0, %hi(.LCPI32_0)
+; RV64IFD-NEXT:    fld ft0, %lo(.LCPI32_0)(a0)
+; RV64IFD-NEXT:    fmv.d.x ft1, zero
+; RV64IFD-NEXT:    fmax.d ft1, fa0, ft1
 ; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
 ; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
 ; RV64IFD-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll
index 55c2d6b410e00..4f987a077b446 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN:   -target-abi=ilp32d | FileCheck -check-prefix=RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV64IFD %s
+; RUN:   -target-abi=lp64d | FileCheck -check-prefix=RV64IFD %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -36,22 +36,12 @@ define i32 @fcmp_false(double %a, double %b) nounwind {
 define i32 @fcmp_oeq(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_oeq:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    feq.d a0, ft1, ft0
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    feq.d a0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_oeq:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    feq.d a0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_oeq:
@@ -81,22 +71,12 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind {
 define i32 @fcmp_ogt(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_ogt:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    flt.d a0, fa1, fa0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_ogt:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    flt.d a0, fa1, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_ogt:
@@ -126,22 +106,12 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind {
 define i32 @fcmp_oge(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_oge:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fle.d a0, ft1, ft0
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fle.d a0, fa1, fa0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_oge:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    fle.d a0, fa1, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_oge:
@@ -173,22 +143,12 @@ define i32 @fcmp_oge(double %a, double %b) nounwind {
 define i32 @fcmp_olt(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_olt:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_olt:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_olt:
@@ -218,22 +178,12 @@ define i32 @fcmp_olt(double %a, double %b) nounwind {
 define i32 @fcmp_ole(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_ole:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fle.d a0, ft1, ft0
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fle.d a0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_ole:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    fle.d a0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_ole:
@@ -263,25 +213,15 @@ define i32 @fcmp_ole(double %a, double %b) nounwind {
 define i32 @fcmp_one(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_one:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
-; RV32IFD-NEXT:    flt.d a1, ft0, ft1
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
+; RV32IFD-NEXT:    flt.d a1, fa1, fa0
 ; RV32IFD-NEXT:    or a0, a1, a0
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_one:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
-; RV64IFD-NEXT:    flt.d a1, ft0, ft1
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
+; RV64IFD-NEXT:    flt.d a1, fa1, fa0
 ; RV64IFD-NEXT:    or a0, a1, a0
 ; RV64IFD-NEXT:    ret
 ;
@@ -346,25 +286,15 @@ define i32 @fcmp_one(double %a, double %b) nounwind {
 define i32 @fcmp_ord(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_ord:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    feq.d a0, ft1, ft1
-; RV32IFD-NEXT:    feq.d a1, ft0, ft0
+; RV32IFD-NEXT:    feq.d a0, fa1, fa1
+; RV32IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV32IFD-NEXT:    and a0, a1, a0
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_ord:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    feq.d a0, ft1, ft1
-; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    feq.d a0, fa1, fa1
+; RV64IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    ret
 ;
@@ -395,26 +325,16 @@ define i32 @fcmp_ord(double %a, double %b) nounwind {
 define i32 @fcmp_ueq(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_ueq:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
-; RV32IFD-NEXT:    flt.d a1, ft0, ft1
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
+; RV32IFD-NEXT:    flt.d a1, fa1, fa0
 ; RV32IFD-NEXT:    or a0, a1, a0
 ; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_ueq:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
-; RV64IFD-NEXT:    flt.d a1, ft0, ft1
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
+; RV64IFD-NEXT:    flt.d a1, fa1, fa0
 ; RV64IFD-NEXT:    or a0, a1, a0
 ; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    ret
@@ -480,23 +400,13 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind {
 define i32 @fcmp_ugt(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_ugt:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fle.d a0, ft1, ft0
+; RV32IFD-NEXT:    fle.d a0, fa0, fa1
 ; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_ugt:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    fle.d a0, fa0, fa1
 ; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    ret
 ;
@@ -527,23 +437,13 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind {
 define i32 @fcmp_uge(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_uge:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
 ; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_uge:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
 ; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    ret
 ;
@@ -576,23 +476,13 @@ define i32 @fcmp_uge(double %a, double %b) nounwind {
 define i32 @fcmp_ult(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_ult:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fle.d a0, ft1, ft0
+; RV32IFD-NEXT:    fle.d a0, fa1, fa0
 ; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_ult:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    fle.d a0, fa1, fa0
 ; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    ret
 ;
@@ -623,23 +513,13 @@ define i32 @fcmp_ult(double %a, double %b) nounwind {
 define i32 @fcmp_ule(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_ule:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
+; RV32IFD-NEXT:    flt.d a0, fa1, fa0
 ; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_ule:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    flt.d a0, fa1, fa0
 ; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    ret
 ;
@@ -670,23 +550,13 @@ define i32 @fcmp_ule(double %a, double %b) nounwind {
 define i32 @fcmp_une(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_une:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    feq.d a0, ft1, ft0
+; RV32IFD-NEXT:    feq.d a0, fa0, fa1
 ; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_une:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    feq.d a0, fa0, fa1
 ; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    ret
 ;
@@ -717,26 +587,16 @@ define i32 @fcmp_une(double %a, double %b) nounwind {
 define i32 @fcmp_uno(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_uno:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    feq.d a0, ft1, ft1
-; RV32IFD-NEXT:    feq.d a1, ft0, ft0
+; RV32IFD-NEXT:    feq.d a0, fa1, fa1
+; RV32IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV32IFD-NEXT:    and a0, a1, a0
 ; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_uno:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    feq.d a0, ft1, ft1
-; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    feq.d a0, fa1, fa1
+; RV64IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
index 1e76500d2001b..b1968fe831773 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d \
-; RUN:   -verify-machineinstrs -disable-strictnode-mutation \
+; RUN:   -verify-machineinstrs -disable-strictnode-mutation -target-abi=ilp32d \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \
-; RUN:   -verify-machineinstrs -disable-strictnode-mutation \
+; RUN:   -verify-machineinstrs -disable-strictnode-mutation -target-abi=lp64d \
 ; RUN:   | FileCheck -check-prefix=RV64IFD %s
 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \
 ; RUN:   -verify-machineinstrs -disable-strictnode-mutation \
@@ -17,22 +17,12 @@ declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadat
 define double @sqrt_f64(double %a) nounwind strictfp {
 ; RV32IFD-LABEL: sqrt_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fsqrt.d ft0, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fsqrt.d fa0, fa0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: sqrt_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fsqrt.d ft0, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fsqrt.d fa0, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: sqrt_f64:
@@ -72,7 +62,7 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    sext.w a1, a1
+; RV64IFD-NEXT:    sext.w a0, a0
 ; RV64IFD-NEXT:    call __powidf2 at plt
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -190,29 +180,17 @@ define double @sincos_f64(double %a) nounwind strictfp {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -32
 ; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    mv s0, a1
-; RV32IFD-NEXT:    mv s1, a0
+; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
+; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    call sin at plt
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fsd ft0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT:    mv a0, s1
-; RV32IFD-NEXT:    mv a1, s0
+; RV32IFD-NEXT:    fmv.d fs1, fa0
+; RV32IFD-NEXT:    fmv.d fa0, fs0
 ; RV32IFD-NEXT:    call cos at plt
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    fadd.d fa0, fs1, fa0
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
 ;
@@ -220,19 +198,17 @@ define double @sincos_f64(double %a) nounwind strictfp {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -32
 ; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    mv s0, a0
+; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    fmv.d fs0, fa0
 ; RV64IFD-NEXT:    call sin at plt
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    mv a0, s0
+; RV64IFD-NEXT:    fmv.d fs1, fa0
+; RV64IFD-NEXT:    fmv.d fa0, fs0
 ; RV64IFD-NEXT:    call cos at plt
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fld ft1, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fadd.d fa0, fs1, fa0
 ; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 32
 ; RV64IFD-NEXT:    ret
 ;
@@ -547,30 +523,12 @@ declare double @llvm.experimental.constrained.fma.f64(double, double, double, me
 define double @fma_f64(double %a, double %b, double %c) nounwind strictfp {
 ; RV32IFD-LABEL: fma_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fma_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a2
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fma_f64:
@@ -599,30 +557,12 @@ declare double @llvm.experimental.constrained.fmuladd.f64(double, double, double
 define double @fmuladd_f64(double %a, double %b, double %c) nounwind strictfp {
 ; RV32IFD-LABEL: fmuladd_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmuladd_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a2
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmuladd_f64:
@@ -1060,18 +1000,12 @@ declare iXLen @llvm.experimental.constrained.lrint.iXLen.f64(double, metadata, m
 define iXLen @lrint_f64(double %a) nounwind strictfp {
 ; RV32IFD-LABEL: lrint_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.w.d a0, ft0
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: lrint_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: lrint_f64:
@@ -1100,18 +1034,12 @@ declare iXLen @llvm.experimental.constrained.lround.iXLen.f64(double, metadata)
 define iXLen @lround_f64(double %a) nounwind strictfp {
 ; RV32IFD-LABEL: lround_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rmm
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rmm
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: lround_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rmm
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: lround_f64:
@@ -1149,8 +1077,7 @@ define i64 @llrint_f64(double %a) nounwind strictfp {
 ;
 ; RV64IFD-LABEL: llrint_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: llrint_f64:
@@ -1188,8 +1115,7 @@ define i64 @llround_f64(double %a) nounwind strictfp {
 ;
 ; RV64IFD-LABEL: llround_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rmm
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: llround_f64:

diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index 5b1be180c1b8b..74c5ea68c7d6d 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -1,8 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d \
-; RUN:   -verify-machineinstrs | FileCheck -check-prefix=RV32IFD %s
+; RUN:   -verify-machineinstrs -target-abi=ilp32d \
+; RUN:   | FileCheck -check-prefix=RV32IFD %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \
-; RUN:   -verify-machineinstrs | FileCheck -check-prefix=RV64IFD %s
+; RUN:   -verify-machineinstrs -target-abi=lp64d \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \
 ; RUN:   -verify-machineinstrs | FileCheck -check-prefix=RV32I %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 \
@@ -13,22 +15,12 @@ declare double @llvm.sqrt.f64(double)
 define double @sqrt_f64(double %a) nounwind {
 ; RV32IFD-LABEL: sqrt_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fsqrt.d ft0, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fsqrt.d fa0, fa0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: sqrt_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fsqrt.d ft0, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fsqrt.d fa0, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: sqrt_f64:
@@ -68,7 +60,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    sext.w a1, a1
+; RV64IFD-NEXT:    sext.w a0, a0
 ; RV64IFD-NEXT:    call __powidf2@plt
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -186,29 +178,17 @@ define double @sincos_f64(double %a) nounwind {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -32
 ; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    mv s0, a1
-; RV32IFD-NEXT:    mv s1, a0
+; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
+; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    call sin@plt
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fsd ft0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT:    mv a0, s1
-; RV32IFD-NEXT:    mv a1, s0
+; RV32IFD-NEXT:    fmv.d fs1, fa0
+; RV32IFD-NEXT:    fmv.d fa0, fs0
 ; RV32IFD-NEXT:    call cos@plt
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fld ft1, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    fadd.d fa0, fs1, fa0
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
 ;
@@ -216,19 +196,17 @@ define double @sincos_f64(double %a) nounwind {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -32
 ; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    mv s0, a0
+; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    fmv.d fs0, fa0
 ; RV64IFD-NEXT:    call sin@plt
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    mv a0, s0
+; RV64IFD-NEXT:    fmv.d fs1, fa0
+; RV64IFD-NEXT:    fmv.d fa0, fs0
 ; RV64IFD-NEXT:    call cos@plt
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fld ft1, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fadd.d fa0, fs1, fa0
 ; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 32
 ; RV64IFD-NEXT:    ret
 ;
@@ -543,30 +521,12 @@ declare double @llvm.fma.f64(double, double, double)
 define double @fma_f64(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fma_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fma_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a2
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fma_f64:
@@ -595,30 +555,12 @@ declare double @llvm.fmuladd.f64(double, double, double)
 define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fmuladd_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a4, 8(sp)
-; RV32IFD-NEXT:    sw a5, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmuladd_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a2
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmuladd_f64:
@@ -661,16 +603,12 @@ declare double @llvm.fabs.f64(double)
 define double @fabs_f64(double %a) nounwind {
 ; RV32IFD-LABEL: fabs_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    lui a2, 524288
-; RV32IFD-NEXT:    addi a2, a2, -1
-; RV32IFD-NEXT:    and a1, a1, a2
+; RV32IFD-NEXT:    fabs.d fa0, fa0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fabs_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    li a1, -1
-; RV64IFD-NEXT:    srli a1, a1, 1
-; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    fabs.d fa0, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fabs_f64:
@@ -695,26 +633,12 @@ declare double @llvm.minnum.f64(double, double)
 define double @minnum_f64(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: minnum_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fmin.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fmin.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: minnum_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmin.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: minnum_f64:
@@ -743,26 +667,12 @@ declare double @llvm.maxnum.f64(double, double)
 define double @maxnum_f64(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: maxnum_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fmax.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fmax.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: maxnum_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fmax.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmax.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: maxnum_f64:
@@ -808,26 +718,12 @@ declare double @llvm.copysign.f64(double, double)
 define double @copysign_f64(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: copysign_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fsgnj.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fsgnj.d fa0, fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: copysign_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fsgnj.d fa0, fa0, fa1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: copysign_f64:
@@ -1151,18 +1047,12 @@ declare iXLen @llvm.lrint.iXLen.f64(double)
 define iXLen @lrint_f64(double %a) nounwind {
 ; RV32IFD-LABEL: lrint_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.w.d a0, ft0
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: lrint_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: lrint_f64:
@@ -1191,18 +1081,12 @@ declare iXLen @llvm.lround.iXLen.f64(double)
 define iXLen @lround_f64(double %a) nounwind {
 ; RV32IFD-LABEL: lround_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rmm
-; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rmm
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: lround_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rmm
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: lround_f64:
@@ -1240,8 +1124,7 @@ define i64 @llrint_f64(double %a) nounwind {
 ;
 ; RV64IFD-LABEL: llrint_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: llrint_f64:
@@ -1279,8 +1162,7 @@ define i64 @llround_f64(double %a) nounwind {
 ;
 ; RV64IFD-LABEL: llround_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rmm
+; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: llround_f64:

diff --git a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
index aa59ef06c5873..3442c334d7443 100644
--- a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
@@ -1,19 +1,18 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN:   -target-abi=ilp32d | FileCheck -check-prefix=RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV64IFD %s
+; RUN:   -target-abi=lp64d | FileCheck -check-prefix=RV64IFD %s
 
 define double @select_fcmp_false(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_false:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    mv a1, a3
-; RV32IFD-NEXT:    mv a0, a2
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_false:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    mv a0, a1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:    ret
   %1 = fcmp false double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -23,34 +22,20 @@ define double @select_fcmp_false(double %a, double %b) nounwind {
 define double @select_fcmp_oeq(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_oeq:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    feq.d a0, ft1, ft0
+; RV32IFD-NEXT:    feq.d a0, fa0, fa1
 ; RV32IFD-NEXT:    bnez a0, .LBB1_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    fmv.d ft1, ft0
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:  .LBB1_2:
-; RV32IFD-NEXT:    fsd ft1, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_oeq:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    feq.d a0, ft0, ft1
+; RV64IFD-NEXT:    feq.d a0, fa0, fa1
 ; RV64IFD-NEXT:    bnez a0, .LBB1_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:  .LBB1_2:
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = fcmp oeq double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -60,34 +45,20 @@ define double @select_fcmp_oeq(double %a, double %b) nounwind {
 define double @select_fcmp_ogt(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_ogt:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
+; RV32IFD-NEXT:    flt.d a0, fa1, fa0
 ; RV32IFD-NEXT:    bnez a0, .LBB2_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    fmv.d ft0, ft1
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:  .LBB2_2:
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_ogt:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    flt.d a0, fa1, fa0
 ; RV64IFD-NEXT:    bnez a0, .LBB2_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:  .LBB2_2:
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = fcmp ogt double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -97,34 +68,20 @@ define double @select_fcmp_ogt(double %a, double %b) nounwind {
 define double @select_fcmp_oge(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_oge:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fle.d a0, ft1, ft0
+; RV32IFD-NEXT:    fle.d a0, fa1, fa0
 ; RV32IFD-NEXT:    bnez a0, .LBB3_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    fmv.d ft0, ft1
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:  .LBB3_2:
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_oge:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    fle.d a0, fa1, fa0
 ; RV64IFD-NEXT:    bnez a0, .LBB3_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:  .LBB3_2:
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = fcmp oge double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -134,34 +91,20 @@ define double @select_fcmp_oge(double %a, double %b) nounwind {
 define double @select_fcmp_olt(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_olt:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
 ; RV32IFD-NEXT:    bnez a0, .LBB4_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    fmv.d ft1, ft0
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:  .LBB4_2:
-; RV32IFD-NEXT:    fsd ft1, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_olt:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    flt.d a0, ft0, ft1
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
 ; RV64IFD-NEXT:    bnez a0, .LBB4_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:  .LBB4_2:
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = fcmp olt double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -171,34 +114,20 @@ define double @select_fcmp_olt(double %a, double %b) nounwind {
 define double @select_fcmp_ole(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_ole:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fle.d a0, ft1, ft0
+; RV32IFD-NEXT:    fle.d a0, fa0, fa1
 ; RV32IFD-NEXT:    bnez a0, .LBB5_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    fmv.d ft1, ft0
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:  .LBB5_2:
-; RV32IFD-NEXT:    fsd ft1, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_ole:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fle.d a0, ft0, ft1
+; RV64IFD-NEXT:    fle.d a0, fa0, fa1
 ; RV64IFD-NEXT:    bnez a0, .LBB5_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:  .LBB5_2:
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = fcmp ole double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -208,38 +137,24 @@ define double @select_fcmp_ole(double %a, double %b) nounwind {
 define double @select_fcmp_one(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_one:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
-; RV32IFD-NEXT:    flt.d a1, ft0, ft1
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
+; RV32IFD-NEXT:    flt.d a1, fa1, fa0
 ; RV32IFD-NEXT:    or a0, a1, a0
 ; RV32IFD-NEXT:    bnez a0, .LBB6_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    fmv.d ft1, ft0
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:  .LBB6_2:
-; RV32IFD-NEXT:    fsd ft1, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_one:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    flt.d a0, ft0, ft1
-; RV64IFD-NEXT:    flt.d a1, ft1, ft0
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
+; RV64IFD-NEXT:    flt.d a1, fa1, fa0
 ; RV64IFD-NEXT:    or a0, a1, a0
 ; RV64IFD-NEXT:    bnez a0, .LBB6_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:  .LBB6_2:
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = fcmp one double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -249,38 +164,24 @@ define double @select_fcmp_one(double %a, double %b) nounwind {
 define double @select_fcmp_ord(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_ord:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    feq.d a0, ft1, ft1
-; RV32IFD-NEXT:    feq.d a1, ft0, ft0
+; RV32IFD-NEXT:    feq.d a0, fa1, fa1
+; RV32IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV32IFD-NEXT:    and a0, a1, a0
 ; RV32IFD-NEXT:    bnez a0, .LBB7_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    fmv.d ft0, ft1
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:  .LBB7_2:
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_ord:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    feq.d a0, ft1, ft1
-; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    feq.d a0, fa1, fa1
+; RV64IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    bnez a0, .LBB7_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:  .LBB7_2:
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = fcmp ord double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -290,38 +191,24 @@ define double @select_fcmp_ord(double %a, double %b) nounwind {
 define double @select_fcmp_ueq(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_ueq:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
-; RV32IFD-NEXT:    flt.d a1, ft0, ft1
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
+; RV32IFD-NEXT:    flt.d a1, fa1, fa0
 ; RV32IFD-NEXT:    or a0, a1, a0
 ; RV32IFD-NEXT:    beqz a0, .LBB8_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    fmv.d ft1, ft0
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:  .LBB8_2:
-; RV32IFD-NEXT:    fsd ft1, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_ueq:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    flt.d a0, ft0, ft1
-; RV64IFD-NEXT:    flt.d a1, ft1, ft0
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
+; RV64IFD-NEXT:    flt.d a1, fa1, fa0
 ; RV64IFD-NEXT:    or a0, a1, a0
 ; RV64IFD-NEXT:    beqz a0, .LBB8_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:  .LBB8_2:
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = fcmp ueq double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -331,34 +218,20 @@ define double @select_fcmp_ueq(double %a, double %b) nounwind {
 define double @select_fcmp_ugt(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_ugt:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fle.d a0, ft1, ft0
+; RV32IFD-NEXT:    fle.d a0, fa0, fa1
 ; RV32IFD-NEXT:    beqz a0, .LBB9_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    fmv.d ft1, ft0
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:  .LBB9_2:
-; RV32IFD-NEXT:    fsd ft1, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_ugt:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fle.d a0, ft0, ft1
+; RV64IFD-NEXT:    fle.d a0, fa0, fa1
 ; RV64IFD-NEXT:    beqz a0, .LBB9_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:  .LBB9_2:
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = fcmp ugt double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -368,34 +241,20 @@ define double @select_fcmp_ugt(double %a, double %b) nounwind {
 define double @select_fcmp_uge(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_uge:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
 ; RV32IFD-NEXT:    beqz a0, .LBB10_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    fmv.d ft1, ft0
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:  .LBB10_2:
-; RV32IFD-NEXT:    fsd ft1, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_uge:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    flt.d a0, ft0, ft1
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
 ; RV64IFD-NEXT:    beqz a0, .LBB10_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:  .LBB10_2:
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = fcmp uge double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -405,34 +264,20 @@ define double @select_fcmp_uge(double %a, double %b) nounwind {
 define double @select_fcmp_ult(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_ult:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fle.d a0, ft1, ft0
+; RV32IFD-NEXT:    fle.d a0, fa1, fa0
 ; RV32IFD-NEXT:    beqz a0, .LBB11_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    fmv.d ft0, ft1
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:  .LBB11_2:
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_ult:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    fle.d a0, fa1, fa0
 ; RV64IFD-NEXT:    beqz a0, .LBB11_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:  .LBB11_2:
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = fcmp ult double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -442,34 +287,20 @@ define double @select_fcmp_ult(double %a, double %b) nounwind {
 define double @select_fcmp_ule(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_ule:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    flt.d a0, ft1, ft0
+; RV32IFD-NEXT:    flt.d a0, fa1, fa0
 ; RV32IFD-NEXT:    beqz a0, .LBB12_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    fmv.d ft0, ft1
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:  .LBB12_2:
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_ule:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    flt.d a0, fa1, fa0
 ; RV64IFD-NEXT:    beqz a0, .LBB12_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:  .LBB12_2:
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = fcmp ule double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -479,34 +310,20 @@ define double @select_fcmp_ule(double %a, double %b) nounwind {
 define double @select_fcmp_une(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_une:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    feq.d a0, ft1, ft0
+; RV32IFD-NEXT:    feq.d a0, fa0, fa1
 ; RV32IFD-NEXT:    beqz a0, .LBB13_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    fmv.d ft1, ft0
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:  .LBB13_2:
-; RV32IFD-NEXT:    fsd ft1, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_une:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    feq.d a0, ft0, ft1
+; RV64IFD-NEXT:    feq.d a0, fa0, fa1
 ; RV64IFD-NEXT:    beqz a0, .LBB13_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:  .LBB13_2:
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = fcmp une double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -516,38 +333,24 @@ define double @select_fcmp_une(double %a, double %b) nounwind {
 define double @select_fcmp_uno(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_uno:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    feq.d a0, ft1, ft1
-; RV32IFD-NEXT:    feq.d a1, ft0, ft0
+; RV32IFD-NEXT:    feq.d a0, fa1, fa1
+; RV32IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV32IFD-NEXT:    and a0, a1, a0
 ; RV32IFD-NEXT:    beqz a0, .LBB14_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    fmv.d ft0, ft1
+; RV32IFD-NEXT:    fmv.d fa0, fa1
 ; RV32IFD-NEXT:  .LBB14_2:
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
-; RV32IFD-NEXT:    lw a0, 8(sp)
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_fcmp_uno:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    feq.d a0, ft1, ft1
-; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    feq.d a0, fa1, fa1
+; RV64IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    beqz a0, .LBB14_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:    fmv.d fa0, fa1
 ; RV64IFD-NEXT:  .LBB14_2:
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = fcmp uno double %a, %b
   %2 = select i1 %1, double %a, double %b
@@ -571,31 +374,19 @@ define double @select_fcmp_true(double %a, double %b) nounwind {
 define i32 @i32_select_fcmp_oeq(double %a, double %b, i32 %c, i32 %d) nounwind {
 ; RV32IFD-LABEL: i32_select_fcmp_oeq:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw a2, 8(sp)
-; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    feq.d a1, ft1, ft0
-; RV32IFD-NEXT:    mv a0, a4
-; RV32IFD-NEXT:    bnez a1, .LBB16_2
+; RV32IFD-NEXT:    feq.d a2, fa0, fa1
+; RV32IFD-NEXT:    bnez a2, .LBB16_2
 ; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    mv a0, a5
+; RV32IFD-NEXT:    mv a0, a1
 ; RV32IFD-NEXT:  .LBB16_2:
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: i32_select_fcmp_oeq:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    feq.d a1, ft1, ft0
-; RV64IFD-NEXT:    mv a0, a2
-; RV64IFD-NEXT:    bnez a1, .LBB16_2
+; RV64IFD-NEXT:    feq.d a2, fa0, fa1
+; RV64IFD-NEXT:    bnez a2, .LBB16_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    mv a0, a3
+; RV64IFD-NEXT:    mv a0, a1
 ; RV64IFD-NEXT:  .LBB16_2:
 ; RV64IFD-NEXT:    ret
   %1 = fcmp oeq double %a, %b

diff  --git a/llvm/test/CodeGen/RISCV/float-arith-strict.ll b/llvm/test/CodeGen/RISCV/float-arith-strict.ll
index e700080aeeed4..401e0f79526f0 100644
--- a/llvm/test/CodeGen/RISCV/float-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith-strict.ll
@@ -1,8 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
-; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32IF %s
+; RUN:   -disable-strictnode-mutation -target-abi=ilp32f \
+; RUN:   | FileCheck -check-prefix=RV32IF %s
 ; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
-; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64IF %s
+; RUN:   -disable-strictnode-mutation -target-abi=lp64f \
+; RUN:   | FileCheck -check-prefix=RV64IF %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -11,18 +13,12 @@
 define float @fadd_s(float %a, float %b) nounwind strictfp {
 ; RV32IF-LABEL: fadd_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fadd.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fadd.s fa0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fadd_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fadd.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fadd.s fa0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fadd_s:
@@ -50,18 +46,12 @@ declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, me
 define float @fsub_s(float %a, float %b) nounwind strictfp {
 ; RV32IF-LABEL: fsub_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fsub.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fsub.s fa0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fsub_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fsub.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fsub.s fa0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fsub_s:
@@ -89,18 +79,12 @@ declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, me
 define float @fmul_s(float %a, float %b) nounwind strictfp {
 ; RV32IF-LABEL: fmul_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmul.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmul.s fa0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmul_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmul.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmul.s fa0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmul_s:
@@ -128,18 +112,12 @@ declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, me
 define float @fdiv_s(float %a, float %b) nounwind strictfp {
 ; RV32IF-LABEL: fdiv_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fdiv.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fdiv.s fa0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fdiv_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fdiv.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fdiv.s fa0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fdiv_s:
@@ -167,16 +145,12 @@ declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, me
 define float @fsqrt_s(float %a) nounwind strictfp {
 ; RV32IF-LABEL: fsqrt_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fsqrt.s ft0, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fsqrt.s fa0, fa0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fsqrt_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fsqrt.s ft0, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fsqrt.s fa0, fa0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fsqrt_s:
@@ -286,20 +260,12 @@ declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata)
 define float @fmadd_s(float %a, float %b, float %c) nounwind strictfp {
 ; RV32IF-LABEL: fmadd_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a2
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmadd_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a2
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmadd_s:
@@ -327,24 +293,16 @@ declare float @llvm.experimental.constrained.fma.f32(float, float, float, metada
 define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp {
 ; RV32IF-LABEL: fmsub_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmv.w.x ft2, a2
-; RV32IF-NEXT:    fmv.w.x ft3, zero
-; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV32IF-NEXT:    fmsub.s ft0, ft1, ft0, ft2
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, zero
+; RV32IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV32IF-NEXT:    fmsub.s fa0, fa0, fa1, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmsub_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmv.w.x ft2, a2
-; RV64IF-NEXT:    fmv.w.x ft3, zero
-; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV64IF-NEXT:    fmsub.s ft0, ft1, ft0, ft2
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, zero
+; RV64IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV64IF-NEXT:    fmsub.s fa0, fa0, fa1, ft0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmsub_s:
@@ -399,26 +357,18 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp {
 define float @fnmadd_s(float %a, float %b, float %c) nounwind strictfp {
 ; RV32IF-LABEL: fnmadd_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a2
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmv.w.x ft3, zero
-; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV32IF-NEXT:    fadd.s ft1, ft1, ft3
-; RV32IF-NEXT:    fnmadd.s ft0, ft2, ft0, ft1
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, zero
+; RV32IF-NEXT:    fadd.s ft1, fa0, ft0
+; RV32IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV32IF-NEXT:    fnmadd.s fa0, ft1, fa1, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fnmadd_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a2
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmv.w.x ft3, zero
-; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV64IF-NEXT:    fadd.s ft1, ft1, ft3
-; RV64IF-NEXT:    fnmadd.s ft0, ft2, ft0, ft1
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, zero
+; RV64IF-NEXT:    fadd.s ft1, fa0, ft0
+; RV64IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV64IF-NEXT:    fnmadd.s fa0, ft1, fa1, ft0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s:
@@ -487,26 +437,18 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind strictfp {
 define float @fnmadd_s_2(float %a, float %b, float %c) nounwind strictfp {
 ; RV32IF-LABEL: fnmadd_s_2:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a2
-; RV32IF-NEXT:    fmv.w.x ft2, a1
-; RV32IF-NEXT:    fmv.w.x ft3, zero
-; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV32IF-NEXT:    fadd.s ft1, ft1, ft3
-; RV32IF-NEXT:    fnmadd.s ft0, ft2, ft0, ft1
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, zero
+; RV32IF-NEXT:    fadd.s ft1, fa1, ft0
+; RV32IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV32IF-NEXT:    fnmadd.s fa0, ft1, fa0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fnmadd_s_2:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a2
-; RV64IF-NEXT:    fmv.w.x ft2, a1
-; RV64IF-NEXT:    fmv.w.x ft3, zero
-; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV64IF-NEXT:    fadd.s ft1, ft1, ft3
-; RV64IF-NEXT:    fnmadd.s ft0, ft2, ft0, ft1
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, zero
+; RV64IF-NEXT:    fadd.s ft1, fa1, ft0
+; RV64IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV64IF-NEXT:    fnmadd.s fa0, ft1, fa0, ft0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s_2:
@@ -575,24 +517,16 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind strictfp {
 define float @fnmsub_s(float %a, float %b, float %c) nounwind strictfp {
 ; RV32IF-LABEL: fnmsub_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a2
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmv.w.x ft3, zero
-; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV32IF-NEXT:    fnmsub.s ft0, ft2, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, zero
+; RV32IF-NEXT:    fadd.s ft0, fa0, ft0
+; RV32IF-NEXT:    fnmsub.s fa0, ft0, fa1, fa2
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fnmsub_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a2
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmv.w.x ft3, zero
-; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV64IF-NEXT:    fnmsub.s ft0, ft2, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, zero
+; RV64IF-NEXT:    fadd.s ft0, fa0, ft0
+; RV64IF-NEXT:    fnmsub.s fa0, ft0, fa1, fa2
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_s:
@@ -645,24 +579,16 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind strictfp {
 define float @fnmsub_s_2(float %a, float %b, float %c) nounwind strictfp {
 ; RV32IF-LABEL: fnmsub_s_2:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a2
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmv.w.x ft2, a1
-; RV32IF-NEXT:    fmv.w.x ft3, zero
-; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV32IF-NEXT:    fnmsub.s ft0, ft2, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, zero
+; RV32IF-NEXT:    fadd.s ft0, fa1, ft0
+; RV32IF-NEXT:    fnmsub.s fa0, ft0, fa0, fa2
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fnmsub_s_2:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a2
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmv.w.x ft2, a1
-; RV64IF-NEXT:    fmv.w.x ft3, zero
-; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV64IF-NEXT:    fnmsub.s ft0, ft2, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, zero
+; RV64IF-NEXT:    fadd.s ft0, fa1, ft0
+; RV64IF-NEXT:    fnmsub.s fa0, ft0, fa0, fa2
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_s_2:

diff  --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll
index 3dd3a1810aa36..a0d5c5be7310f 100644
--- a/llvm/test/CodeGen/RISCV/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV32IF %s
+; RUN:   -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
 ; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV64IF %s
+; RUN:   -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -16,18 +16,12 @@
 define float @fadd_s(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fadd_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fadd.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fadd.s fa0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fadd_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fadd.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fadd.s fa0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fadd_s:
@@ -54,18 +48,12 @@ define float @fadd_s(float %a, float %b) nounwind {
 define float @fsub_s(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fsub_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fsub.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fsub.s fa0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fsub_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fsub.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fsub.s fa0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fsub_s:
@@ -92,18 +80,12 @@ define float @fsub_s(float %a, float %b) nounwind {
 define float @fmul_s(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fmul_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmul.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmul.s fa0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmul_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmul.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmul.s fa0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmul_s:
@@ -130,18 +112,12 @@ define float @fmul_s(float %a, float %b) nounwind {
 define float @fdiv_s(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fdiv_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fdiv.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fdiv.s fa0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fdiv_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fdiv.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fdiv.s fa0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fdiv_s:
@@ -170,16 +146,12 @@ declare float @llvm.sqrt.f32(float)
 define float @fsqrt_s(float %a) nounwind {
 ; RV32IF-LABEL: fsqrt_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fsqrt.s ft0, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fsqrt.s fa0, fa0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fsqrt_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fsqrt.s ft0, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fsqrt.s fa0, fa0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fsqrt_s:
@@ -208,18 +180,12 @@ declare float @llvm.copysign.f32(float, float)
 define float @fsgnj_s(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fsgnj_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fsgnj.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fsgnj.s fa0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fsgnj_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fsgnj.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fsgnj.s fa0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fsgnj_s:
@@ -248,16 +214,14 @@ define float @fsgnj_s(float %a, float %b) nounwind {
 define i32 @fneg_s(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fneg_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fadd.s ft0, ft0, ft0
+; RV32IF-NEXT:    fadd.s ft0, fa0, fa0
 ; RV32IF-NEXT:    fneg.s ft1, ft0
 ; RV32IF-NEXT:    feq.s a0, ft0, ft1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fneg_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fadd.s ft0, ft0, ft0
+; RV64IF-NEXT:    fadd.s ft0, fa0, fa0
 ; RV64IF-NEXT:    fneg.s ft1, ft0
 ; RV64IF-NEXT:    feq.s a0, ft0, ft1
 ; RV64IF-NEXT:    ret
@@ -301,20 +265,14 @@ define i32 @fneg_s(float %a, float %b) nounwind {
 define float @fsgnjn_s(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fsgnjn_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fadd.s ft0, ft1, ft0
-; RV32IF-NEXT:    fsgnjn.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fadd.s ft0, fa0, fa1
+; RV32IF-NEXT:    fsgnjn.s fa0, fa0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fsgnjn_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fadd.s ft0, ft1, ft0
-; RV64IF-NEXT:    fsgnjn.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fadd.s ft0, fa0, fa1
+; RV64IF-NEXT:    fsgnjn.s fa0, fa0, ft0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fsgnjn_s:
@@ -365,22 +323,16 @@ declare float @llvm.fabs.f32(float)
 define float @fabs_s(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fabs_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fadd.s ft0, ft1, ft0
+; RV32IF-NEXT:    fadd.s ft0, fa0, fa1
 ; RV32IF-NEXT:    fabs.s ft1, ft0
-; RV32IF-NEXT:    fadd.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fadd.s fa0, ft1, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fabs_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fadd.s ft0, ft1, ft0
+; RV64IF-NEXT:    fadd.s ft0, fa0, fa1
 ; RV64IF-NEXT:    fabs.s ft1, ft0
-; RV64IF-NEXT:    fadd.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fadd.s fa0, ft1, ft0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fabs_s:
@@ -421,18 +373,12 @@ declare float @llvm.minnum.f32(float, float)
 define float @fmin_s(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fmin_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmin.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmin.s fa0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmin_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmin.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmin.s fa0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmin_s:
@@ -461,18 +407,12 @@ declare float @llvm.maxnum.f32(float, float)
 define float @fmax_s(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fmax_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmax.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmax.s fa0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmax_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmax.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmax.s fa0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmax_s:
@@ -501,20 +441,12 @@ declare float @llvm.fma.f32(float, float, float)
 define float @fmadd_s(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fmadd_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a2
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmadd_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a2
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmadd_s:
@@ -541,24 +473,16 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind {
 define float @fmsub_s(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fmsub_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmv.w.x ft2, a2
-; RV32IF-NEXT:    fmv.w.x ft3, zero
-; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV32IF-NEXT:    fmsub.s ft0, ft1, ft0, ft2
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, zero
+; RV32IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV32IF-NEXT:    fmsub.s fa0, fa0, fa1, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmsub_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmv.w.x ft2, a2
-; RV64IF-NEXT:    fmv.w.x ft3, zero
-; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV64IF-NEXT:    fmsub.s ft0, ft1, ft0, ft2
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, zero
+; RV64IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV64IF-NEXT:    fmsub.s fa0, fa0, fa1, ft0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmsub_s:
@@ -613,26 +537,18 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
 define float @fnmadd_s(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fnmadd_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a2
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmv.w.x ft3, zero
-; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV32IF-NEXT:    fadd.s ft1, ft1, ft3
-; RV32IF-NEXT:    fnmadd.s ft0, ft2, ft0, ft1
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, zero
+; RV32IF-NEXT:    fadd.s ft1, fa0, ft0
+; RV32IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV32IF-NEXT:    fnmadd.s fa0, ft1, fa1, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fnmadd_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a2
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmv.w.x ft3, zero
-; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV64IF-NEXT:    fadd.s ft1, ft1, ft3
-; RV64IF-NEXT:    fnmadd.s ft0, ft2, ft0, ft1
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, zero
+; RV64IF-NEXT:    fadd.s ft1, fa0, ft0
+; RV64IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV64IF-NEXT:    fnmadd.s fa0, ft1, fa1, ft0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s:
@@ -701,26 +617,18 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind {
 define float @fnmadd_s_2(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fnmadd_s_2:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a2
-; RV32IF-NEXT:    fmv.w.x ft2, a1
-; RV32IF-NEXT:    fmv.w.x ft3, zero
-; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV32IF-NEXT:    fadd.s ft1, ft1, ft3
-; RV32IF-NEXT:    fnmadd.s ft0, ft2, ft0, ft1
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, zero
+; RV32IF-NEXT:    fadd.s ft1, fa1, ft0
+; RV32IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV32IF-NEXT:    fnmadd.s fa0, ft1, fa0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fnmadd_s_2:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a2
-; RV64IF-NEXT:    fmv.w.x ft2, a1
-; RV64IF-NEXT:    fmv.w.x ft3, zero
-; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV64IF-NEXT:    fadd.s ft1, ft1, ft3
-; RV64IF-NEXT:    fnmadd.s ft0, ft2, ft0, ft1
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, zero
+; RV64IF-NEXT:    fadd.s ft1, fa1, ft0
+; RV64IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV64IF-NEXT:    fnmadd.s fa0, ft1, fa0, ft0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s_2:
@@ -789,24 +697,16 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind {
 define float @fnmsub_s(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fnmsub_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a2
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmv.w.x ft3, zero
-; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV32IF-NEXT:    fnmsub.s ft0, ft2, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, zero
+; RV32IF-NEXT:    fadd.s ft0, fa0, ft0
+; RV32IF-NEXT:    fnmsub.s fa0, ft0, fa1, fa2
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fnmsub_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a2
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmv.w.x ft3, zero
-; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV64IF-NEXT:    fnmsub.s ft0, ft2, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, zero
+; RV64IF-NEXT:    fadd.s ft0, fa0, ft0
+; RV64IF-NEXT:    fnmsub.s fa0, ft0, fa1, fa2
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_s:
@@ -859,24 +759,16 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind {
 define float @fnmsub_s_2(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fnmsub_s_2:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a2
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmv.w.x ft2, a1
-; RV32IF-NEXT:    fmv.w.x ft3, zero
-; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV32IF-NEXT:    fnmsub.s ft0, ft2, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, zero
+; RV32IF-NEXT:    fadd.s ft0, fa1, ft0
+; RV32IF-NEXT:    fnmsub.s fa0, ft0, fa0, fa2
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fnmsub_s_2:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a2
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmv.w.x ft2, a1
-; RV64IF-NEXT:    fmv.w.x ft3, zero
-; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV64IF-NEXT:    fnmsub.s ft0, ft2, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, zero
+; RV64IF-NEXT:    fadd.s ft0, fa1, ft0
+; RV64IF-NEXT:    fnmsub.s fa0, ft0, fa0, fa2
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_s_2:
@@ -931,20 +823,12 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind {
 define float @fmadd_s_contract(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fmadd_s_contract:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a2
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmadd_s_contract:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a2
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmadd_s_contract:
@@ -982,24 +866,16 @@ define float @fmadd_s_contract(float %a, float %b, float %c) nounwind {
 define float @fmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fmsub_s_contract:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmv.w.x ft2, a2
-; RV32IF-NEXT:    fmv.w.x ft3, zero
-; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV32IF-NEXT:    fmsub.s ft0, ft1, ft0, ft2
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, zero
+; RV32IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV32IF-NEXT:    fmsub.s fa0, fa0, fa1, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmsub_s_contract:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmv.w.x ft2, a2
-; RV64IF-NEXT:    fmv.w.x ft3, zero
-; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV64IF-NEXT:    fmsub.s ft0, ft1, ft0, ft2
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, zero
+; RV64IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV64IF-NEXT:    fmsub.s fa0, fa0, fa1, ft0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmsub_s_contract:
@@ -1060,28 +936,20 @@ define float @fmsub_s_contract(float %a, float %b, float %c) nounwind {
 define float @fnmadd_s_contract(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fnmadd_s_contract:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a2
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmv.w.x ft3, zero
-; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV32IF-NEXT:    fadd.s ft1, ft1, ft3
-; RV32IF-NEXT:    fadd.s ft0, ft0, ft3
-; RV32IF-NEXT:    fnmadd.s ft0, ft2, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, zero
+; RV32IF-NEXT:    fadd.s ft1, fa0, ft0
+; RV32IF-NEXT:    fadd.s ft2, fa1, ft0
+; RV32IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV32IF-NEXT:    fnmadd.s fa0, ft1, ft2, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fnmadd_s_contract:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a2
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmv.w.x ft3, zero
-; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV64IF-NEXT:    fadd.s ft1, ft1, ft3
-; RV64IF-NEXT:    fadd.s ft0, ft0, ft3
-; RV64IF-NEXT:    fnmadd.s ft0, ft2, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, zero
+; RV64IF-NEXT:    fadd.s ft1, fa0, ft0
+; RV64IF-NEXT:    fadd.s ft2, fa1, ft0
+; RV64IF-NEXT:    fadd.s ft0, fa2, ft0
+; RV64IF-NEXT:    fnmadd.s fa0, ft1, ft2, ft0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s_contract:
@@ -1167,26 +1035,18 @@ define float @fnmadd_s_contract(float %a, float %b, float %c) nounwind {
 define float @fnmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fnmsub_s_contract:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a2
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmv.w.x ft3, zero
-; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV32IF-NEXT:    fadd.s ft1, ft1, ft3
-; RV32IF-NEXT:    fnmsub.s ft0, ft2, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, zero
+; RV32IF-NEXT:    fadd.s ft1, fa0, ft0
+; RV32IF-NEXT:    fadd.s ft0, fa1, ft0
+; RV32IF-NEXT:    fnmsub.s fa0, ft1, ft0, fa2
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fnmsub_s_contract:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a2
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmv.w.x ft3, zero
-; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
-; RV64IF-NEXT:    fadd.s ft1, ft1, ft3
-; RV64IF-NEXT:    fnmsub.s ft0, ft2, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, zero
+; RV64IF-NEXT:    fadd.s ft1, fa0, ft0
+; RV64IF-NEXT:    fadd.s ft0, fa1, ft0
+; RV64IF-NEXT:    fnmsub.s fa0, ft1, ft0, fa2
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_s_contract:

diff  --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
index ea4732f72b9ca..cac2df760cfbe 100644
--- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV32IF %s
+; RUN:   -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
 ; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV64IF %s
+; RUN:   -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
 
 declare void @abort()
 declare void @exit(i32)
@@ -42,9 +42,7 @@ if.else:
 define void @br_fcmp_oeq(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_oeq:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    feq.s a0, ft1, ft0
+; RV32IF-NEXT:    feq.s a0, fa0, fa1
 ; RV32IF-NEXT:    bnez a0, .LBB1_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    ret
@@ -55,9 +53,7 @@ define void @br_fcmp_oeq(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_oeq:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    feq.s a0, ft1, ft0
+; RV64IF-NEXT:    feq.s a0, fa0, fa1
 ; RV64IF-NEXT:    bnez a0, .LBB1_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ret
@@ -80,9 +76,7 @@ if.then:
 define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_oeq_alt:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    feq.s a0, ft1, ft0
+; RV32IF-NEXT:    feq.s a0, fa0, fa1
 ; RV32IF-NEXT:    bnez a0, .LBB2_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    ret
@@ -93,9 +87,7 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_oeq_alt:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    feq.s a0, ft1, ft0
+; RV64IF-NEXT:    feq.s a0, fa0, fa1
 ; RV64IF-NEXT:    bnez a0, .LBB2_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ret
@@ -115,9 +107,7 @@ if.else:
 define void @br_fcmp_ogt(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_ogt:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    flt.s a0, fa1, fa0
 ; RV32IF-NEXT:    bnez a0, .LBB3_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    ret
@@ -128,9 +118,7 @@ define void @br_fcmp_ogt(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_ogt:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    flt.s a0, fa1, fa0
 ; RV64IF-NEXT:    bnez a0, .LBB3_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ret
@@ -150,9 +138,7 @@ if.then:
 define void @br_fcmp_oge(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_oge:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fle.s a0, ft1, ft0
+; RV32IF-NEXT:    fle.s a0, fa1, fa0
 ; RV32IF-NEXT:    bnez a0, .LBB4_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    ret
@@ -163,9 +149,7 @@ define void @br_fcmp_oge(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_oge:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fle.s a0, ft1, ft0
+; RV64IF-NEXT:    fle.s a0, fa1, fa0
 ; RV64IF-NEXT:    bnez a0, .LBB4_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ret
@@ -185,9 +169,7 @@ if.then:
 define void @br_fcmp_olt(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_olt:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
 ; RV32IF-NEXT:    bnez a0, .LBB5_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    ret
@@ -198,9 +180,7 @@ define void @br_fcmp_olt(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_olt:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
 ; RV64IF-NEXT:    bnez a0, .LBB5_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ret
@@ -220,9 +200,7 @@ if.then:
 define void @br_fcmp_ole(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_ole:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fle.s a0, ft1, ft0
+; RV32IF-NEXT:    fle.s a0, fa0, fa1
 ; RV32IF-NEXT:    bnez a0, .LBB6_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    ret
@@ -233,9 +211,7 @@ define void @br_fcmp_ole(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_ole:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fle.s a0, ft1, ft0
+; RV64IF-NEXT:    fle.s a0, fa0, fa1
 ; RV64IF-NEXT:    bnez a0, .LBB6_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ret
@@ -255,10 +231,8 @@ if.then:
 define void @br_fcmp_one(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_one:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
-; RV32IF-NEXT:    flt.s a1, ft0, ft1
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
+; RV32IF-NEXT:    flt.s a1, fa1, fa0
 ; RV32IF-NEXT:    or a0, a1, a0
 ; RV32IF-NEXT:    bnez a0, .LBB7_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
@@ -270,10 +244,8 @@ define void @br_fcmp_one(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_one:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
-; RV64IF-NEXT:    flt.s a1, ft0, ft1
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
+; RV64IF-NEXT:    flt.s a1, fa1, fa0
 ; RV64IF-NEXT:    or a0, a1, a0
 ; RV64IF-NEXT:    bnez a0, .LBB7_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
@@ -294,10 +266,8 @@ if.then:
 define void @br_fcmp_ord(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_ord:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    feq.s a0, ft1, ft1
-; RV32IF-NEXT:    feq.s a1, ft0, ft0
+; RV32IF-NEXT:    feq.s a0, fa1, fa1
+; RV32IF-NEXT:    feq.s a1, fa0, fa0
 ; RV32IF-NEXT:    and a0, a1, a0
 ; RV32IF-NEXT:    bnez a0, .LBB8_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
@@ -309,10 +279,8 @@ define void @br_fcmp_ord(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_ord:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    feq.s a0, ft1, ft1
-; RV64IF-NEXT:    feq.s a1, ft0, ft0
+; RV64IF-NEXT:    feq.s a0, fa1, fa1
+; RV64IF-NEXT:    feq.s a1, fa0, fa0
 ; RV64IF-NEXT:    and a0, a1, a0
 ; RV64IF-NEXT:    bnez a0, .LBB8_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
@@ -333,10 +301,8 @@ if.then:
 define void @br_fcmp_ueq(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_ueq:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
-; RV32IF-NEXT:    flt.s a1, ft0, ft1
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
+; RV32IF-NEXT:    flt.s a1, fa1, fa0
 ; RV32IF-NEXT:    or a0, a1, a0
 ; RV32IF-NEXT:    beqz a0, .LBB9_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
@@ -348,10 +314,8 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_ueq:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
-; RV64IF-NEXT:    flt.s a1, ft0, ft1
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
+; RV64IF-NEXT:    flt.s a1, fa1, fa0
 ; RV64IF-NEXT:    or a0, a1, a0
 ; RV64IF-NEXT:    beqz a0, .LBB9_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
@@ -372,9 +336,7 @@ if.then:
 define void @br_fcmp_ugt(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_ugt:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fle.s a0, ft1, ft0
+; RV32IF-NEXT:    fle.s a0, fa0, fa1
 ; RV32IF-NEXT:    beqz a0, .LBB10_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    ret
@@ -385,9 +347,7 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_ugt:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fle.s a0, ft1, ft0
+; RV64IF-NEXT:    fle.s a0, fa0, fa1
 ; RV64IF-NEXT:    beqz a0, .LBB10_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ret
@@ -407,9 +367,7 @@ if.then:
 define void @br_fcmp_uge(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_uge:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
 ; RV32IF-NEXT:    beqz a0, .LBB11_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    ret
@@ -420,9 +378,7 @@ define void @br_fcmp_uge(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_uge:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
 ; RV64IF-NEXT:    beqz a0, .LBB11_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ret
@@ -442,9 +398,7 @@ if.then:
 define void @br_fcmp_ult(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_ult:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fle.s a0, ft1, ft0
+; RV32IF-NEXT:    fle.s a0, fa1, fa0
 ; RV32IF-NEXT:    beqz a0, .LBB12_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    ret
@@ -455,9 +409,7 @@ define void @br_fcmp_ult(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_ult:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fle.s a0, ft1, ft0
+; RV64IF-NEXT:    fle.s a0, fa1, fa0
 ; RV64IF-NEXT:    beqz a0, .LBB12_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ret
@@ -477,9 +429,7 @@ if.then:
 define void @br_fcmp_ule(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_ule:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    flt.s a0, fa1, fa0
 ; RV32IF-NEXT:    beqz a0, .LBB13_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    ret
@@ -490,9 +440,7 @@ define void @br_fcmp_ule(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_ule:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    flt.s a0, fa1, fa0
 ; RV64IF-NEXT:    beqz a0, .LBB13_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ret
@@ -512,9 +460,7 @@ if.then:
 define void @br_fcmp_une(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_une:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    feq.s a0, ft1, ft0
+; RV32IF-NEXT:    feq.s a0, fa0, fa1
 ; RV32IF-NEXT:    beqz a0, .LBB14_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    ret
@@ -525,9 +471,7 @@ define void @br_fcmp_une(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_une:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    feq.s a0, ft1, ft0
+; RV64IF-NEXT:    feq.s a0, fa0, fa1
 ; RV64IF-NEXT:    beqz a0, .LBB14_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ret
@@ -548,10 +492,8 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
 ; TODO: sltiu+bne -> beq
 ; RV32IF-LABEL: br_fcmp_uno:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    feq.s a0, ft1, ft1
-; RV32IF-NEXT:    feq.s a1, ft0, ft0
+; RV32IF-NEXT:    feq.s a0, fa1, fa1
+; RV32IF-NEXT:    feq.s a1, fa0, fa0
 ; RV32IF-NEXT:    and a0, a1, a0
 ; RV32IF-NEXT:    beqz a0, .LBB15_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
@@ -563,10 +505,8 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_uno:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    feq.s a0, ft1, ft1
-; RV64IF-NEXT:    feq.s a1, ft0, ft0
+; RV64IF-NEXT:    feq.s a0, fa1, fa1
+; RV64IF-NEXT:    feq.s a1, fa0, fa0
 ; RV64IF-NEXT:    and a0, a1, a0
 ; RV64IF-NEXT:    beqz a0, .LBB15_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
@@ -623,23 +563,21 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
 ; RV32IF:       # %bb.0: # %entry
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    li a0, 0
+; RV32IF-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    fmv.w.x fs0, zero
+; RV32IF-NEXT:    fmv.s fa0, fs0
 ; RV32IF-NEXT:    call dummy@plt
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, zero
-; RV32IF-NEXT:    fsw ft1, 8(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    feq.s a0, ft0, ft1
+; RV32IF-NEXT:    feq.s a0, fa0, fs0
 ; RV32IF-NEXT:    beqz a0, .LBB17_3
 ; RV32IF-NEXT:  # %bb.1: # %if.end
-; RV32IF-NEXT:    li a0, 0
+; RV32IF-NEXT:    fmv.s fa0, fs0
 ; RV32IF-NEXT:    call dummy@plt
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    flw ft1, 8(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    feq.s a0, ft0, ft1
+; RV32IF-NEXT:    feq.s a0, fa0, fs0
 ; RV32IF-NEXT:    beqz a0, .LBB17_3
 ; RV32IF-NEXT:  # %bb.2: # %if.end4
 ; RV32IF-NEXT:    li a0, 0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB17_3: # %if.then
@@ -647,30 +585,24 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_store_load_stack_slot:
 ; RV64IF:       # %bb.0: # %entry
-; RV64IF-NEXT:    addi sp, sp, -32
-; RV64IF-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    fmv.w.x ft0, zero
-; RV64IF-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
-; RV64IF-NEXT:    fmv.x.w s0, ft0
-; RV64IF-NEXT:    mv a0, s0
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
+; RV64IF-NEXT:    fmv.w.x fs0, zero
+; RV64IF-NEXT:    fmv.s fa0, fs0
 ; RV64IF-NEXT:    call dummy@plt
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    flw ft1, 12(sp) # 4-byte Folded Reload
-; RV64IF-NEXT:    feq.s a0, ft0, ft1
+; RV64IF-NEXT:    feq.s a0, fa0, fs0
 ; RV64IF-NEXT:    beqz a0, .LBB17_3
 ; RV64IF-NEXT:  # %bb.1: # %if.end
-; RV64IF-NEXT:    mv a0, s0
+; RV64IF-NEXT:    fmv.s fa0, fs0
 ; RV64IF-NEXT:    call dummy@plt
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    flw ft1, 12(sp) # 4-byte Folded Reload
-; RV64IF-NEXT:    feq.s a0, ft0, ft1
+; RV64IF-NEXT:    feq.s a0, fa0, fs0
 ; RV64IF-NEXT:    beqz a0, .LBB17_3
 ; RV64IF-NEXT:  # %bb.2: # %if.end4
 ; RV64IF-NEXT:    li a0, 0
-; RV64IF-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 32
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
+; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB17_3: # %if.then
 ; RV64IF-NEXT:    call abort@plt

diff  --git a/llvm/test/CodeGen/RISCV/float-convert-strict.ll b/llvm/test/CodeGen/RISCV/float-convert-strict.ll
index f1e06ec747319..b391dbdffa47f 100644
--- a/llvm/test/CodeGen/RISCV/float-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert-strict.ll
@@ -1,8 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
-; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32IF %s
+; RUN:   -disable-strictnode-mutation -target-abi=ilp32f \
+; RUN:   | FileCheck -check-prefix=RV32IF %s
 ; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
-; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64IF %s
+; RUN:   -disable-strictnode-mutation -target-abi=lp64f \
+; RUN:   | FileCheck -check-prefix=RV64IF %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -15,14 +17,12 @@
 define i32 @fcvt_w_s(float %a) nounwind strictfp {
 ; RV32IF-LABEL: fcvt_w_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_w_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_w_s:
@@ -50,14 +50,12 @@ declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
 define i32 @fcvt_wu_s(float %a) nounwind strictfp {
 ; RV32IF-LABEL: fcvt_wu_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_wu_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s:
@@ -87,8 +85,7 @@ declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
 define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) nounwind {
 ; RV32IF-LABEL: fcvt_wu_s_multiple_use:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.wu.s a1, ft0, rtz
+; RV32IF-NEXT:    fcvt.wu.s a1, fa0, rtz
 ; RV32IF-NEXT:    li a0, 1
 ; RV32IF-NEXT:    beqz a1, .LBB2_2
 ; RV32IF-NEXT:  # %bb.1:
@@ -98,8 +95,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) nounwind {
 ;
 ; RV64IF-LABEL: fcvt_wu_s_multiple_use:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.wu.s a1, ft0, rtz
+; RV64IF-NEXT:    fcvt.wu.s a1, fa0, rtz
 ; RV64IF-NEXT:    li a0, 1
 ; RV64IF-NEXT:    beqz a1, .LBB2_2
 ; RV64IF-NEXT:  # %bb.1:
@@ -145,14 +141,12 @@ define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) nounwind {
 define float @fcvt_s_w(i32 %a) nounwind strictfp {
 ; RV32IF-LABEL: fcvt_s_w:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fcvt.s.w ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.w fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_w:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.w ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.w fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_w:
@@ -182,15 +176,13 @@ define float @fcvt_s_w_load(i32* %p) nounwind strictfp {
 ; RV32IF-LABEL: fcvt_s_w_load:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lw a0, 0(a0)
-; RV32IF-NEXT:    fcvt.s.w ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.w fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_w_load:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lw a0, 0(a0)
-; RV64IF-NEXT:    fcvt.s.w ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.w fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_w_load:
@@ -220,14 +212,12 @@ define float @fcvt_s_w_load(i32* %p) nounwind strictfp {
 define float @fcvt_s_wu(i32 %a) nounwind strictfp {
 ; RV32IF-LABEL: fcvt_s_wu:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fcvt.s.wu ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_wu:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.wu ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_wu:
@@ -257,15 +247,13 @@ define float @fcvt_s_wu_load(i32* %p) nounwind strictfp {
 ; RV32IF-LABEL: fcvt_s_wu_load:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lw a0, 0(a0)
-; RV32IF-NEXT:    fcvt.s.wu ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_wu_load:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lwu a0, 0(a0)
-; RV64IF-NEXT:    fcvt.s.wu ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_wu_load:
@@ -304,8 +292,7 @@ define i64 @fcvt_l_s(float %a) nounwind strictfp {
 ;
 ; RV64IF-LABEL: fcvt_l_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_l_s:
@@ -342,8 +329,7 @@ define i64 @fcvt_lu_s(float %a) nounwind strictfp {
 ;
 ; RV64IF-LABEL: fcvt_lu_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_lu_s:
@@ -380,8 +366,7 @@ define float @fcvt_s_l(i64 %a) nounwind strictfp {
 ;
 ; RV64IF-LABEL: fcvt_s_l:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.l ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.l fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_l:
@@ -418,8 +403,7 @@ define float @fcvt_s_lu(i64 %a) nounwind strictfp {
 ;
 ; RV64IF-LABEL: fcvt_s_lu:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.lu ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.lu fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_lu:
@@ -447,14 +431,12 @@ declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metad
 define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp {
 ; RV32IF-LABEL: fcvt_s_w_i8:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fcvt.s.w ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.w fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_w_i8:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.w ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.w fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_w_i8:
@@ -482,14 +464,12 @@ declare float @llvm.experimental.constrained.sitofp.f32.i8(i8, metadata, metadat
 define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp {
 ; RV32IF-LABEL: fcvt_s_wu_i8:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fcvt.s.wu ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_wu_i8:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.wu ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_wu_i8:
@@ -517,14 +497,12 @@ declare float @llvm.experimental.constrained.uitofp.f32.i8(i8, metadata, metadat
 define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp {
 ; RV32IF-LABEL: fcvt_s_w_i16:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fcvt.s.w ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.w fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_w_i16:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.w ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.w fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_w_i16:
@@ -552,14 +530,12 @@ declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metad
 define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp {
 ; RV32IF-LABEL: fcvt_s_wu_i16:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fcvt.s.wu ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_wu_i16:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.wu ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_wu_i16:

diff  --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index ecc715775ad93..e3a11088e15f3 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV32IF %s
+; RUN:   -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
 ; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV64IF %s
+; RUN:   -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -11,14 +11,12 @@
 define i32 @fcvt_w_s(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_w_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_w_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_w_s:
@@ -45,26 +43,24 @@ define i32 @fcvt_w_s(float %a) nounwind {
 define i32 @fcvt_w_s_sat(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_w_s_sat:
 ; RV32IF:       # %bb.0: # %start
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    feq.s a0, ft0, ft0
+; RV32IF-NEXT:    feq.s a0, fa0, fa0
 ; RV32IF-NEXT:    bnez a0, .LBB1_2
 ; RV32IF-NEXT:  # %bb.1: # %start
 ; RV32IF-NEXT:    li a0, 0
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB1_2:
-; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_w_s_sat:
 ; RV64IF:       # %bb.0: # %start
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    feq.s a0, ft0, ft0
+; RV64IF-NEXT:    feq.s a0, fa0, fa0
 ; RV64IF-NEXT:    bnez a0, .LBB1_2
 ; RV64IF-NEXT:  # %bb.1: # %start
 ; RV64IF-NEXT:    li a0, 0
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB1_2:
-; RV64IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_w_s_sat:
@@ -169,14 +165,12 @@ declare i32 @llvm.fptosi.sat.i32.f32(float)
 define i32 @fcvt_wu_s(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_wu_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_wu_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s:
@@ -205,8 +199,7 @@ define i32 @fcvt_wu_s(float %a) nounwind {
 define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) nounwind {
 ; RV32IF-LABEL: fcvt_wu_s_multiple_use:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.wu.s a1, ft0, rtz
+; RV32IF-NEXT:    fcvt.wu.s a1, fa0, rtz
 ; RV32IF-NEXT:    li a0, 1
 ; RV32IF-NEXT:    beqz a1, .LBB3_2
 ; RV32IF-NEXT:  # %bb.1:
@@ -216,8 +209,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) nounwind {
 ;
 ; RV64IF-LABEL: fcvt_wu_s_multiple_use:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.wu.s a1, ft0, rtz
+; RV64IF-NEXT:    fcvt.wu.s a1, fa0, rtz
 ; RV64IF-NEXT:    li a0, 1
 ; RV64IF-NEXT:    beqz a1, .LBB3_2
 ; RV64IF-NEXT:  # %bb.1:
@@ -263,26 +255,24 @@ define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) nounwind {
 define i32 @fcvt_wu_s_sat(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_wu_s_sat:
 ; RV32IF:       # %bb.0: # %start
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    feq.s a0, ft0, ft0
+; RV32IF-NEXT:    feq.s a0, fa0, fa0
 ; RV32IF-NEXT:    bnez a0, .LBB4_2
 ; RV32IF-NEXT:  # %bb.1: # %start
 ; RV32IF-NEXT:    li a0, 0
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB4_2:
-; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_wu_s_sat:
 ; RV64IF:       # %bb.0: # %start
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    feq.s a0, ft0, ft0
+; RV64IF-NEXT:    feq.s a0, fa0, fa0
 ; RV64IF-NEXT:    bnez a0, .LBB4_2
 ; RV64IF-NEXT:  # %bb.1: # %start
 ; RV64IF-NEXT:    li a0, 0
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB4_2:
-; RV64IF-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s_sat:
@@ -363,17 +353,13 @@ declare i32 @llvm.fptoui.sat.i32.f32(float)
 define i32 @fmv_x_w(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fmv_x_w:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fadd.s ft0, ft1, ft0
+; RV32IF-NEXT:    fadd.s ft0, fa0, fa1
 ; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmv_x_w:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fadd.s ft0, ft1, ft0
+; RV64IF-NEXT:    fadd.s ft0, fa0, fa1
 ; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
 ;
@@ -403,14 +389,12 @@ define i32 @fmv_x_w(float %a, float %b) nounwind {
 define float @fcvt_s_w(i32 %a) nounwind {
 ; RV32IF-LABEL: fcvt_s_w:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fcvt.s.w ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.w fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_w:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.w ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.w fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_w:
@@ -439,15 +423,13 @@ define float @fcvt_s_w_load(i32* %p) nounwind {
 ; RV32IF-LABEL: fcvt_s_w_load:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lw a0, 0(a0)
-; RV32IF-NEXT:    fcvt.s.w ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.w fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_w_load:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lw a0, 0(a0)
-; RV64IF-NEXT:    fcvt.s.w ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.w fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_w_load:
@@ -477,14 +459,12 @@ define float @fcvt_s_w_load(i32* %p) nounwind {
 define float @fcvt_s_wu(i32 %a) nounwind {
 ; RV32IF-LABEL: fcvt_s_wu:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fcvt.s.wu ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_wu:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.wu ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_wu:
@@ -513,15 +493,13 @@ define float @fcvt_s_wu_load(i32* %p) nounwind {
 ; RV32IF-LABEL: fcvt_s_wu_load:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lw a0, 0(a0)
-; RV32IF-NEXT:    fcvt.s.wu ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_wu_load:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lwu a0, 0(a0)
-; RV64IF-NEXT:    fcvt.s.wu ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_wu_load:
@@ -553,16 +531,14 @@ define float @fmv_w_x(i32 %a, i32 %b) nounwind {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fadd.s ft0, ft0, ft1
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fadd.s fa0, ft0, ft1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmv_w_x:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fadd.s ft0, ft0, ft1
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fadd.s fa0, ft0, ft1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmv_w_x:
@@ -601,8 +577,7 @@ define i64 @fcvt_l_s(float %a) nounwind {
 ;
 ; RV64IF-LABEL: fcvt_l_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_l_s:
@@ -632,11 +607,11 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    lui a1, %hi(.LCPI12_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI12_0)(a1)
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fsw ft1, 4(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    fle.s s0, ft0, ft1
+; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    lui a0, %hi(.LCPI12_0)
+; RV32IF-NEXT:    flw ft0, %lo(.LCPI12_0)(a0)
+; RV32IF-NEXT:    fmv.s fs0, fa0
+; RV32IF-NEXT:    fle.s s0, ft0, fa0
 ; RV32IF-NEXT:    call __fixsfdi at plt
 ; RV32IF-NEXT:    mv a2, a0
 ; RV32IF-NEXT:    bnez s0, .LBB12_2
@@ -645,13 +620,11 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:  .LBB12_2: # %start
 ; RV32IF-NEXT:    lui a0, %hi(.LCPI12_1)
 ; RV32IF-NEXT:    flw ft0, %lo(.LCPI12_1)(a0)
-; RV32IF-NEXT:    flw ft1, 4(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    flt.s a3, ft0, ft1
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    flt.s a3, ft0, fs0
 ; RV32IF-NEXT:    li a0, -1
 ; RV32IF-NEXT:    beqz a3, .LBB12_9
 ; RV32IF-NEXT:  # %bb.3: # %start
-; RV32IF-NEXT:    feq.s a2, ft0, ft0
+; RV32IF-NEXT:    feq.s a2, fs0, fs0
 ; RV32IF-NEXT:    beqz a2, .LBB12_10
 ; RV32IF-NEXT:  .LBB12_4: # %start
 ; RV32IF-NEXT:    lui a4, 524288
@@ -665,11 +638,12 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:  .LBB12_8: # %start
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB12_9: # %start
 ; RV32IF-NEXT:    mv a0, a2
-; RV32IF-NEXT:    feq.s a2, ft0, ft0
+; RV32IF-NEXT:    feq.s a2, fs0, fs0
 ; RV32IF-NEXT:    bnez a2, .LBB12_4
 ; RV32IF-NEXT:  .LBB12_10: # %start
 ; RV32IF-NEXT:    li a0, 0
@@ -685,14 +659,13 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ;
 ; RV64IF-LABEL: fcvt_l_s_sat:
 ; RV64IF:       # %bb.0: # %start
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    feq.s a0, ft0, ft0
+; RV64IF-NEXT:    feq.s a0, fa0, fa0
 ; RV64IF-NEXT:    bnez a0, .LBB12_2
 ; RV64IF-NEXT:  # %bb.1: # %start
 ; RV64IF-NEXT:    li a0, 0
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB12_2:
-; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_l_s_sat:
@@ -839,8 +812,7 @@ define i64 @fcvt_lu_s(float %a) nounwind {
 ;
 ; RV64IF-LABEL: fcvt_lu_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_lu_s:
@@ -870,10 +842,10 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    fmv.w.x ft1, a0
+; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    fmv.w.x ft0, zero
-; RV32IF-NEXT:    fsw ft1, 4(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    fle.s s0, ft0, ft1
+; RV32IF-NEXT:    fle.s s0, ft0, fa0
 ; RV32IF-NEXT:    call __fixunssfdi at plt
 ; RV32IF-NEXT:    mv a3, a0
 ; RV32IF-NEXT:    bnez s0, .LBB14_2
@@ -882,8 +854,7 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:  .LBB14_2: # %start
 ; RV32IF-NEXT:    lui a0, %hi(.LCPI14_0)
 ; RV32IF-NEXT:    flw ft0, %lo(.LCPI14_0)(a0)
-; RV32IF-NEXT:    flw ft1, 4(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    flt.s a4, ft0, ft1
+; RV32IF-NEXT:    flt.s a4, ft0, fs0
 ; RV32IF-NEXT:    li a2, -1
 ; RV32IF-NEXT:    li a0, -1
 ; RV32IF-NEXT:    beqz a4, .LBB14_7
@@ -897,6 +868,7 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:    mv a1, a2
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB14_7: # %start
@@ -909,14 +881,13 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ;
 ; RV64IF-LABEL: fcvt_lu_s_sat:
 ; RV64IF:       # %bb.0: # %start
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    feq.s a0, ft0, ft0
+; RV64IF-NEXT:    feq.s a0, fa0, fa0
 ; RV64IF-NEXT:    bnez a0, .LBB14_2
 ; RV64IF-NEXT:  # %bb.1: # %start
 ; RV64IF-NEXT:    li a0, 0
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB14_2:
-; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_lu_s_sat:
@@ -1031,8 +1002,7 @@ define float @fcvt_s_l(i64 %a) nounwind {
 ;
 ; RV64IF-LABEL: fcvt_s_l:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.l ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.l fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_l:
@@ -1068,8 +1038,7 @@ define float @fcvt_s_lu(i64 %a) nounwind {
 ;
 ; RV64IF-LABEL: fcvt_s_lu:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.lu ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.lu fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_lu:
@@ -1096,14 +1065,12 @@ define float @fcvt_s_lu(i64 %a) nounwind {
 define float @fcvt_s_w_i8(i8 signext %a) nounwind {
 ; RV32IF-LABEL: fcvt_s_w_i8:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fcvt.s.w ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.w fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_w_i8:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.w ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.w fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_w_i8:
@@ -1130,14 +1097,12 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind {
 define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind {
 ; RV32IF-LABEL: fcvt_s_wu_i8:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fcvt.s.wu ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_wu_i8:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.wu ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_wu_i8:
@@ -1164,14 +1129,12 @@ define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind {
 define float @fcvt_s_w_i16(i16 signext %a) nounwind {
 ; RV32IF-LABEL: fcvt_s_w_i16:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fcvt.s.w ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.w fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_w_i16:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.w ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.w fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_w_i16:
@@ -1198,14 +1161,12 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind {
 define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind {
 ; RV32IF-LABEL: fcvt_s_wu_i16:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fcvt.s.wu ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_wu_i16:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.s.wu ft0, a0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fcvt.s.wu fa0, a0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_wu_i16:
@@ -1346,14 +1307,12 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, float* %1) nounwind
 define signext i16 @fcvt_w_s_i16(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_w_s_i16:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_w_s_i16:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_w_s_i16:
@@ -1380,37 +1339,35 @@ define signext i16 @fcvt_w_s_i16(float %a) nounwind {
 define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_w_s_sat_i16:
 ; RV32IF:       # %bb.0: # %start
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    feq.s a0, ft0, ft0
+; RV32IF-NEXT:    feq.s a0, fa0, fa0
 ; RV32IF-NEXT:    bnez a0, .LBB24_2
 ; RV32IF-NEXT:  # %bb.1: # %start
 ; RV32IF-NEXT:    li a0, 0
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB24_2:
 ; RV32IF-NEXT:    lui a0, %hi(.LCPI24_0)
-; RV32IF-NEXT:    flw ft1, %lo(.LCPI24_0)(a0)
+; RV32IF-NEXT:    flw ft0, %lo(.LCPI24_0)(a0)
 ; RV32IF-NEXT:    lui a0, %hi(.LCPI24_1)
-; RV32IF-NEXT:    flw ft2, %lo(.LCPI24_1)(a0)
-; RV32IF-NEXT:    fmax.s ft0, ft0, ft1
-; RV32IF-NEXT:    fmin.s ft0, ft0, ft2
+; RV32IF-NEXT:    flw ft1, %lo(.LCPI24_1)(a0)
+; RV32IF-NEXT:    fmax.s ft0, fa0, ft0
+; RV32IF-NEXT:    fmin.s ft0, ft0, ft1
 ; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_w_s_sat_i16:
 ; RV64IF:       # %bb.0: # %start
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    feq.s a0, ft0, ft0
+; RV64IF-NEXT:    feq.s a0, fa0, fa0
 ; RV64IF-NEXT:    bnez a0, .LBB24_2
 ; RV64IF-NEXT:  # %bb.1: # %start
 ; RV64IF-NEXT:    li a0, 0
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB24_2:
 ; RV64IF-NEXT:    lui a0, %hi(.LCPI24_0)
-; RV64IF-NEXT:    flw ft1, %lo(.LCPI24_0)(a0)
+; RV64IF-NEXT:    flw ft0, %lo(.LCPI24_0)(a0)
 ; RV64IF-NEXT:    lui a0, %hi(.LCPI24_1)
-; RV64IF-NEXT:    flw ft2, %lo(.LCPI24_1)(a0)
-; RV64IF-NEXT:    fmax.s ft0, ft0, ft1
-; RV64IF-NEXT:    fmin.s ft0, ft0, ft2
+; RV64IF-NEXT:    flw ft1, %lo(.LCPI24_1)(a0)
+; RV64IF-NEXT:    fmax.s ft0, fa0, ft0
+; RV64IF-NEXT:    fmin.s ft0, ft0, ft1
 ; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
 ; RV64IF-NEXT:    ret
 ;
@@ -1514,14 +1471,12 @@ declare i16 @llvm.fptosi.sat.i16.f32(float)
 define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_wu_s_i16:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_wu_s_i16:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s_i16:
@@ -1548,22 +1503,20 @@ define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
 define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_wu_s_sat_i16:
 ; RV32IF:       # %bb.0: # %start
-; RV32IF-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI26_0)(a1)
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmv.w.x ft2, zero
-; RV32IF-NEXT:    fmax.s ft1, ft1, ft2
+; RV32IF-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV32IF-NEXT:    flw ft0, %lo(.LCPI26_0)(a0)
+; RV32IF-NEXT:    fmv.w.x ft1, zero
+; RV32IF-NEXT:    fmax.s ft1, fa0, ft1
 ; RV32IF-NEXT:    fmin.s ft0, ft1, ft0
 ; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_wu_s_sat_i16:
 ; RV64IF:       # %bb.0: # %start
-; RV64IF-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV64IF-NEXT:    flw ft0, %lo(.LCPI26_0)(a1)
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmv.w.x ft2, zero
-; RV64IF-NEXT:    fmax.s ft1, ft1, ft2
+; RV64IF-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV64IF-NEXT:    flw ft0, %lo(.LCPI26_0)(a0)
+; RV64IF-NEXT:    fmv.w.x ft1, zero
+; RV64IF-NEXT:    fmax.s ft1, fa0, ft1
 ; RV64IF-NEXT:    fmin.s ft0, ft1, ft0
 ; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
 ; RV64IF-NEXT:    ret
@@ -1650,14 +1603,12 @@ declare i16 @llvm.fptoui.sat.i16.f32(float)
 define signext i8 @fcvt_w_s_i8(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_w_s_i8:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_w_s_i8:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_w_s_i8:
@@ -1684,37 +1635,35 @@ define signext i8 @fcvt_w_s_i8(float %a) nounwind {
 define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_w_s_sat_i8:
 ; RV32IF:       # %bb.0: # %start
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    feq.s a0, ft0, ft0
+; RV32IF-NEXT:    feq.s a0, fa0, fa0
 ; RV32IF-NEXT:    bnez a0, .LBB28_2
 ; RV32IF-NEXT:  # %bb.1: # %start
 ; RV32IF-NEXT:    li a0, 0
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB28_2:
 ; RV32IF-NEXT:    lui a0, %hi(.LCPI28_0)
-; RV32IF-NEXT:    flw ft1, %lo(.LCPI28_0)(a0)
+; RV32IF-NEXT:    flw ft0, %lo(.LCPI28_0)(a0)
 ; RV32IF-NEXT:    lui a0, %hi(.LCPI28_1)
-; RV32IF-NEXT:    flw ft2, %lo(.LCPI28_1)(a0)
-; RV32IF-NEXT:    fmax.s ft0, ft0, ft1
-; RV32IF-NEXT:    fmin.s ft0, ft0, ft2
+; RV32IF-NEXT:    flw ft1, %lo(.LCPI28_1)(a0)
+; RV32IF-NEXT:    fmax.s ft0, fa0, ft0
+; RV32IF-NEXT:    fmin.s ft0, ft0, ft1
 ; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_w_s_sat_i8:
 ; RV64IF:       # %bb.0: # %start
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    feq.s a0, ft0, ft0
+; RV64IF-NEXT:    feq.s a0, fa0, fa0
 ; RV64IF-NEXT:    bnez a0, .LBB28_2
 ; RV64IF-NEXT:  # %bb.1: # %start
 ; RV64IF-NEXT:    li a0, 0
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB28_2:
 ; RV64IF-NEXT:    lui a0, %hi(.LCPI28_0)
-; RV64IF-NEXT:    flw ft1, %lo(.LCPI28_0)(a0)
+; RV64IF-NEXT:    flw ft0, %lo(.LCPI28_0)(a0)
 ; RV64IF-NEXT:    lui a0, %hi(.LCPI28_1)
-; RV64IF-NEXT:    flw ft2, %lo(.LCPI28_1)(a0)
-; RV64IF-NEXT:    fmax.s ft0, ft0, ft1
-; RV64IF-NEXT:    fmin.s ft0, ft0, ft2
+; RV64IF-NEXT:    flw ft1, %lo(.LCPI28_1)(a0)
+; RV64IF-NEXT:    fmax.s ft0, fa0, ft0
+; RV64IF-NEXT:    fmin.s ft0, ft0, ft1
 ; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
 ; RV64IF-NEXT:    ret
 ;
@@ -1816,14 +1765,12 @@ declare i8 @llvm.fptosi.sat.i8.f32(float)
 define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_wu_s_i8:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_wu_s_i8:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s_i8:
@@ -1850,22 +1797,20 @@ define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
 define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_wu_s_sat_i8:
 ; RV32IF:       # %bb.0: # %start
-; RV32IF-NEXT:    lui a1, %hi(.LCPI30_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI30_0)(a1)
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmv.w.x ft2, zero
-; RV32IF-NEXT:    fmax.s ft1, ft1, ft2
+; RV32IF-NEXT:    lui a0, %hi(.LCPI30_0)
+; RV32IF-NEXT:    flw ft0, %lo(.LCPI30_0)(a0)
+; RV32IF-NEXT:    fmv.w.x ft1, zero
+; RV32IF-NEXT:    fmax.s ft1, fa0, ft1
 ; RV32IF-NEXT:    fmin.s ft0, ft1, ft0
 ; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_wu_s_sat_i8:
 ; RV64IF:       # %bb.0: # %start
-; RV64IF-NEXT:    lui a1, %hi(.LCPI30_0)
-; RV64IF-NEXT:    flw ft0, %lo(.LCPI30_0)(a1)
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmv.w.x ft2, zero
-; RV64IF-NEXT:    fmax.s ft1, ft1, ft2
+; RV64IF-NEXT:    lui a0, %hi(.LCPI30_0)
+; RV64IF-NEXT:    flw ft0, %lo(.LCPI30_0)(a0)
+; RV64IF-NEXT:    fmv.w.x ft1, zero
+; RV64IF-NEXT:    fmax.s ft1, fa0, ft1
 ; RV64IF-NEXT:    fmin.s ft0, ft1, ft0
 ; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
 ; RV64IF-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/float-fcmp.ll b/llvm/test/CodeGen/RISCV/float-fcmp.ll
index c0cd6eda64670..7464553ad325b 100644
--- a/llvm/test/CodeGen/RISCV/float-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV32IF %s
+; RUN:   -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
 ; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV64IF %s
+; RUN:   -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -36,16 +36,12 @@ define i32 @fcmp_false(float %a, float %b) nounwind {
 define i32 @fcmp_oeq(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_oeq:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    feq.s a0, ft1, ft0
+; RV32IF-NEXT:    feq.s a0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_oeq:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    feq.s a0, ft1, ft0
+; RV64IF-NEXT:    feq.s a0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_oeq:
@@ -75,16 +71,12 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind {
 define i32 @fcmp_ogt(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_ogt:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    flt.s a0, fa1, fa0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_ogt:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    flt.s a0, fa1, fa0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_ogt:
@@ -114,16 +106,12 @@ define i32 @fcmp_ogt(float %a, float %b) nounwind {
 define i32 @fcmp_oge(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_oge:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fle.s a0, ft1, ft0
+; RV32IF-NEXT:    fle.s a0, fa1, fa0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_oge:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fle.s a0, ft1, ft0
+; RV64IF-NEXT:    fle.s a0, fa1, fa0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_oge:
@@ -155,16 +143,12 @@ define i32 @fcmp_oge(float %a, float %b) nounwind {
 define i32 @fcmp_olt(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_olt:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_olt:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_olt:
@@ -194,16 +178,12 @@ define i32 @fcmp_olt(float %a, float %b) nounwind {
 define i32 @fcmp_ole(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_ole:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fle.s a0, ft1, ft0
+; RV32IF-NEXT:    fle.s a0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_ole:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fle.s a0, ft1, ft0
+; RV64IF-NEXT:    fle.s a0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_ole:
@@ -233,19 +213,15 @@ define i32 @fcmp_ole(float %a, float %b) nounwind {
 define i32 @fcmp_one(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_one:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
-; RV32IF-NEXT:    flt.s a1, ft0, ft1
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
+; RV32IF-NEXT:    flt.s a1, fa1, fa0
 ; RV32IF-NEXT:    or a0, a1, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_one:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
-; RV64IF-NEXT:    flt.s a1, ft0, ft1
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
+; RV64IF-NEXT:    flt.s a1, fa1, fa0
 ; RV64IF-NEXT:    or a0, a1, a0
 ; RV64IF-NEXT:    ret
 ;
@@ -302,19 +278,15 @@ define i32 @fcmp_one(float %a, float %b) nounwind {
 define i32 @fcmp_ord(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_ord:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    feq.s a0, ft1, ft1
-; RV32IF-NEXT:    feq.s a1, ft0, ft0
+; RV32IF-NEXT:    feq.s a0, fa1, fa1
+; RV32IF-NEXT:    feq.s a1, fa0, fa0
 ; RV32IF-NEXT:    and a0, a1, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_ord:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    feq.s a0, ft1, ft1
-; RV64IF-NEXT:    feq.s a1, ft0, ft0
+; RV64IF-NEXT:    feq.s a0, fa1, fa1
+; RV64IF-NEXT:    feq.s a1, fa0, fa0
 ; RV64IF-NEXT:    and a0, a1, a0
 ; RV64IF-NEXT:    ret
 ;
@@ -345,20 +317,16 @@ define i32 @fcmp_ord(float %a, float %b) nounwind {
 define i32 @fcmp_ueq(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_ueq:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
-; RV32IF-NEXT:    flt.s a1, ft0, ft1
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
+; RV32IF-NEXT:    flt.s a1, fa1, fa0
 ; RV32IF-NEXT:    or a0, a1, a0
 ; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_ueq:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
-; RV64IF-NEXT:    flt.s a1, ft0, ft1
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
+; RV64IF-NEXT:    flt.s a1, fa1, fa0
 ; RV64IF-NEXT:    or a0, a1, a0
 ; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    ret
@@ -416,17 +384,13 @@ define i32 @fcmp_ueq(float %a, float %b) nounwind {
 define i32 @fcmp_ugt(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_ugt:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fle.s a0, ft1, ft0
+; RV32IF-NEXT:    fle.s a0, fa0, fa1
 ; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_ugt:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fle.s a0, ft1, ft0
+; RV64IF-NEXT:    fle.s a0, fa0, fa1
 ; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    ret
 ;
@@ -457,17 +421,13 @@ define i32 @fcmp_ugt(float %a, float %b) nounwind {
 define i32 @fcmp_uge(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_uge:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
 ; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_uge:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
 ; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    ret
 ;
@@ -500,17 +460,13 @@ define i32 @fcmp_uge(float %a, float %b) nounwind {
 define i32 @fcmp_ult(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_ult:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fle.s a0, ft1, ft0
+; RV32IF-NEXT:    fle.s a0, fa1, fa0
 ; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_ult:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fle.s a0, ft1, ft0
+; RV64IF-NEXT:    fle.s a0, fa1, fa0
 ; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    ret
 ;
@@ -541,17 +497,13 @@ define i32 @fcmp_ult(float %a, float %b) nounwind {
 define i32 @fcmp_ule(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_ule:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    flt.s a0, fa1, fa0
 ; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_ule:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    flt.s a0, fa1, fa0
 ; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    ret
 ;
@@ -582,17 +534,13 @@ define i32 @fcmp_ule(float %a, float %b) nounwind {
 define i32 @fcmp_une(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_une:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    feq.s a0, ft1, ft0
+; RV32IF-NEXT:    feq.s a0, fa0, fa1
 ; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_une:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    feq.s a0, ft1, ft0
+; RV64IF-NEXT:    feq.s a0, fa0, fa1
 ; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    ret
 ;
@@ -623,20 +571,16 @@ define i32 @fcmp_une(float %a, float %b) nounwind {
 define i32 @fcmp_uno(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_uno:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    feq.s a0, ft1, ft1
-; RV32IF-NEXT:    feq.s a1, ft0, ft0
+; RV32IF-NEXT:    feq.s a0, fa1, fa1
+; RV32IF-NEXT:    feq.s a1, fa0, fa0
 ; RV32IF-NEXT:    and a0, a1, a0
 ; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_uno:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    feq.s a0, ft1, ft1
-; RV64IF-NEXT:    feq.s a1, ft0, ft0
+; RV64IF-NEXT:    feq.s a0, fa1, fa1
+; RV64IF-NEXT:    feq.s a1, fa0, fa0
 ; RV64IF-NEXT:    and a0, a1, a0
 ; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
index 7dee4588fe467..eca9a931f2974 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+f \
-; RUN:   -verify-machineinstrs -disable-strictnode-mutation \
+; RUN:   -verify-machineinstrs -disable-strictnode-mutation -target-abi=ilp32f \
 ; RUN:   | FileCheck -check-prefix=RV32IF %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+f \
-; RUN:   -verify-machineinstrs -disable-strictnode-mutation \
+; RUN:   -verify-machineinstrs -disable-strictnode-mutation -target-abi=lp64f \
 ; RUN:   | FileCheck -check-prefix=RV64IF %s
 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \
 ; RUN:   -verify-machineinstrs -disable-strictnode-mutation \
@@ -17,16 +17,12 @@ declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
 define float @sqrt_f32(float %a) nounwind strictfp {
 ; RV32IF-LABEL: sqrt_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fsqrt.s ft0, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fsqrt.s fa0, fa0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: sqrt_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fsqrt.s ft0, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fsqrt.s fa0, fa0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: sqrt_f32:
@@ -66,7 +62,7 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    sext.w a1, a1
+; RV64IF-NEXT:    sext.w a0, a0
 ; RV64IF-NEXT:    call __powisf2 at plt
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -184,40 +180,36 @@ define float @sincos_f32(float %a) nounwind strictfp {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    mv s0, a0
+; RV32IF-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    fsw fs1, 4(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    call sinf at plt
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fsw ft0, 4(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    mv a0, s0
+; RV32IF-NEXT:    fmv.s fs1, fa0
+; RV32IF-NEXT:    fmv.s fa0, fs0
 ; RV32IF-NEXT:    call cosf at plt
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    flw ft1, 4(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    fadd.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fadd.s fa0, fs1, fa0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    flw fs1, 4(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: sincos_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -32
-; RV64IF-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    mv s0, a0
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
+; RV64IF-NEXT:    fsw fs1, 0(sp) # 4-byte Folded Spill
+; RV64IF-NEXT:    fmv.s fs0, fa0
 ; RV64IF-NEXT:    call sinf at plt
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
-; RV64IF-NEXT:    mv a0, s0
+; RV64IF-NEXT:    fmv.s fs1, fa0
+; RV64IF-NEXT:    fmv.s fa0, fs0
 ; RV64IF-NEXT:    call cosf at plt
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    flw ft1, 12(sp) # 4-byte Folded Reload
-; RV64IF-NEXT:    fadd.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
-; RV64IF-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 32
+; RV64IF-NEXT:    fadd.s fa0, fs1, fa0
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
+; RV64IF-NEXT:    flw fs1, 0(sp) # 4-byte Folded Reload
+; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: sincos_f32:
@@ -522,20 +514,12 @@ declare float @llvm.experimental.constrained.fma.f32(float, float, float, metada
 define float @fma_f32(float %a, float %b, float %c) nounwind strictfp {
 ; RV32IF-LABEL: fma_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a2
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fma_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a2
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fma_f32:
@@ -564,20 +548,12 @@ declare float @llvm.experimental.constrained.fmuladd.f32(float, float, float, me
 define float @fmuladd_f32(float %a, float %b, float %c) nounwind strictfp {
 ; RV32IF-LABEL: fmuladd_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a2
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmuladd_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a2
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmuladd_f32:
@@ -1011,14 +987,12 @@ declare iXLen @llvm.experimental.constrained.lrint.iXLen.f32(float, metadata, me
 define iXLen @lrint_f32(float %a) nounwind strictfp {
 ; RV32IF-LABEL: lrint_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.w.s a0, ft0
+; RV32IF-NEXT:    fcvt.w.s a0, fa0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: lrint_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0
+; RV64IF-NEXT:    fcvt.l.s a0, fa0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: lrint_f32:
@@ -1047,14 +1021,12 @@ declare iXLen @llvm.experimental.constrained.lround.iXLen.f32(float, metadata)
 define iXLen @lround_f32(float %a) nounwind strictfp {
 ; RV32IF-LABEL: lround_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.w.s a0, ft0, rmm
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: lround_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0, rmm
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: lround_f32:
@@ -1092,8 +1064,7 @@ define i64 @llrint_f32(float %a) nounwind strictfp {
 ;
 ; RV64IF-LABEL: llrint_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0
+; RV64IF-NEXT:    fcvt.l.s a0, fa0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: llrint_f32:
@@ -1131,8 +1102,7 @@ define i64 @llround_f32(float %a) nounwind strictfp {
 ;
 ; RV64IF-LABEL: llround_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0, rmm
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: llround_f32:

diff  --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
index bfc35fed1fcfd..2a544b6245a1c 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -1,12 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+f \
-; RUN:   -verify-machineinstrs | FileCheck -check-prefix=RV32IF %s
+; RUN:   -verify-machineinstrs -target-abi=ilp32f \
+; RUN:   | FileCheck -check-prefix=RV32IF %s
 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d \
-; RUN:   -verify-machineinstrs | FileCheck -check-prefix=RV32IF %s
+; RUN:   -verify-machineinstrs -target-abi=ilp32f \
+; RUN:   | FileCheck -check-prefix=RV32IF %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+f \
-; RUN:   -verify-machineinstrs | FileCheck -check-prefix=RV64IF %s
+; RUN:   -verify-machineinstrs -target-abi=lp64f \
+; RUN:   | FileCheck -check-prefix=RV64IF %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \
-; RUN:   -verify-machineinstrs | FileCheck -check-prefix=RV64IF %s
+; RUN:   -verify-machineinstrs -target-abi=lp64d \
+; RUN:   | FileCheck -check-prefix=RV64IF %s
 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \
 ; RUN:   -verify-machineinstrs | FileCheck -check-prefix=RV32I %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 \
@@ -17,16 +21,12 @@ declare float @llvm.sqrt.f32(float)
 define float @sqrt_f32(float %a) nounwind {
 ; RV32IF-LABEL: sqrt_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fsqrt.s ft0, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fsqrt.s fa0, fa0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: sqrt_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fsqrt.s ft0, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fsqrt.s fa0, fa0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: sqrt_f32:
@@ -66,7 +66,7 @@ define float @powi_f32(float %a, i32 %b) nounwind {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    sext.w a1, a1
+; RV64IF-NEXT:    sext.w a0, a0
 ; RV64IF-NEXT:    call __powisf2 at plt
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -184,42 +184,20 @@ define float @sincos_f32(float %a) nounwind {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    mv s0, a0
+; RV32IF-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    fsw fs1, 4(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    call sinf at plt
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fsw ft0, 4(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    mv a0, s0
+; RV32IF-NEXT:    fmv.s fs1, fa0
+; RV32IF-NEXT:    fmv.s fa0, fs0
 ; RV32IF-NEXT:    call cosf at plt
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    flw ft1, 4(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    fadd.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fadd.s fa0, fs1, fa0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    flw fs1, 4(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
-; RV64IF-LABEL: sincos_f32:
-; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi sp, sp, -32
-; RV64IF-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    mv s0, a0
-; RV64IF-NEXT:    call sinf at plt
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
-; RV64IF-NEXT:    mv a0, s0
-; RV64IF-NEXT:    call cosf at plt
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    flw ft1, 12(sp) # 4-byte Folded Reload
-; RV64IF-NEXT:    fadd.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
-; RV64IF-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
-; RV64IF-NEXT:    addi sp, sp, 32
-; RV64IF-NEXT:    ret
-;
 ; RV32I-LABEL: sincos_f32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -522,20 +500,12 @@ declare float @llvm.fma.f32(float, float, float)
 define float @fma_f32(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fma_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a2
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fma_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a2
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fma_f32:
@@ -564,20 +534,12 @@ declare float @llvm.fmuladd.f32(float, float, float)
 define float @fmuladd_f32(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fmuladd_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a2
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmuladd_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a2
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmuladd_f32:
@@ -616,16 +578,12 @@ declare float @llvm.fabs.f32(float)
 define float @fabs_f32(float %a) nounwind {
 ; RV32IF-LABEL: fabs_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    lui a1, 524288
-; RV32IF-NEXT:    addi a1, a1, -1
-; RV32IF-NEXT:    and a0, a0, a1
+; RV32IF-NEXT:    fabs.s fa0, fa0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fabs_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    lui a1, 524288
-; RV64IF-NEXT:    addiw a1, a1, -1
-; RV64IF-NEXT:    and a0, a0, a1
+; RV64IF-NEXT:    fabs.s fa0, fa0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fabs_f32:
@@ -650,18 +608,12 @@ declare float @llvm.minnum.f32(float, float)
 define float @minnum_f32(float %a, float %b) nounwind {
 ; RV32IF-LABEL: minnum_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmin.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmin.s fa0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: minnum_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmin.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmin.s fa0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: minnum_f32:
@@ -690,18 +642,12 @@ declare float @llvm.maxnum.f32(float, float)
 define float @maxnum_f32(float %a, float %b) nounwind {
 ; RV32IF-LABEL: maxnum_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmax.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmax.s fa0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: maxnum_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmax.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmax.s fa0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: maxnum_f32:
@@ -747,18 +693,12 @@ declare float @llvm.copysign.f32(float, float)
 define float @copysign_f32(float %a, float %b) nounwind {
 ; RV32IF-LABEL: copysign_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fsgnj.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fsgnj.s fa0, fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: copysign_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fsgnj.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fsgnj.s fa0, fa0, fa1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: copysign_f32:
@@ -1081,14 +1021,12 @@ declare iXLen @llvm.lrint.iXLen.f32(float)
 define iXLen @lrint_f32(float %a) nounwind {
 ; RV32IF-LABEL: lrint_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.w.s a0, ft0
+; RV32IF-NEXT:    fcvt.w.s a0, fa0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: lrint_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0
+; RV64IF-NEXT:    fcvt.l.s a0, fa0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: lrint_f32:
@@ -1117,14 +1055,12 @@ declare iXLen @llvm.lround.iXLen.f32(float)
 define iXLen @lround_f32(float %a) nounwind {
 ; RV32IF-LABEL: lround_f32:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.w.s a0, ft0, rmm
+; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: lround_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0, rmm
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: lround_f32:
@@ -1162,8 +1098,7 @@ define i64 @llrint_f32(float %a) nounwind {
 ;
 ; RV64IF-LABEL: llrint_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0
+; RV64IF-NEXT:    fcvt.l.s a0, fa0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: llrint_f32:
@@ -1201,8 +1136,7 @@ define i64 @llround_f32(float %a) nounwind {
 ;
 ; RV64IF-LABEL: llround_f32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0, rmm
+; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: llround_f32:

diff  --git a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
index 17ef878bc264e..b3c2533411980 100644
--- a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
@@ -1,18 +1,18 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV32IF %s
+; RUN:   -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
 ; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV64IF %s
+; RUN:   -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
 
 define float @select_fcmp_false(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_false:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    mv a0, a1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_false:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    mv a0, a1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:    ret
   %1 = fcmp false float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -22,26 +22,20 @@ define float @select_fcmp_false(float %a, float %b) nounwind {
 define float @select_fcmp_oeq(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_oeq:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    feq.s a0, ft0, ft1
+; RV32IF-NEXT:    feq.s a0, fa0, fa1
 ; RV32IF-NEXT:    bnez a0, .LBB1_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:  .LBB1_2:
-; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_oeq:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    feq.s a0, ft0, ft1
+; RV64IF-NEXT:    feq.s a0, fa0, fa1
 ; RV64IF-NEXT:    bnez a0, .LBB1_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    fmv.s ft0, ft1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:  .LBB1_2:
-; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %1 = fcmp oeq float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -51,26 +45,20 @@ define float @select_fcmp_oeq(float %a, float %b) nounwind {
 define float @select_fcmp_ogt(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_ogt:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    flt.s a0, fa1, fa0
 ; RV32IF-NEXT:    bnez a0, .LBB2_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:  .LBB2_2:
-; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_ogt:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    flt.s a0, fa1, fa0
 ; RV64IF-NEXT:    bnez a0, .LBB2_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    fmv.s ft0, ft1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:  .LBB2_2:
-; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %1 = fcmp ogt float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -80,26 +68,20 @@ define float @select_fcmp_ogt(float %a, float %b) nounwind {
 define float @select_fcmp_oge(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_oge:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fle.s a0, ft1, ft0
+; RV32IF-NEXT:    fle.s a0, fa1, fa0
 ; RV32IF-NEXT:    bnez a0, .LBB3_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:  .LBB3_2:
-; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_oge:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fle.s a0, ft1, ft0
+; RV64IF-NEXT:    fle.s a0, fa1, fa0
 ; RV64IF-NEXT:    bnez a0, .LBB3_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    fmv.s ft0, ft1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:  .LBB3_2:
-; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %1 = fcmp oge float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -109,26 +91,20 @@ define float @select_fcmp_oge(float %a, float %b) nounwind {
 define float @select_fcmp_olt(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_olt:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    flt.s a0, ft0, ft1
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
 ; RV32IF-NEXT:    bnez a0, .LBB4_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:  .LBB4_2:
-; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_olt:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    flt.s a0, ft0, ft1
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
 ; RV64IF-NEXT:    bnez a0, .LBB4_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    fmv.s ft0, ft1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:  .LBB4_2:
-; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %1 = fcmp olt float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -138,26 +114,20 @@ define float @select_fcmp_olt(float %a, float %b) nounwind {
 define float @select_fcmp_ole(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_ole:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fle.s a0, ft0, ft1
+; RV32IF-NEXT:    fle.s a0, fa0, fa1
 ; RV32IF-NEXT:    bnez a0, .LBB5_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:  .LBB5_2:
-; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_ole:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fle.s a0, ft0, ft1
+; RV64IF-NEXT:    fle.s a0, fa0, fa1
 ; RV64IF-NEXT:    bnez a0, .LBB5_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    fmv.s ft0, ft1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:  .LBB5_2:
-; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %1 = fcmp ole float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -167,30 +137,24 @@ define float @select_fcmp_ole(float %a, float %b) nounwind {
 define float @select_fcmp_one(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_one:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    flt.s a0, ft0, ft1
-; RV32IF-NEXT:    flt.s a1, ft1, ft0
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
+; RV32IF-NEXT:    flt.s a1, fa1, fa0
 ; RV32IF-NEXT:    or a0, a1, a0
 ; RV32IF-NEXT:    bnez a0, .LBB6_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:  .LBB6_2:
-; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_one:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    flt.s a0, ft0, ft1
-; RV64IF-NEXT:    flt.s a1, ft1, ft0
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
+; RV64IF-NEXT:    flt.s a1, fa1, fa0
 ; RV64IF-NEXT:    or a0, a1, a0
 ; RV64IF-NEXT:    bnez a0, .LBB6_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    fmv.s ft0, ft1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:  .LBB6_2:
-; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %1 = fcmp one float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -200,30 +164,24 @@ define float @select_fcmp_one(float %a, float %b) nounwind {
 define float @select_fcmp_ord(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_ord:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    feq.s a0, ft1, ft1
-; RV32IF-NEXT:    feq.s a1, ft0, ft0
+; RV32IF-NEXT:    feq.s a0, fa1, fa1
+; RV32IF-NEXT:    feq.s a1, fa0, fa0
 ; RV32IF-NEXT:    and a0, a1, a0
 ; RV32IF-NEXT:    bnez a0, .LBB7_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:  .LBB7_2:
-; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_ord:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    feq.s a0, ft1, ft1
-; RV64IF-NEXT:    feq.s a1, ft0, ft0
+; RV64IF-NEXT:    feq.s a0, fa1, fa1
+; RV64IF-NEXT:    feq.s a1, fa0, fa0
 ; RV64IF-NEXT:    and a0, a1, a0
 ; RV64IF-NEXT:    bnez a0, .LBB7_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    fmv.s ft0, ft1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:  .LBB7_2:
-; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %1 = fcmp ord float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -233,30 +191,24 @@ define float @select_fcmp_ord(float %a, float %b) nounwind {
 define float @select_fcmp_ueq(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_ueq:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    flt.s a0, ft0, ft1
-; RV32IF-NEXT:    flt.s a1, ft1, ft0
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
+; RV32IF-NEXT:    flt.s a1, fa1, fa0
 ; RV32IF-NEXT:    or a0, a1, a0
 ; RV32IF-NEXT:    beqz a0, .LBB8_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:  .LBB8_2:
-; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_ueq:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    flt.s a0, ft0, ft1
-; RV64IF-NEXT:    flt.s a1, ft1, ft0
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
+; RV64IF-NEXT:    flt.s a1, fa1, fa0
 ; RV64IF-NEXT:    or a0, a1, a0
 ; RV64IF-NEXT:    beqz a0, .LBB8_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    fmv.s ft0, ft1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:  .LBB8_2:
-; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %1 = fcmp ueq float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -266,26 +218,20 @@ define float @select_fcmp_ueq(float %a, float %b) nounwind {
 define float @select_fcmp_ugt(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_ugt:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fle.s a0, ft0, ft1
+; RV32IF-NEXT:    fle.s a0, fa0, fa1
 ; RV32IF-NEXT:    beqz a0, .LBB9_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:  .LBB9_2:
-; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_ugt:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fle.s a0, ft0, ft1
+; RV64IF-NEXT:    fle.s a0, fa0, fa1
 ; RV64IF-NEXT:    beqz a0, .LBB9_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    fmv.s ft0, ft1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:  .LBB9_2:
-; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %1 = fcmp ugt float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -295,26 +241,20 @@ define float @select_fcmp_ugt(float %a, float %b) nounwind {
 define float @select_fcmp_uge(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_uge:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    flt.s a0, ft0, ft1
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
 ; RV32IF-NEXT:    beqz a0, .LBB10_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:  .LBB10_2:
-; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_uge:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    flt.s a0, ft0, ft1
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
 ; RV64IF-NEXT:    beqz a0, .LBB10_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    fmv.s ft0, ft1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:  .LBB10_2:
-; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %1 = fcmp uge float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -324,26 +264,20 @@ define float @select_fcmp_uge(float %a, float %b) nounwind {
 define float @select_fcmp_ult(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_ult:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fle.s a0, ft1, ft0
+; RV32IF-NEXT:    fle.s a0, fa1, fa0
 ; RV32IF-NEXT:    beqz a0, .LBB11_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:  .LBB11_2:
-; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_ult:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fle.s a0, ft1, ft0
+; RV64IF-NEXT:    fle.s a0, fa1, fa0
 ; RV64IF-NEXT:    beqz a0, .LBB11_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    fmv.s ft0, ft1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:  .LBB11_2:
-; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %1 = fcmp ult float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -353,26 +287,20 @@ define float @select_fcmp_ult(float %a, float %b) nounwind {
 define float @select_fcmp_ule(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_ule:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    flt.s a0, fa1, fa0
 ; RV32IF-NEXT:    beqz a0, .LBB12_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:  .LBB12_2:
-; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_ule:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    flt.s a0, fa1, fa0
 ; RV64IF-NEXT:    beqz a0, .LBB12_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    fmv.s ft0, ft1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:  .LBB12_2:
-; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %1 = fcmp ule float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -382,26 +310,20 @@ define float @select_fcmp_ule(float %a, float %b) nounwind {
 define float @select_fcmp_une(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_une:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    feq.s a0, ft0, ft1
+; RV32IF-NEXT:    feq.s a0, fa0, fa1
 ; RV32IF-NEXT:    beqz a0, .LBB13_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:  .LBB13_2:
-; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_une:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    feq.s a0, ft0, ft1
+; RV64IF-NEXT:    feq.s a0, fa0, fa1
 ; RV64IF-NEXT:    beqz a0, .LBB13_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    fmv.s ft0, ft1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:  .LBB13_2:
-; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %1 = fcmp une float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -411,30 +333,24 @@ define float @select_fcmp_une(float %a, float %b) nounwind {
 define float @select_fcmp_uno(float %a, float %b) nounwind {
 ; RV32IF-LABEL: select_fcmp_uno:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    feq.s a0, ft1, ft1
-; RV32IF-NEXT:    feq.s a1, ft0, ft0
+; RV32IF-NEXT:    feq.s a0, fa1, fa1
+; RV32IF-NEXT:    feq.s a1, fa0, fa0
 ; RV32IF-NEXT:    and a0, a1, a0
 ; RV32IF-NEXT:    beqz a0, .LBB14_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    fmv.s ft0, ft1
+; RV32IF-NEXT:    fmv.s fa0, fa1
 ; RV32IF-NEXT:  .LBB14_2:
-; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: select_fcmp_uno:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    feq.s a0, ft1, ft1
-; RV64IF-NEXT:    feq.s a1, ft0, ft0
+; RV64IF-NEXT:    feq.s a0, fa1, fa1
+; RV64IF-NEXT:    feq.s a1, fa0, fa0
 ; RV64IF-NEXT:    and a0, a1, a0
 ; RV64IF-NEXT:    beqz a0, .LBB14_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    fmv.s ft0, ft1
+; RV64IF-NEXT:    fmv.s fa0, fa1
 ; RV64IF-NEXT:  .LBB14_2:
-; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %1 = fcmp uno float %a, %b
   %2 = select i1 %1, float %a, float %b
@@ -458,25 +374,19 @@ define float @select_fcmp_true(float %a, float %b) nounwind {
 define i32 @i32_select_fcmp_oeq(float %a, float %b, i32 %c, i32 %d) nounwind {
 ; RV32IF-LABEL: i32_select_fcmp_oeq:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    feq.s a1, ft1, ft0
-; RV32IF-NEXT:    mv a0, a2
-; RV32IF-NEXT:    bnez a1, .LBB16_2
+; RV32IF-NEXT:    feq.s a2, fa0, fa1
+; RV32IF-NEXT:    bnez a2, .LBB16_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    mv a0, a3
+; RV32IF-NEXT:    mv a0, a1
 ; RV32IF-NEXT:  .LBB16_2:
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: i32_select_fcmp_oeq:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    feq.s a1, ft1, ft0
-; RV64IF-NEXT:    mv a0, a2
-; RV64IF-NEXT:    bnez a1, .LBB16_2
+; RV64IF-NEXT:    feq.s a2, fa0, fa1
+; RV64IF-NEXT:    bnez a2, .LBB16_2
 ; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    mv a0, a3
+; RV64IF-NEXT:    mv a0, a1
 ; RV64IF-NEXT:  .LBB16_2:
 ; RV64IF-NEXT:    ret
   %1 = fcmp oeq float %a, %b


        


More information about the llvm-commits mailing list