[llvm] 7b0c418 - [RISCV] Move compressible registers to the beginning of the FP allocation order.

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Mon Mar 27 17:30:17 PDT 2023


Author: Craig Topper
Date: 2023-03-27T17:29:28-07:00
New Revision: 7b0c41841eb7e1c2f56384c421918ff3fb2d9058

URL: https://github.com/llvm/llvm-project/commit/7b0c41841eb7e1c2f56384c421918ff3fb2d9058
DIFF: https://github.com/llvm/llvm-project/commit/7b0c41841eb7e1c2f56384c421918ff3fb2d9058.diff

LOG: [RISCV] Move compressible registers to the beginning of the FP allocation order.

We don't have very many compressible FP instructions, just loads and stores.
These instructions require the FP register to be in the range f8-f15.

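For illustration (not from the patch itself): the compressed FP load/store
encodings (c.flw/c.fsw on RV32, c.fld/c.fsd) use 3-bit register fields, so
the FP register must be f8-f15 and the base GPR must be x8-x15; only the
sp-relative forms (c.flwsp/c.fswsp, etc.) are unrestricted. A minimal
assembly sketch:

    flw ft0, 0(a0)  # ft0 is f0, outside f8-f15: stays a 4-byte flw
    flw fa5, 0(a0)  # fa5 is f15, inside f8-f15: eligible for a 2-byte c.flw
    fsw fa5, 0(a0)  # likewise eligible for c.fsw (base a0 is x10, in range)
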
This patch changes the FP allocation order to prioritize f10-f15 first.
These are also the FP argument registers, so I allocated them in reverse
order starting at f15 so that the registers used first for argument passing
are taken last. This appears to match GCC's allocation order.

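As a sketch of the intended effect (a hypothetical leaf function, not taken
from this patch), the allocator's first scratch FP register changes from ft0
to fa5, making the load/store pair candidates for compression:

    # before: first scratch register was ft0 (f0), never compressible
    flw    ft0, 0(a0)
    fadd.s ft0, ft0, ft0
    fsw    ft0, 0(a0)
    # after: first scratch register is fa5 (f15)
    flw    fa5, 0(a0)      # eligible for c.flw
    fadd.s fa5, fa5, fa5   # FP arithmetic has no compressed form either way
    fsw    fa5, 0(a0)      # eligible for c.fsw
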
Reviewed By: asb

Differential Revision: https://reviews.llvm.org/D146488

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVRegisterInfo.td
    llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
    llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
    llvm/test/CodeGen/RISCV/calling-conv-half.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
    llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
    llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll
    llvm/test/CodeGen/RISCV/codemodel-lowering.ll
    llvm/test/CodeGen/RISCV/copysign-casts.ll
    llvm/test/CodeGen/RISCV/double-arith-strict.ll
    llvm/test/CodeGen/RISCV/double-arith.ll
    llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
    llvm/test/CodeGen/RISCV/double-calling-conv.ll
    llvm/test/CodeGen/RISCV/double-convert-strict.ll
    llvm/test/CodeGen/RISCV/double-convert.ll
    llvm/test/CodeGen/RISCV/double-imm.ll
    llvm/test/CodeGen/RISCV/double-intrinsics.ll
    llvm/test/CodeGen/RISCV/double-mem.ll
    llvm/test/CodeGen/RISCV/double-previous-failure.ll
    llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
    llvm/test/CodeGen/RISCV/double-round-conv.ll
    llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
    llvm/test/CodeGen/RISCV/double_reduct.ll
    llvm/test/CodeGen/RISCV/float-arith-strict.ll
    llvm/test/CodeGen/RISCV/float-arith.ll
    llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
    llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
    llvm/test/CodeGen/RISCV/float-convert-strict.ll
    llvm/test/CodeGen/RISCV/float-convert.ll
    llvm/test/CodeGen/RISCV/float-imm.ll
    llvm/test/CodeGen/RISCV/float-intrinsics.ll
    llvm/test/CodeGen/RISCV/float-mem.ll
    llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
    llvm/test/CodeGen/RISCV/float-round-conv.ll
    llvm/test/CodeGen/RISCV/fp-imm.ll
    llvm/test/CodeGen/RISCV/half-arith-strict.ll
    llvm/test/CodeGen/RISCV/half-arith.ll
    llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
    llvm/test/CodeGen/RISCV/half-br-fcmp.ll
    llvm/test/CodeGen/RISCV/half-convert-strict.ll
    llvm/test/CodeGen/RISCV/half-convert.ll
    llvm/test/CodeGen/RISCV/half-fcmp.ll
    llvm/test/CodeGen/RISCV/half-imm.ll
    llvm/test/CodeGen/RISCV/half-intrinsics.ll
    llvm/test/CodeGen/RISCV/half-isnan.ll
    llvm/test/CodeGen/RISCV/half-mem.ll
    llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
    llvm/test/CodeGen/RISCV/half-round-conv.ll
    llvm/test/CodeGen/RISCV/half-select-fcmp.ll
    llvm/test/CodeGen/RISCV/half-select-icmp.ll
    llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
    llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll
    llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll
    llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
    llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
    llvm/test/CodeGen/RISCV/machine-combiner.ll
    llvm/test/CodeGen/RISCV/machine-cse.ll
    llvm/test/CodeGen/RISCV/make-compressible.mir
    llvm/test/CodeGen/RISCV/module-target-abi.ll
    llvm/test/CodeGen/RISCV/repeated-fp-divisors.ll
    llvm/test/CodeGen/RISCV/rv64f-float-convert.ll
    llvm/test/CodeGen/RISCV/rv64zfh-half-convert.ll
    llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll
    llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert.ll
    llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll
    llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
    llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
    llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll
    llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll
    llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll
    llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll
    llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
    llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
    llvm/test/CodeGen/RISCV/rvv/round-vp.ll
    llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
    llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
    llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
    llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
    llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
    llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
    llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll
    llvm/test/CodeGen/RISCV/select-const.ll
    llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
    llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
    llvm/test/CodeGen/RISCV/subtarget-features-std-ext.ll
    llvm/test/CodeGen/RISCV/vararg.ll
    llvm/test/CodeGen/RISCV/xtheadfmemidx.ll
    llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
    llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index d06453c82739..a555202db366 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -223,39 +223,44 @@ let RegAltNameIndices = [ABIRegAltName] in {
 
 // The order of registers represents the preferred allocation sequence,
 // meaning caller-save regs are listed before callee-save.
+// We start by allocating argument registers in reverse order since they are
+// compressible.
 def FPR16 : RegisterClass<"RISCV", [f16], 16, (add
-    (sequence "F%u_H", 0, 7),
-    (sequence "F%u_H", 10, 17),
-    (sequence "F%u_H", 28, 31),
-    (sequence "F%u_H", 8, 9),
-    (sequence "F%u_H", 18, 27)
+    (sequence "F%u_H", 15, 10), // fa5-fa0
+    (sequence "F%u_H", 0, 7),   // ft0-f7
+    (sequence "F%u_H", 16, 17), // fa6-fa7
+    (sequence "F%u_H", 28, 31), // ft8-ft11
+    (sequence "F%u_H", 8, 9),   // fs0-fs1
+    (sequence "F%u_H", 18, 27)  // fs2-fs11
 )>;
 
 def FPR32 : RegisterClass<"RISCV", [f32], 32, (add
+    (sequence "F%u_F", 15, 10),
     (sequence "F%u_F", 0, 7),
-    (sequence "F%u_F", 10, 17),
+    (sequence "F%u_F", 16, 17),
     (sequence "F%u_F", 28, 31),
     (sequence "F%u_F", 8, 9),
     (sequence "F%u_F", 18, 27)
 )>;
 
 def FPR32C : RegisterClass<"RISCV", [f32], 32, (add
-  (sequence "F%u_F", 10, 15),
+  (sequence "F%u_F", 15, 10),
   (sequence "F%u_F", 8, 9)
 )>;
 
 // The order of registers represents the preferred allocation sequence,
 // meaning caller-save regs are listed before callee-save.
 def FPR64 : RegisterClass<"RISCV", [f64], 64, (add
+    (sequence "F%u_D", 15, 10),
     (sequence "F%u_D", 0, 7),
-    (sequence "F%u_D", 10, 17),
+    (sequence "F%u_D", 16, 17),
     (sequence "F%u_D", 28, 31),
     (sequence "F%u_D", 8, 9),
     (sequence "F%u_D", 18, 27)
 )>;
 
 def FPR64C : RegisterClass<"RISCV", [f64], 64, (add
-  (sequence "F%u_D", 10, 15),
+  (sequence "F%u_D", 15, 10),
   (sequence "F%u_D", 8, 9)
 )>;
 

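For reference, the resulting FPR32/FPR64 preference order spelled out in ABI
names (a summary of the TableGen change above, not text from the patch):

    # fa5, fa4, fa3, fa2, fa1, fa0  (compressible argument regs, reversed)
    # ft0-ft7                       (caller-saved temporaries)
    # fa6, fa7                      (remaining argument regs)
    # ft8-ft11                      (remaining temporaries)
    # fs0, fs1                      (callee-saved, also compressible)
    # fs2-fs11                      (remaining callee-saved)
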
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll b/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
index 18b601163fa4..7111316931f1 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
@@ -24,21 +24,21 @@ define void @callee() nounwind {
 ; ILP32-LABEL: callee:
 ; ILP32:       # %bb.0:
 ; ILP32-NEXT:    lui a0, %hi(var)
-; ILP32-NEXT:    flw ft0, %lo(var)(a0)
-; ILP32-NEXT:    flw ft1, %lo(var+4)(a0)
-; ILP32-NEXT:    flw ft2, %lo(var+8)(a0)
-; ILP32-NEXT:    flw ft3, %lo(var+12)(a0)
+; ILP32-NEXT:    flw fa5, %lo(var)(a0)
+; ILP32-NEXT:    flw fa4, %lo(var+4)(a0)
+; ILP32-NEXT:    flw fa3, %lo(var+8)(a0)
+; ILP32-NEXT:    flw fa2, %lo(var+12)(a0)
 ; ILP32-NEXT:    addi a1, a0, %lo(var)
-; ILP32-NEXT:    flw ft4, 16(a1)
-; ILP32-NEXT:    flw ft5, 20(a1)
-; ILP32-NEXT:    flw ft6, 24(a1)
-; ILP32-NEXT:    flw ft7, 28(a1)
-; ILP32-NEXT:    flw fa0, 32(a1)
-; ILP32-NEXT:    flw fa1, 36(a1)
-; ILP32-NEXT:    flw fa2, 40(a1)
-; ILP32-NEXT:    flw fa3, 44(a1)
-; ILP32-NEXT:    flw fa4, 48(a1)
-; ILP32-NEXT:    flw fa5, 52(a1)
+; ILP32-NEXT:    flw fa1, 16(a1)
+; ILP32-NEXT:    flw fa0, 20(a1)
+; ILP32-NEXT:    flw ft0, 24(a1)
+; ILP32-NEXT:    flw ft1, 28(a1)
+; ILP32-NEXT:    flw ft2, 32(a1)
+; ILP32-NEXT:    flw ft3, 36(a1)
+; ILP32-NEXT:    flw ft4, 40(a1)
+; ILP32-NEXT:    flw ft5, 44(a1)
+; ILP32-NEXT:    flw ft6, 48(a1)
+; ILP32-NEXT:    flw ft7, 52(a1)
 ; ILP32-NEXT:    flw fa6, 56(a1)
 ; ILP32-NEXT:    flw fa7, 60(a1)
 ; ILP32-NEXT:    flw ft8, 64(a1)
@@ -75,40 +75,40 @@ define void @callee() nounwind {
 ; ILP32-NEXT:    fsw ft8, 64(a1)
 ; ILP32-NEXT:    fsw fa7, 60(a1)
 ; ILP32-NEXT:    fsw fa6, 56(a1)
-; ILP32-NEXT:    fsw fa5, 52(a1)
-; ILP32-NEXT:    fsw fa4, 48(a1)
-; ILP32-NEXT:    fsw fa3, 44(a1)
-; ILP32-NEXT:    fsw fa2, 40(a1)
-; ILP32-NEXT:    fsw fa1, 36(a1)
-; ILP32-NEXT:    fsw fa0, 32(a1)
-; ILP32-NEXT:    fsw ft7, 28(a1)
-; ILP32-NEXT:    fsw ft6, 24(a1)
-; ILP32-NEXT:    fsw ft5, 20(a1)
-; ILP32-NEXT:    fsw ft4, 16(a1)
-; ILP32-NEXT:    fsw ft3, %lo(var+12)(a0)
-; ILP32-NEXT:    fsw ft2, %lo(var+8)(a0)
-; ILP32-NEXT:    fsw ft1, %lo(var+4)(a0)
-; ILP32-NEXT:    fsw ft0, %lo(var)(a0)
+; ILP32-NEXT:    fsw ft7, 52(a1)
+; ILP32-NEXT:    fsw ft6, 48(a1)
+; ILP32-NEXT:    fsw ft5, 44(a1)
+; ILP32-NEXT:    fsw ft4, 40(a1)
+; ILP32-NEXT:    fsw ft3, 36(a1)
+; ILP32-NEXT:    fsw ft2, 32(a1)
+; ILP32-NEXT:    fsw ft1, 28(a1)
+; ILP32-NEXT:    fsw ft0, 24(a1)
+; ILP32-NEXT:    fsw fa0, 20(a1)
+; ILP32-NEXT:    fsw fa1, 16(a1)
+; ILP32-NEXT:    fsw fa2, %lo(var+12)(a0)
+; ILP32-NEXT:    fsw fa3, %lo(var+8)(a0)
+; ILP32-NEXT:    fsw fa4, %lo(var+4)(a0)
+; ILP32-NEXT:    fsw fa5, %lo(var)(a0)
 ; ILP32-NEXT:    ret
 ;
 ; LP64-LABEL: callee:
 ; LP64:       # %bb.0:
 ; LP64-NEXT:    lui a0, %hi(var)
-; LP64-NEXT:    flw ft0, %lo(var)(a0)
-; LP64-NEXT:    flw ft1, %lo(var+4)(a0)
-; LP64-NEXT:    flw ft2, %lo(var+8)(a0)
-; LP64-NEXT:    flw ft3, %lo(var+12)(a0)
+; LP64-NEXT:    flw fa5, %lo(var)(a0)
+; LP64-NEXT:    flw fa4, %lo(var+4)(a0)
+; LP64-NEXT:    flw fa3, %lo(var+8)(a0)
+; LP64-NEXT:    flw fa2, %lo(var+12)(a0)
 ; LP64-NEXT:    addi a1, a0, %lo(var)
-; LP64-NEXT:    flw ft4, 16(a1)
-; LP64-NEXT:    flw ft5, 20(a1)
-; LP64-NEXT:    flw ft6, 24(a1)
-; LP64-NEXT:    flw ft7, 28(a1)
-; LP64-NEXT:    flw fa0, 32(a1)
-; LP64-NEXT:    flw fa1, 36(a1)
-; LP64-NEXT:    flw fa2, 40(a1)
-; LP64-NEXT:    flw fa3, 44(a1)
-; LP64-NEXT:    flw fa4, 48(a1)
-; LP64-NEXT:    flw fa5, 52(a1)
+; LP64-NEXT:    flw fa1, 16(a1)
+; LP64-NEXT:    flw fa0, 20(a1)
+; LP64-NEXT:    flw ft0, 24(a1)
+; LP64-NEXT:    flw ft1, 28(a1)
+; LP64-NEXT:    flw ft2, 32(a1)
+; LP64-NEXT:    flw ft3, 36(a1)
+; LP64-NEXT:    flw ft4, 40(a1)
+; LP64-NEXT:    flw ft5, 44(a1)
+; LP64-NEXT:    flw ft6, 48(a1)
+; LP64-NEXT:    flw ft7, 52(a1)
 ; LP64-NEXT:    flw fa6, 56(a1)
 ; LP64-NEXT:    flw fa7, 60(a1)
 ; LP64-NEXT:    flw ft8, 64(a1)
@@ -145,20 +145,20 @@ define void @callee() nounwind {
 ; LP64-NEXT:    fsw ft8, 64(a1)
 ; LP64-NEXT:    fsw fa7, 60(a1)
 ; LP64-NEXT:    fsw fa6, 56(a1)
-; LP64-NEXT:    fsw fa5, 52(a1)
-; LP64-NEXT:    fsw fa4, 48(a1)
-; LP64-NEXT:    fsw fa3, 44(a1)
-; LP64-NEXT:    fsw fa2, 40(a1)
-; LP64-NEXT:    fsw fa1, 36(a1)
-; LP64-NEXT:    fsw fa0, 32(a1)
-; LP64-NEXT:    fsw ft7, 28(a1)
-; LP64-NEXT:    fsw ft6, 24(a1)
-; LP64-NEXT:    fsw ft5, 20(a1)
-; LP64-NEXT:    fsw ft4, 16(a1)
-; LP64-NEXT:    fsw ft3, %lo(var+12)(a0)
-; LP64-NEXT:    fsw ft2, %lo(var+8)(a0)
-; LP64-NEXT:    fsw ft1, %lo(var+4)(a0)
-; LP64-NEXT:    fsw ft0, %lo(var)(a0)
+; LP64-NEXT:    fsw ft7, 52(a1)
+; LP64-NEXT:    fsw ft6, 48(a1)
+; LP64-NEXT:    fsw ft5, 44(a1)
+; LP64-NEXT:    fsw ft4, 40(a1)
+; LP64-NEXT:    fsw ft3, 36(a1)
+; LP64-NEXT:    fsw ft2, 32(a1)
+; LP64-NEXT:    fsw ft1, 28(a1)
+; LP64-NEXT:    fsw ft0, 24(a1)
+; LP64-NEXT:    fsw fa0, 20(a1)
+; LP64-NEXT:    fsw fa1, 16(a1)
+; LP64-NEXT:    fsw fa2, %lo(var+12)(a0)
+; LP64-NEXT:    fsw fa3, %lo(var+8)(a0)
+; LP64-NEXT:    fsw fa4, %lo(var+4)(a0)
+; LP64-NEXT:    fsw fa5, %lo(var)(a0)
 ; LP64-NEXT:    ret
 ;
 ; ILP32F-LABEL: callee:
@@ -177,21 +177,21 @@ define void @callee() nounwind {
 ; ILP32F-NEXT:    fsw fs10, 4(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    fsw fs11, 0(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    lui a0, %hi(var)
-; ILP32F-NEXT:    flw ft0, %lo(var)(a0)
-; ILP32F-NEXT:    flw ft1, %lo(var+4)(a0)
-; ILP32F-NEXT:    flw ft2, %lo(var+8)(a0)
-; ILP32F-NEXT:    flw ft3, %lo(var+12)(a0)
+; ILP32F-NEXT:    flw fa5, %lo(var)(a0)
+; ILP32F-NEXT:    flw fa4, %lo(var+4)(a0)
+; ILP32F-NEXT:    flw fa3, %lo(var+8)(a0)
+; ILP32F-NEXT:    flw fa2, %lo(var+12)(a0)
 ; ILP32F-NEXT:    addi a1, a0, %lo(var)
-; ILP32F-NEXT:    flw ft4, 16(a1)
-; ILP32F-NEXT:    flw ft5, 20(a1)
-; ILP32F-NEXT:    flw ft6, 24(a1)
-; ILP32F-NEXT:    flw ft7, 28(a1)
-; ILP32F-NEXT:    flw fa0, 32(a1)
-; ILP32F-NEXT:    flw fa1, 36(a1)
-; ILP32F-NEXT:    flw fa2, 40(a1)
-; ILP32F-NEXT:    flw fa3, 44(a1)
-; ILP32F-NEXT:    flw fa4, 48(a1)
-; ILP32F-NEXT:    flw fa5, 52(a1)
+; ILP32F-NEXT:    flw fa1, 16(a1)
+; ILP32F-NEXT:    flw fa0, 20(a1)
+; ILP32F-NEXT:    flw ft0, 24(a1)
+; ILP32F-NEXT:    flw ft1, 28(a1)
+; ILP32F-NEXT:    flw ft2, 32(a1)
+; ILP32F-NEXT:    flw ft3, 36(a1)
+; ILP32F-NEXT:    flw ft4, 40(a1)
+; ILP32F-NEXT:    flw ft5, 44(a1)
+; ILP32F-NEXT:    flw ft6, 48(a1)
+; ILP32F-NEXT:    flw ft7, 52(a1)
 ; ILP32F-NEXT:    flw fa6, 56(a1)
 ; ILP32F-NEXT:    flw fa7, 60(a1)
 ; ILP32F-NEXT:    flw ft8, 64(a1)
@@ -228,20 +228,20 @@ define void @callee() nounwind {
 ; ILP32F-NEXT:    fsw ft8, 64(a1)
 ; ILP32F-NEXT:    fsw fa7, 60(a1)
 ; ILP32F-NEXT:    fsw fa6, 56(a1)
-; ILP32F-NEXT:    fsw fa5, 52(a1)
-; ILP32F-NEXT:    fsw fa4, 48(a1)
-; ILP32F-NEXT:    fsw fa3, 44(a1)
-; ILP32F-NEXT:    fsw fa2, 40(a1)
-; ILP32F-NEXT:    fsw fa1, 36(a1)
-; ILP32F-NEXT:    fsw fa0, 32(a1)
-; ILP32F-NEXT:    fsw ft7, 28(a1)
-; ILP32F-NEXT:    fsw ft6, 24(a1)
-; ILP32F-NEXT:    fsw ft5, 20(a1)
-; ILP32F-NEXT:    fsw ft4, 16(a1)
-; ILP32F-NEXT:    fsw ft3, %lo(var+12)(a0)
-; ILP32F-NEXT:    fsw ft2, %lo(var+8)(a0)
-; ILP32F-NEXT:    fsw ft1, %lo(var+4)(a0)
-; ILP32F-NEXT:    fsw ft0, %lo(var)(a0)
+; ILP32F-NEXT:    fsw ft7, 52(a1)
+; ILP32F-NEXT:    fsw ft6, 48(a1)
+; ILP32F-NEXT:    fsw ft5, 44(a1)
+; ILP32F-NEXT:    fsw ft4, 40(a1)
+; ILP32F-NEXT:    fsw ft3, 36(a1)
+; ILP32F-NEXT:    fsw ft2, 32(a1)
+; ILP32F-NEXT:    fsw ft1, 28(a1)
+; ILP32F-NEXT:    fsw ft0, 24(a1)
+; ILP32F-NEXT:    fsw fa0, 20(a1)
+; ILP32F-NEXT:    fsw fa1, 16(a1)
+; ILP32F-NEXT:    fsw fa2, %lo(var+12)(a0)
+; ILP32F-NEXT:    fsw fa3, %lo(var+8)(a0)
+; ILP32F-NEXT:    fsw fa4, %lo(var+4)(a0)
+; ILP32F-NEXT:    fsw fa5, %lo(var)(a0)
 ; ILP32F-NEXT:    flw fs0, 44(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    flw fs1, 40(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    flw fs2, 36(sp) # 4-byte Folded Reload
@@ -273,21 +273,21 @@ define void @callee() nounwind {
 ; LP64F-NEXT:    fsw fs10, 4(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    fsw fs11, 0(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    lui a0, %hi(var)
-; LP64F-NEXT:    flw ft0, %lo(var)(a0)
-; LP64F-NEXT:    flw ft1, %lo(var+4)(a0)
-; LP64F-NEXT:    flw ft2, %lo(var+8)(a0)
-; LP64F-NEXT:    flw ft3, %lo(var+12)(a0)
+; LP64F-NEXT:    flw fa5, %lo(var)(a0)
+; LP64F-NEXT:    flw fa4, %lo(var+4)(a0)
+; LP64F-NEXT:    flw fa3, %lo(var+8)(a0)
+; LP64F-NEXT:    flw fa2, %lo(var+12)(a0)
 ; LP64F-NEXT:    addi a1, a0, %lo(var)
-; LP64F-NEXT:    flw ft4, 16(a1)
-; LP64F-NEXT:    flw ft5, 20(a1)
-; LP64F-NEXT:    flw ft6, 24(a1)
-; LP64F-NEXT:    flw ft7, 28(a1)
-; LP64F-NEXT:    flw fa0, 32(a1)
-; LP64F-NEXT:    flw fa1, 36(a1)
-; LP64F-NEXT:    flw fa2, 40(a1)
-; LP64F-NEXT:    flw fa3, 44(a1)
-; LP64F-NEXT:    flw fa4, 48(a1)
-; LP64F-NEXT:    flw fa5, 52(a1)
+; LP64F-NEXT:    flw fa1, 16(a1)
+; LP64F-NEXT:    flw fa0, 20(a1)
+; LP64F-NEXT:    flw ft0, 24(a1)
+; LP64F-NEXT:    flw ft1, 28(a1)
+; LP64F-NEXT:    flw ft2, 32(a1)
+; LP64F-NEXT:    flw ft3, 36(a1)
+; LP64F-NEXT:    flw ft4, 40(a1)
+; LP64F-NEXT:    flw ft5, 44(a1)
+; LP64F-NEXT:    flw ft6, 48(a1)
+; LP64F-NEXT:    flw ft7, 52(a1)
 ; LP64F-NEXT:    flw fa6, 56(a1)
 ; LP64F-NEXT:    flw fa7, 60(a1)
 ; LP64F-NEXT:    flw ft8, 64(a1)
@@ -324,20 +324,20 @@ define void @callee() nounwind {
 ; LP64F-NEXT:    fsw ft8, 64(a1)
 ; LP64F-NEXT:    fsw fa7, 60(a1)
 ; LP64F-NEXT:    fsw fa6, 56(a1)
-; LP64F-NEXT:    fsw fa5, 52(a1)
-; LP64F-NEXT:    fsw fa4, 48(a1)
-; LP64F-NEXT:    fsw fa3, 44(a1)
-; LP64F-NEXT:    fsw fa2, 40(a1)
-; LP64F-NEXT:    fsw fa1, 36(a1)
-; LP64F-NEXT:    fsw fa0, 32(a1)
-; LP64F-NEXT:    fsw ft7, 28(a1)
-; LP64F-NEXT:    fsw ft6, 24(a1)
-; LP64F-NEXT:    fsw ft5, 20(a1)
-; LP64F-NEXT:    fsw ft4, 16(a1)
-; LP64F-NEXT:    fsw ft3, %lo(var+12)(a0)
-; LP64F-NEXT:    fsw ft2, %lo(var+8)(a0)
-; LP64F-NEXT:    fsw ft1, %lo(var+4)(a0)
-; LP64F-NEXT:    fsw ft0, %lo(var)(a0)
+; LP64F-NEXT:    fsw ft7, 52(a1)
+; LP64F-NEXT:    fsw ft6, 48(a1)
+; LP64F-NEXT:    fsw ft5, 44(a1)
+; LP64F-NEXT:    fsw ft4, 40(a1)
+; LP64F-NEXT:    fsw ft3, 36(a1)
+; LP64F-NEXT:    fsw ft2, 32(a1)
+; LP64F-NEXT:    fsw ft1, 28(a1)
+; LP64F-NEXT:    fsw ft0, 24(a1)
+; LP64F-NEXT:    fsw fa0, 20(a1)
+; LP64F-NEXT:    fsw fa1, 16(a1)
+; LP64F-NEXT:    fsw fa2, %lo(var+12)(a0)
+; LP64F-NEXT:    fsw fa3, %lo(var+8)(a0)
+; LP64F-NEXT:    fsw fa4, %lo(var+4)(a0)
+; LP64F-NEXT:    fsw fa5, %lo(var)(a0)
 ; LP64F-NEXT:    flw fs0, 44(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    flw fs1, 40(sp) # 4-byte Folded Reload
 ; LP64F-NEXT:    flw fs2, 36(sp) # 4-byte Folded Reload
@@ -369,21 +369,21 @@ define void @callee() nounwind {
 ; ILP32D-NEXT:    fsd fs10, 8(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fsd fs11, 0(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    lui a0, %hi(var)
-; ILP32D-NEXT:    flw ft0, %lo(var)(a0)
-; ILP32D-NEXT:    flw ft1, %lo(var+4)(a0)
-; ILP32D-NEXT:    flw ft2, %lo(var+8)(a0)
-; ILP32D-NEXT:    flw ft3, %lo(var+12)(a0)
+; ILP32D-NEXT:    flw fa5, %lo(var)(a0)
+; ILP32D-NEXT:    flw fa4, %lo(var+4)(a0)
+; ILP32D-NEXT:    flw fa3, %lo(var+8)(a0)
+; ILP32D-NEXT:    flw fa2, %lo(var+12)(a0)
 ; ILP32D-NEXT:    addi a1, a0, %lo(var)
-; ILP32D-NEXT:    flw ft4, 16(a1)
-; ILP32D-NEXT:    flw ft5, 20(a1)
-; ILP32D-NEXT:    flw ft6, 24(a1)
-; ILP32D-NEXT:    flw ft7, 28(a1)
-; ILP32D-NEXT:    flw fa0, 32(a1)
-; ILP32D-NEXT:    flw fa1, 36(a1)
-; ILP32D-NEXT:    flw fa2, 40(a1)
-; ILP32D-NEXT:    flw fa3, 44(a1)
-; ILP32D-NEXT:    flw fa4, 48(a1)
-; ILP32D-NEXT:    flw fa5, 52(a1)
+; ILP32D-NEXT:    flw fa1, 16(a1)
+; ILP32D-NEXT:    flw fa0, 20(a1)
+; ILP32D-NEXT:    flw ft0, 24(a1)
+; ILP32D-NEXT:    flw ft1, 28(a1)
+; ILP32D-NEXT:    flw ft2, 32(a1)
+; ILP32D-NEXT:    flw ft3, 36(a1)
+; ILP32D-NEXT:    flw ft4, 40(a1)
+; ILP32D-NEXT:    flw ft5, 44(a1)
+; ILP32D-NEXT:    flw ft6, 48(a1)
+; ILP32D-NEXT:    flw ft7, 52(a1)
 ; ILP32D-NEXT:    flw fa6, 56(a1)
 ; ILP32D-NEXT:    flw fa7, 60(a1)
 ; ILP32D-NEXT:    flw ft8, 64(a1)
@@ -420,20 +420,20 @@ define void @callee() nounwind {
 ; ILP32D-NEXT:    fsw ft8, 64(a1)
 ; ILP32D-NEXT:    fsw fa7, 60(a1)
 ; ILP32D-NEXT:    fsw fa6, 56(a1)
-; ILP32D-NEXT:    fsw fa5, 52(a1)
-; ILP32D-NEXT:    fsw fa4, 48(a1)
-; ILP32D-NEXT:    fsw fa3, 44(a1)
-; ILP32D-NEXT:    fsw fa2, 40(a1)
-; ILP32D-NEXT:    fsw fa1, 36(a1)
-; ILP32D-NEXT:    fsw fa0, 32(a1)
-; ILP32D-NEXT:    fsw ft7, 28(a1)
-; ILP32D-NEXT:    fsw ft6, 24(a1)
-; ILP32D-NEXT:    fsw ft5, 20(a1)
-; ILP32D-NEXT:    fsw ft4, 16(a1)
-; ILP32D-NEXT:    fsw ft3, %lo(var+12)(a0)
-; ILP32D-NEXT:    fsw ft2, %lo(var+8)(a0)
-; ILP32D-NEXT:    fsw ft1, %lo(var+4)(a0)
-; ILP32D-NEXT:    fsw ft0, %lo(var)(a0)
+; ILP32D-NEXT:    fsw ft7, 52(a1)
+; ILP32D-NEXT:    fsw ft6, 48(a1)
+; ILP32D-NEXT:    fsw ft5, 44(a1)
+; ILP32D-NEXT:    fsw ft4, 40(a1)
+; ILP32D-NEXT:    fsw ft3, 36(a1)
+; ILP32D-NEXT:    fsw ft2, 32(a1)
+; ILP32D-NEXT:    fsw ft1, 28(a1)
+; ILP32D-NEXT:    fsw ft0, 24(a1)
+; ILP32D-NEXT:    fsw fa0, 20(a1)
+; ILP32D-NEXT:    fsw fa1, 16(a1)
+; ILP32D-NEXT:    fsw fa2, %lo(var+12)(a0)
+; ILP32D-NEXT:    fsw fa3, %lo(var+8)(a0)
+; ILP32D-NEXT:    fsw fa4, %lo(var+4)(a0)
+; ILP32D-NEXT:    fsw fa5, %lo(var)(a0)
 ; ILP32D-NEXT:    fld fs0, 88(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fld fs1, 80(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fld fs2, 72(sp) # 8-byte Folded Reload
@@ -465,21 +465,21 @@ define void @callee() nounwind {
 ; LP64D-NEXT:    fsd fs10, 8(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fsd fs11, 0(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    lui a0, %hi(var)
-; LP64D-NEXT:    flw ft0, %lo(var)(a0)
-; LP64D-NEXT:    flw ft1, %lo(var+4)(a0)
-; LP64D-NEXT:    flw ft2, %lo(var+8)(a0)
-; LP64D-NEXT:    flw ft3, %lo(var+12)(a0)
+; LP64D-NEXT:    flw fa5, %lo(var)(a0)
+; LP64D-NEXT:    flw fa4, %lo(var+4)(a0)
+; LP64D-NEXT:    flw fa3, %lo(var+8)(a0)
+; LP64D-NEXT:    flw fa2, %lo(var+12)(a0)
 ; LP64D-NEXT:    addi a1, a0, %lo(var)
-; LP64D-NEXT:    flw ft4, 16(a1)
-; LP64D-NEXT:    flw ft5, 20(a1)
-; LP64D-NEXT:    flw ft6, 24(a1)
-; LP64D-NEXT:    flw ft7, 28(a1)
-; LP64D-NEXT:    flw fa0, 32(a1)
-; LP64D-NEXT:    flw fa1, 36(a1)
-; LP64D-NEXT:    flw fa2, 40(a1)
-; LP64D-NEXT:    flw fa3, 44(a1)
-; LP64D-NEXT:    flw fa4, 48(a1)
-; LP64D-NEXT:    flw fa5, 52(a1)
+; LP64D-NEXT:    flw fa1, 16(a1)
+; LP64D-NEXT:    flw fa0, 20(a1)
+; LP64D-NEXT:    flw ft0, 24(a1)
+; LP64D-NEXT:    flw ft1, 28(a1)
+; LP64D-NEXT:    flw ft2, 32(a1)
+; LP64D-NEXT:    flw ft3, 36(a1)
+; LP64D-NEXT:    flw ft4, 40(a1)
+; LP64D-NEXT:    flw ft5, 44(a1)
+; LP64D-NEXT:    flw ft6, 48(a1)
+; LP64D-NEXT:    flw ft7, 52(a1)
 ; LP64D-NEXT:    flw fa6, 56(a1)
 ; LP64D-NEXT:    flw fa7, 60(a1)
 ; LP64D-NEXT:    flw ft8, 64(a1)
@@ -516,20 +516,20 @@ define void @callee() nounwind {
 ; LP64D-NEXT:    fsw ft8, 64(a1)
 ; LP64D-NEXT:    fsw fa7, 60(a1)
 ; LP64D-NEXT:    fsw fa6, 56(a1)
-; LP64D-NEXT:    fsw fa5, 52(a1)
-; LP64D-NEXT:    fsw fa4, 48(a1)
-; LP64D-NEXT:    fsw fa3, 44(a1)
-; LP64D-NEXT:    fsw fa2, 40(a1)
-; LP64D-NEXT:    fsw fa1, 36(a1)
-; LP64D-NEXT:    fsw fa0, 32(a1)
-; LP64D-NEXT:    fsw ft7, 28(a1)
-; LP64D-NEXT:    fsw ft6, 24(a1)
-; LP64D-NEXT:    fsw ft5, 20(a1)
-; LP64D-NEXT:    fsw ft4, 16(a1)
-; LP64D-NEXT:    fsw ft3, %lo(var+12)(a0)
-; LP64D-NEXT:    fsw ft2, %lo(var+8)(a0)
-; LP64D-NEXT:    fsw ft1, %lo(var+4)(a0)
-; LP64D-NEXT:    fsw ft0, %lo(var)(a0)
+; LP64D-NEXT:    fsw ft7, 52(a1)
+; LP64D-NEXT:    fsw ft6, 48(a1)
+; LP64D-NEXT:    fsw ft5, 44(a1)
+; LP64D-NEXT:    fsw ft4, 40(a1)
+; LP64D-NEXT:    fsw ft3, 36(a1)
+; LP64D-NEXT:    fsw ft2, 32(a1)
+; LP64D-NEXT:    fsw ft1, 28(a1)
+; LP64D-NEXT:    fsw ft0, 24(a1)
+; LP64D-NEXT:    fsw fa0, 20(a1)
+; LP64D-NEXT:    fsw fa1, 16(a1)
+; LP64D-NEXT:    fsw fa2, %lo(var+12)(a0)
+; LP64D-NEXT:    fsw fa3, %lo(var+8)(a0)
+; LP64D-NEXT:    fsw fa4, %lo(var+4)(a0)
+; LP64D-NEXT:    fsw fa5, %lo(var)(a0)
 ; LP64D-NEXT:    fld fs0, 88(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fld fs1, 80(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fld fs2, 72(sp) # 8-byte Folded Reload
@@ -564,136 +564,136 @@ define void @caller() nounwind {
 ; ILP32-NEXT:    sw s0, 136(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    sw s1, 132(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    lui s0, %hi(var)
-; ILP32-NEXT:    flw ft0, %lo(var)(s0)
-; ILP32-NEXT:    fsw ft0, 128(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, %lo(var+4)(s0)
-; ILP32-NEXT:    fsw ft0, 124(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, %lo(var+8)(s0)
-; ILP32-NEXT:    fsw ft0, 120(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, %lo(var+12)(s0)
-; ILP32-NEXT:    fsw ft0, 116(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, %lo(var)(s0)
+; ILP32-NEXT:    fsw fa5, 128(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, %lo(var+4)(s0)
+; ILP32-NEXT:    fsw fa5, 124(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, %lo(var+8)(s0)
+; ILP32-NEXT:    fsw fa5, 120(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, %lo(var+12)(s0)
+; ILP32-NEXT:    fsw fa5, 116(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    addi s1, s0, %lo(var)
-; ILP32-NEXT:    flw ft0, 16(s1)
-; ILP32-NEXT:    fsw ft0, 112(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 20(s1)
-; ILP32-NEXT:    fsw ft0, 108(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 24(s1)
-; ILP32-NEXT:    fsw ft0, 104(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 28(s1)
-; ILP32-NEXT:    fsw ft0, 100(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 32(s1)
-; ILP32-NEXT:    fsw ft0, 96(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 36(s1)
-; ILP32-NEXT:    fsw ft0, 92(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 40(s1)
-; ILP32-NEXT:    fsw ft0, 88(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 44(s1)
-; ILP32-NEXT:    fsw ft0, 84(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 48(s1)
-; ILP32-NEXT:    fsw ft0, 80(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 52(s1)
-; ILP32-NEXT:    fsw ft0, 76(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 56(s1)
-; ILP32-NEXT:    fsw ft0, 72(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 60(s1)
-; ILP32-NEXT:    fsw ft0, 68(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 64(s1)
-; ILP32-NEXT:    fsw ft0, 64(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 68(s1)
-; ILP32-NEXT:    fsw ft0, 60(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 72(s1)
-; ILP32-NEXT:    fsw ft0, 56(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 76(s1)
-; ILP32-NEXT:    fsw ft0, 52(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 80(s1)
-; ILP32-NEXT:    fsw ft0, 48(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 84(s1)
-; ILP32-NEXT:    fsw ft0, 44(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 88(s1)
-; ILP32-NEXT:    fsw ft0, 40(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 92(s1)
-; ILP32-NEXT:    fsw ft0, 36(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 96(s1)
-; ILP32-NEXT:    fsw ft0, 32(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 100(s1)
-; ILP32-NEXT:    fsw ft0, 28(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 104(s1)
-; ILP32-NEXT:    fsw ft0, 24(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 108(s1)
-; ILP32-NEXT:    fsw ft0, 20(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 112(s1)
-; ILP32-NEXT:    fsw ft0, 16(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 116(s1)
-; ILP32-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 120(s1)
-; ILP32-NEXT:    fsw ft0, 8(sp) # 4-byte Folded Spill
-; ILP32-NEXT:    flw ft0, 124(s1)
-; ILP32-NEXT:    fsw ft0, 4(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 16(s1)
+; ILP32-NEXT:    fsw fa5, 112(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 20(s1)
+; ILP32-NEXT:    fsw fa5, 108(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 24(s1)
+; ILP32-NEXT:    fsw fa5, 104(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 28(s1)
+; ILP32-NEXT:    fsw fa5, 100(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 32(s1)
+; ILP32-NEXT:    fsw fa5, 96(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 36(s1)
+; ILP32-NEXT:    fsw fa5, 92(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 40(s1)
+; ILP32-NEXT:    fsw fa5, 88(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 44(s1)
+; ILP32-NEXT:    fsw fa5, 84(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 48(s1)
+; ILP32-NEXT:    fsw fa5, 80(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 52(s1)
+; ILP32-NEXT:    fsw fa5, 76(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 56(s1)
+; ILP32-NEXT:    fsw fa5, 72(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 60(s1)
+; ILP32-NEXT:    fsw fa5, 68(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 64(s1)
+; ILP32-NEXT:    fsw fa5, 64(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 68(s1)
+; ILP32-NEXT:    fsw fa5, 60(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 72(s1)
+; ILP32-NEXT:    fsw fa5, 56(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 76(s1)
+; ILP32-NEXT:    fsw fa5, 52(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 80(s1)
+; ILP32-NEXT:    fsw fa5, 48(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 84(s1)
+; ILP32-NEXT:    fsw fa5, 44(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 88(s1)
+; ILP32-NEXT:    fsw fa5, 40(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 92(s1)
+; ILP32-NEXT:    fsw fa5, 36(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 96(s1)
+; ILP32-NEXT:    fsw fa5, 32(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 100(s1)
+; ILP32-NEXT:    fsw fa5, 28(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 104(s1)
+; ILP32-NEXT:    fsw fa5, 24(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 108(s1)
+; ILP32-NEXT:    fsw fa5, 20(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 112(s1)
+; ILP32-NEXT:    fsw fa5, 16(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 116(s1)
+; ILP32-NEXT:    fsw fa5, 12(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 120(s1)
+; ILP32-NEXT:    fsw fa5, 8(sp) # 4-byte Folded Spill
+; ILP32-NEXT:    flw fa5, 124(s1)
+; ILP32-NEXT:    fsw fa5, 4(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    call callee@plt
-; ILP32-NEXT:    flw ft0, 4(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 124(s1)
-; ILP32-NEXT:    flw ft0, 8(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 120(s1)
-; ILP32-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 116(s1)
-; ILP32-NEXT:    flw ft0, 16(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 112(s1)
-; ILP32-NEXT:    flw ft0, 20(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 108(s1)
-; ILP32-NEXT:    flw ft0, 24(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 104(s1)
-; ILP32-NEXT:    flw ft0, 28(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 100(s1)
-; ILP32-NEXT:    flw ft0, 32(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 96(s1)
-; ILP32-NEXT:    flw ft0, 36(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 92(s1)
-; ILP32-NEXT:    flw ft0, 40(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 88(s1)
-; ILP32-NEXT:    flw ft0, 44(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 84(s1)
-; ILP32-NEXT:    flw ft0, 48(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 80(s1)
-; ILP32-NEXT:    flw ft0, 52(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 76(s1)
-; ILP32-NEXT:    flw ft0, 56(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 72(s1)
-; ILP32-NEXT:    flw ft0, 60(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 68(s1)
-; ILP32-NEXT:    flw ft0, 64(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 64(s1)
-; ILP32-NEXT:    flw ft0, 68(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 60(s1)
-; ILP32-NEXT:    flw ft0, 72(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 56(s1)
-; ILP32-NEXT:    flw ft0, 76(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 52(s1)
-; ILP32-NEXT:    flw ft0, 80(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 48(s1)
-; ILP32-NEXT:    flw ft0, 84(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 44(s1)
-; ILP32-NEXT:    flw ft0, 88(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 40(s1)
-; ILP32-NEXT:    flw ft0, 92(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 36(s1)
-; ILP32-NEXT:    flw ft0, 96(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 32(s1)
-; ILP32-NEXT:    flw ft0, 100(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 28(s1)
-; ILP32-NEXT:    flw ft0, 104(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 24(s1)
-; ILP32-NEXT:    flw ft0, 108(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 20(s1)
-; ILP32-NEXT:    flw ft0, 112(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, 16(s1)
-; ILP32-NEXT:    flw ft0, 116(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, %lo(var+12)(s0)
-; ILP32-NEXT:    flw ft0, 120(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, %lo(var+8)(s0)
-; ILP32-NEXT:    flw ft0, 124(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, %lo(var+4)(s0)
-; ILP32-NEXT:    flw ft0, 128(sp) # 4-byte Folded Reload
-; ILP32-NEXT:    fsw ft0, %lo(var)(s0)
+; ILP32-NEXT:    flw fa5, 4(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 124(s1)
+; ILP32-NEXT:    flw fa5, 8(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 120(s1)
+; ILP32-NEXT:    flw fa5, 12(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 116(s1)
+; ILP32-NEXT:    flw fa5, 16(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 112(s1)
+; ILP32-NEXT:    flw fa5, 20(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 108(s1)
+; ILP32-NEXT:    flw fa5, 24(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 104(s1)
+; ILP32-NEXT:    flw fa5, 28(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 100(s1)
+; ILP32-NEXT:    flw fa5, 32(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 96(s1)
+; ILP32-NEXT:    flw fa5, 36(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 92(s1)
+; ILP32-NEXT:    flw fa5, 40(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 88(s1)
+; ILP32-NEXT:    flw fa5, 44(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 84(s1)
+; ILP32-NEXT:    flw fa5, 48(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 80(s1)
+; ILP32-NEXT:    flw fa5, 52(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 76(s1)
+; ILP32-NEXT:    flw fa5, 56(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 72(s1)
+; ILP32-NEXT:    flw fa5, 60(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 68(s1)
+; ILP32-NEXT:    flw fa5, 64(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 64(s1)
+; ILP32-NEXT:    flw fa5, 68(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 60(s1)
+; ILP32-NEXT:    flw fa5, 72(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 56(s1)
+; ILP32-NEXT:    flw fa5, 76(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 52(s1)
+; ILP32-NEXT:    flw fa5, 80(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 48(s1)
+; ILP32-NEXT:    flw fa5, 84(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 44(s1)
+; ILP32-NEXT:    flw fa5, 88(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 40(s1)
+; ILP32-NEXT:    flw fa5, 92(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 36(s1)
+; ILP32-NEXT:    flw fa5, 96(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 32(s1)
+; ILP32-NEXT:    flw fa5, 100(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 28(s1)
+; ILP32-NEXT:    flw fa5, 104(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 24(s1)
+; ILP32-NEXT:    flw fa5, 108(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 20(s1)
+; ILP32-NEXT:    flw fa5, 112(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, 16(s1)
+; ILP32-NEXT:    flw fa5, 116(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, %lo(var+12)(s0)
+; ILP32-NEXT:    flw fa5, 120(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, %lo(var+8)(s0)
+; ILP32-NEXT:    flw fa5, 124(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, %lo(var+4)(s0)
+; ILP32-NEXT:    flw fa5, 128(sp) # 4-byte Folded Reload
+; ILP32-NEXT:    fsw fa5, %lo(var)(s0)
 ; ILP32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    lw s1, 132(sp) # 4-byte Folded Reload
@@ -707,136 +707,136 @@ define void @caller() nounwind {
 ; LP64-NEXT:    sd s0, 144(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    sd s1, 136(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    lui s0, %hi(var)
-; LP64-NEXT:    flw ft0, %lo(var)(s0)
-; LP64-NEXT:    fsw ft0, 132(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, %lo(var+4)(s0)
-; LP64-NEXT:    fsw ft0, 128(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, %lo(var+8)(s0)
-; LP64-NEXT:    fsw ft0, 124(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, %lo(var+12)(s0)
-; LP64-NEXT:    fsw ft0, 120(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, %lo(var)(s0)
+; LP64-NEXT:    fsw fa5, 132(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, %lo(var+4)(s0)
+; LP64-NEXT:    fsw fa5, 128(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, %lo(var+8)(s0)
+; LP64-NEXT:    fsw fa5, 124(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, %lo(var+12)(s0)
+; LP64-NEXT:    fsw fa5, 120(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    addi s1, s0, %lo(var)
-; LP64-NEXT:    flw ft0, 16(s1)
-; LP64-NEXT:    fsw ft0, 116(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 20(s1)
-; LP64-NEXT:    fsw ft0, 112(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 24(s1)
-; LP64-NEXT:    fsw ft0, 108(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 28(s1)
-; LP64-NEXT:    fsw ft0, 104(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 32(s1)
-; LP64-NEXT:    fsw ft0, 100(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 36(s1)
-; LP64-NEXT:    fsw ft0, 96(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 40(s1)
-; LP64-NEXT:    fsw ft0, 92(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 44(s1)
-; LP64-NEXT:    fsw ft0, 88(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 48(s1)
-; LP64-NEXT:    fsw ft0, 84(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 52(s1)
-; LP64-NEXT:    fsw ft0, 80(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 56(s1)
-; LP64-NEXT:    fsw ft0, 76(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 60(s1)
-; LP64-NEXT:    fsw ft0, 72(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 64(s1)
-; LP64-NEXT:    fsw ft0, 68(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 68(s1)
-; LP64-NEXT:    fsw ft0, 64(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 72(s1)
-; LP64-NEXT:    fsw ft0, 60(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 76(s1)
-; LP64-NEXT:    fsw ft0, 56(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 80(s1)
-; LP64-NEXT:    fsw ft0, 52(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 84(s1)
-; LP64-NEXT:    fsw ft0, 48(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 88(s1)
-; LP64-NEXT:    fsw ft0, 44(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 92(s1)
-; LP64-NEXT:    fsw ft0, 40(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 96(s1)
-; LP64-NEXT:    fsw ft0, 36(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 100(s1)
-; LP64-NEXT:    fsw ft0, 32(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 104(s1)
-; LP64-NEXT:    fsw ft0, 28(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 108(s1)
-; LP64-NEXT:    fsw ft0, 24(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 112(s1)
-; LP64-NEXT:    fsw ft0, 20(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 116(s1)
-; LP64-NEXT:    fsw ft0, 16(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 120(s1)
-; LP64-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
-; LP64-NEXT:    flw ft0, 124(s1)
-; LP64-NEXT:    fsw ft0, 8(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 16(s1)
+; LP64-NEXT:    fsw fa5, 116(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 20(s1)
+; LP64-NEXT:    fsw fa5, 112(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 24(s1)
+; LP64-NEXT:    fsw fa5, 108(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 28(s1)
+; LP64-NEXT:    fsw fa5, 104(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 32(s1)
+; LP64-NEXT:    fsw fa5, 100(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 36(s1)
+; LP64-NEXT:    fsw fa5, 96(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 40(s1)
+; LP64-NEXT:    fsw fa5, 92(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 44(s1)
+; LP64-NEXT:    fsw fa5, 88(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 48(s1)
+; LP64-NEXT:    fsw fa5, 84(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 52(s1)
+; LP64-NEXT:    fsw fa5, 80(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 56(s1)
+; LP64-NEXT:    fsw fa5, 76(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 60(s1)
+; LP64-NEXT:    fsw fa5, 72(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 64(s1)
+; LP64-NEXT:    fsw fa5, 68(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 68(s1)
+; LP64-NEXT:    fsw fa5, 64(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 72(s1)
+; LP64-NEXT:    fsw fa5, 60(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 76(s1)
+; LP64-NEXT:    fsw fa5, 56(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 80(s1)
+; LP64-NEXT:    fsw fa5, 52(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 84(s1)
+; LP64-NEXT:    fsw fa5, 48(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 88(s1)
+; LP64-NEXT:    fsw fa5, 44(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 92(s1)
+; LP64-NEXT:    fsw fa5, 40(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 96(s1)
+; LP64-NEXT:    fsw fa5, 36(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 100(s1)
+; LP64-NEXT:    fsw fa5, 32(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 104(s1)
+; LP64-NEXT:    fsw fa5, 28(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 108(s1)
+; LP64-NEXT:    fsw fa5, 24(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 112(s1)
+; LP64-NEXT:    fsw fa5, 20(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 116(s1)
+; LP64-NEXT:    fsw fa5, 16(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 120(s1)
+; LP64-NEXT:    fsw fa5, 12(sp) # 4-byte Folded Spill
+; LP64-NEXT:    flw fa5, 124(s1)
+; LP64-NEXT:    fsw fa5, 8(sp) # 4-byte Folded Spill
 ; LP64-NEXT:    call callee@plt
-; LP64-NEXT:    flw ft0, 8(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 124(s1)
-; LP64-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 120(s1)
-; LP64-NEXT:    flw ft0, 16(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 116(s1)
-; LP64-NEXT:    flw ft0, 20(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 112(s1)
-; LP64-NEXT:    flw ft0, 24(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 108(s1)
-; LP64-NEXT:    flw ft0, 28(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 104(s1)
-; LP64-NEXT:    flw ft0, 32(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 100(s1)
-; LP64-NEXT:    flw ft0, 36(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 96(s1)
-; LP64-NEXT:    flw ft0, 40(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 92(s1)
-; LP64-NEXT:    flw ft0, 44(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 88(s1)
-; LP64-NEXT:    flw ft0, 48(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 84(s1)
-; LP64-NEXT:    flw ft0, 52(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 80(s1)
-; LP64-NEXT:    flw ft0, 56(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 76(s1)
-; LP64-NEXT:    flw ft0, 60(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 72(s1)
-; LP64-NEXT:    flw ft0, 64(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 68(s1)
-; LP64-NEXT:    flw ft0, 68(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 64(s1)
-; LP64-NEXT:    flw ft0, 72(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 60(s1)
-; LP64-NEXT:    flw ft0, 76(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 56(s1)
-; LP64-NEXT:    flw ft0, 80(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 52(s1)
-; LP64-NEXT:    flw ft0, 84(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 48(s1)
-; LP64-NEXT:    flw ft0, 88(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 44(s1)
-; LP64-NEXT:    flw ft0, 92(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 40(s1)
-; LP64-NEXT:    flw ft0, 96(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 36(s1)
-; LP64-NEXT:    flw ft0, 100(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 32(s1)
-; LP64-NEXT:    flw ft0, 104(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 28(s1)
-; LP64-NEXT:    flw ft0, 108(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 24(s1)
-; LP64-NEXT:    flw ft0, 112(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 20(s1)
-; LP64-NEXT:    flw ft0, 116(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, 16(s1)
-; LP64-NEXT:    flw ft0, 120(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, %lo(var+12)(s0)
-; LP64-NEXT:    flw ft0, 124(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, %lo(var+8)(s0)
-; LP64-NEXT:    flw ft0, 128(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, %lo(var+4)(s0)
-; LP64-NEXT:    flw ft0, 132(sp) # 4-byte Folded Reload
-; LP64-NEXT:    fsw ft0, %lo(var)(s0)
+; LP64-NEXT:    flw fa5, 8(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 124(s1)
+; LP64-NEXT:    flw fa5, 12(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 120(s1)
+; LP64-NEXT:    flw fa5, 16(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 116(s1)
+; LP64-NEXT:    flw fa5, 20(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 112(s1)
+; LP64-NEXT:    flw fa5, 24(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 108(s1)
+; LP64-NEXT:    flw fa5, 28(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 104(s1)
+; LP64-NEXT:    flw fa5, 32(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 100(s1)
+; LP64-NEXT:    flw fa5, 36(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 96(s1)
+; LP64-NEXT:    flw fa5, 40(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 92(s1)
+; LP64-NEXT:    flw fa5, 44(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 88(s1)
+; LP64-NEXT:    flw fa5, 48(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 84(s1)
+; LP64-NEXT:    flw fa5, 52(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 80(s1)
+; LP64-NEXT:    flw fa5, 56(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 76(s1)
+; LP64-NEXT:    flw fa5, 60(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 72(s1)
+; LP64-NEXT:    flw fa5, 64(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 68(s1)
+; LP64-NEXT:    flw fa5, 68(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 64(s1)
+; LP64-NEXT:    flw fa5, 72(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 60(s1)
+; LP64-NEXT:    flw fa5, 76(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 56(s1)
+; LP64-NEXT:    flw fa5, 80(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 52(s1)
+; LP64-NEXT:    flw fa5, 84(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 48(s1)
+; LP64-NEXT:    flw fa5, 88(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 44(s1)
+; LP64-NEXT:    flw fa5, 92(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 40(s1)
+; LP64-NEXT:    flw fa5, 96(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 36(s1)
+; LP64-NEXT:    flw fa5, 100(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 32(s1)
+; LP64-NEXT:    flw fa5, 104(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 28(s1)
+; LP64-NEXT:    flw fa5, 108(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 24(s1)
+; LP64-NEXT:    flw fa5, 112(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 20(s1)
+; LP64-NEXT:    flw fa5, 116(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, 16(s1)
+; LP64-NEXT:    flw fa5, 120(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, %lo(var+12)(s0)
+; LP64-NEXT:    flw fa5, 124(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, %lo(var+8)(s0)
+; LP64-NEXT:    flw fa5, 128(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, %lo(var+4)(s0)
+; LP64-NEXT:    flw fa5, 132(sp) # 4-byte Folded Reload
+; LP64-NEXT:    fsw fa5, %lo(var)(s0)
 ; LP64-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    ld s0, 144(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    ld s1, 136(sp) # 8-byte Folded Reload
@@ -862,47 +862,47 @@ define void @caller() nounwind {
 ; ILP32F-NEXT:    fsw fs10, 88(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    fsw fs11, 84(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    lui s0, %hi(var)
-; ILP32F-NEXT:    flw ft0, %lo(var)(s0)
-; ILP32F-NEXT:    fsw ft0, 80(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, %lo(var+4)(s0)
-; ILP32F-NEXT:    fsw ft0, 76(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, %lo(var+8)(s0)
-; ILP32F-NEXT:    fsw ft0, 72(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, %lo(var+12)(s0)
-; ILP32F-NEXT:    fsw ft0, 68(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, %lo(var)(s0)
+; ILP32F-NEXT:    fsw fa5, 80(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, %lo(var+4)(s0)
+; ILP32F-NEXT:    fsw fa5, 76(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, %lo(var+8)(s0)
+; ILP32F-NEXT:    fsw fa5, 72(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, %lo(var+12)(s0)
+; ILP32F-NEXT:    fsw fa5, 68(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    addi s1, s0, %lo(var)
-; ILP32F-NEXT:    flw ft0, 16(s1)
-; ILP32F-NEXT:    fsw ft0, 64(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 20(s1)
-; ILP32F-NEXT:    fsw ft0, 60(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 24(s1)
-; ILP32F-NEXT:    fsw ft0, 56(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 28(s1)
-; ILP32F-NEXT:    fsw ft0, 52(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 32(s1)
-; ILP32F-NEXT:    fsw ft0, 48(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 36(s1)
-; ILP32F-NEXT:    fsw ft0, 44(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 40(s1)
-; ILP32F-NEXT:    fsw ft0, 40(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 44(s1)
-; ILP32F-NEXT:    fsw ft0, 36(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 48(s1)
-; ILP32F-NEXT:    fsw ft0, 32(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 52(s1)
-; ILP32F-NEXT:    fsw ft0, 28(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 56(s1)
-; ILP32F-NEXT:    fsw ft0, 24(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 60(s1)
-; ILP32F-NEXT:    fsw ft0, 20(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 64(s1)
-; ILP32F-NEXT:    fsw ft0, 16(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 68(s1)
-; ILP32F-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 72(s1)
-; ILP32F-NEXT:    fsw ft0, 8(sp) # 4-byte Folded Spill
-; ILP32F-NEXT:    flw ft0, 76(s1)
-; ILP32F-NEXT:    fsw ft0, 4(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 16(s1)
+; ILP32F-NEXT:    fsw fa5, 64(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 20(s1)
+; ILP32F-NEXT:    fsw fa5, 60(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 24(s1)
+; ILP32F-NEXT:    fsw fa5, 56(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 28(s1)
+; ILP32F-NEXT:    fsw fa5, 52(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 32(s1)
+; ILP32F-NEXT:    fsw fa5, 48(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 36(s1)
+; ILP32F-NEXT:    fsw fa5, 44(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 40(s1)
+; ILP32F-NEXT:    fsw fa5, 40(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 44(s1)
+; ILP32F-NEXT:    fsw fa5, 36(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 48(s1)
+; ILP32F-NEXT:    fsw fa5, 32(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 52(s1)
+; ILP32F-NEXT:    fsw fa5, 28(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 56(s1)
+; ILP32F-NEXT:    fsw fa5, 24(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 60(s1)
+; ILP32F-NEXT:    fsw fa5, 20(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 64(s1)
+; ILP32F-NEXT:    fsw fa5, 16(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 68(s1)
+; ILP32F-NEXT:    fsw fa5, 12(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 72(s1)
+; ILP32F-NEXT:    fsw fa5, 8(sp) # 4-byte Folded Spill
+; ILP32F-NEXT:    flw fa5, 76(s1)
+; ILP32F-NEXT:    fsw fa5, 4(sp) # 4-byte Folded Spill
 ; ILP32F-NEXT:    flw fs8, 80(s1)
 ; ILP32F-NEXT:    flw fs9, 84(s1)
 ; ILP32F-NEXT:    flw fs10, 88(s1)
@@ -928,46 +928,46 @@ define void @caller() nounwind {
 ; ILP32F-NEXT:    fsw fs10, 88(s1)
 ; ILP32F-NEXT:    fsw fs9, 84(s1)
 ; ILP32F-NEXT:    fsw fs8, 80(s1)
-; ILP32F-NEXT:    flw ft0, 4(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 76(s1)
-; ILP32F-NEXT:    flw ft0, 8(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 72(s1)
-; ILP32F-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 68(s1)
-; ILP32F-NEXT:    flw ft0, 16(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 64(s1)
-; ILP32F-NEXT:    flw ft0, 20(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 60(s1)
-; ILP32F-NEXT:    flw ft0, 24(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 56(s1)
-; ILP32F-NEXT:    flw ft0, 28(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 52(s1)
-; ILP32F-NEXT:    flw ft0, 32(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 48(s1)
-; ILP32F-NEXT:    flw ft0, 36(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 44(s1)
-; ILP32F-NEXT:    flw ft0, 40(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 40(s1)
-; ILP32F-NEXT:    flw ft0, 44(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 36(s1)
-; ILP32F-NEXT:    flw ft0, 48(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 32(s1)
-; ILP32F-NEXT:    flw ft0, 52(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 28(s1)
-; ILP32F-NEXT:    flw ft0, 56(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 24(s1)
-; ILP32F-NEXT:    flw ft0, 60(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 20(s1)
-; ILP32F-NEXT:    flw ft0, 64(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, 16(s1)
-; ILP32F-NEXT:    flw ft0, 68(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, %lo(var+12)(s0)
-; ILP32F-NEXT:    flw ft0, 72(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, %lo(var+8)(s0)
-; ILP32F-NEXT:    flw ft0, 76(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, %lo(var+4)(s0)
-; ILP32F-NEXT:    flw ft0, 80(sp) # 4-byte Folded Reload
-; ILP32F-NEXT:    fsw ft0, %lo(var)(s0)
+; ILP32F-NEXT:    flw fa5, 4(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 76(s1)
+; ILP32F-NEXT:    flw fa5, 8(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 72(s1)
+; ILP32F-NEXT:    flw fa5, 12(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 68(s1)
+; ILP32F-NEXT:    flw fa5, 16(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 64(s1)
+; ILP32F-NEXT:    flw fa5, 20(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 60(s1)
+; ILP32F-NEXT:    flw fa5, 24(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 56(s1)
+; ILP32F-NEXT:    flw fa5, 28(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 52(s1)
+; ILP32F-NEXT:    flw fa5, 32(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 48(s1)
+; ILP32F-NEXT:    flw fa5, 36(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 44(s1)
+; ILP32F-NEXT:    flw fa5, 40(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 40(s1)
+; ILP32F-NEXT:    flw fa5, 44(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 36(s1)
+; ILP32F-NEXT:    flw fa5, 48(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 32(s1)
+; ILP32F-NEXT:    flw fa5, 52(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 28(s1)
+; ILP32F-NEXT:    flw fa5, 56(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 24(s1)
+; ILP32F-NEXT:    flw fa5, 60(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 20(s1)
+; ILP32F-NEXT:    flw fa5, 64(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, 16(s1)
+; ILP32F-NEXT:    flw fa5, 68(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, %lo(var+12)(s0)
+; ILP32F-NEXT:    flw fa5, 72(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, %lo(var+8)(s0)
+; ILP32F-NEXT:    flw fa5, 76(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, %lo(var+4)(s0)
+; ILP32F-NEXT:    flw fa5, 80(sp) # 4-byte Folded Reload
+; ILP32F-NEXT:    fsw fa5, %lo(var)(s0)
 ; ILP32F-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
 ; ILP32F-NEXT:    lw s1, 132(sp) # 4-byte Folded Reload
@@ -1005,47 +1005,47 @@ define void @caller() nounwind {
 ; LP64F-NEXT:    fsw fs10, 92(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    fsw fs11, 88(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    lui s0, %hi(var)
-; LP64F-NEXT:    flw ft0, %lo(var)(s0)
-; LP64F-NEXT:    fsw ft0, 84(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, %lo(var+4)(s0)
-; LP64F-NEXT:    fsw ft0, 80(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, %lo(var+8)(s0)
-; LP64F-NEXT:    fsw ft0, 76(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, %lo(var+12)(s0)
-; LP64F-NEXT:    fsw ft0, 72(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, %lo(var)(s0)
+; LP64F-NEXT:    fsw fa5, 84(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, %lo(var+4)(s0)
+; LP64F-NEXT:    fsw fa5, 80(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, %lo(var+8)(s0)
+; LP64F-NEXT:    fsw fa5, 76(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, %lo(var+12)(s0)
+; LP64F-NEXT:    fsw fa5, 72(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    addi s1, s0, %lo(var)
-; LP64F-NEXT:    flw ft0, 16(s1)
-; LP64F-NEXT:    fsw ft0, 68(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 20(s1)
-; LP64F-NEXT:    fsw ft0, 64(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 24(s1)
-; LP64F-NEXT:    fsw ft0, 60(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 28(s1)
-; LP64F-NEXT:    fsw ft0, 56(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 32(s1)
-; LP64F-NEXT:    fsw ft0, 52(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 36(s1)
-; LP64F-NEXT:    fsw ft0, 48(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 40(s1)
-; LP64F-NEXT:    fsw ft0, 44(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 44(s1)
-; LP64F-NEXT:    fsw ft0, 40(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 48(s1)
-; LP64F-NEXT:    fsw ft0, 36(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 52(s1)
-; LP64F-NEXT:    fsw ft0, 32(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 56(s1)
-; LP64F-NEXT:    fsw ft0, 28(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 60(s1)
-; LP64F-NEXT:    fsw ft0, 24(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 64(s1)
-; LP64F-NEXT:    fsw ft0, 20(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 68(s1)
-; LP64F-NEXT:    fsw ft0, 16(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 72(s1)
-; LP64F-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
-; LP64F-NEXT:    flw ft0, 76(s1)
-; LP64F-NEXT:    fsw ft0, 8(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 16(s1)
+; LP64F-NEXT:    fsw fa5, 68(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 20(s1)
+; LP64F-NEXT:    fsw fa5, 64(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 24(s1)
+; LP64F-NEXT:    fsw fa5, 60(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 28(s1)
+; LP64F-NEXT:    fsw fa5, 56(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 32(s1)
+; LP64F-NEXT:    fsw fa5, 52(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 36(s1)
+; LP64F-NEXT:    fsw fa5, 48(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 40(s1)
+; LP64F-NEXT:    fsw fa5, 44(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 44(s1)
+; LP64F-NEXT:    fsw fa5, 40(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 48(s1)
+; LP64F-NEXT:    fsw fa5, 36(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 52(s1)
+; LP64F-NEXT:    fsw fa5, 32(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 56(s1)
+; LP64F-NEXT:    fsw fa5, 28(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 60(s1)
+; LP64F-NEXT:    fsw fa5, 24(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 64(s1)
+; LP64F-NEXT:    fsw fa5, 20(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 68(s1)
+; LP64F-NEXT:    fsw fa5, 16(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 72(s1)
+; LP64F-NEXT:    fsw fa5, 12(sp) # 4-byte Folded Spill
+; LP64F-NEXT:    flw fa5, 76(s1)
+; LP64F-NEXT:    fsw fa5, 8(sp) # 4-byte Folded Spill
 ; LP64F-NEXT:    flw fs8, 80(s1)
 ; LP64F-NEXT:    flw fs9, 84(s1)
 ; LP64F-NEXT:    flw fs10, 88(s1)
@@ -1071,46 +1071,46 @@ define void @caller() nounwind {
 ; LP64F-NEXT:    fsw fs10, 88(s1)
 ; LP64F-NEXT:    fsw fs9, 84(s1)
 ; LP64F-NEXT:    fsw fs8, 80(s1)
-; LP64F-NEXT:    flw ft0, 8(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 76(s1)
-; LP64F-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 72(s1)
-; LP64F-NEXT:    flw ft0, 16(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 68(s1)
-; LP64F-NEXT:    flw ft0, 20(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 64(s1)
-; LP64F-NEXT:    flw ft0, 24(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 60(s1)
-; LP64F-NEXT:    flw ft0, 28(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 56(s1)
-; LP64F-NEXT:    flw ft0, 32(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 52(s1)
-; LP64F-NEXT:    flw ft0, 36(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 48(s1)
-; LP64F-NEXT:    flw ft0, 40(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 44(s1)
-; LP64F-NEXT:    flw ft0, 44(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 40(s1)
-; LP64F-NEXT:    flw ft0, 48(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 36(s1)
-; LP64F-NEXT:    flw ft0, 52(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 32(s1)
-; LP64F-NEXT:    flw ft0, 56(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 28(s1)
-; LP64F-NEXT:    flw ft0, 60(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 24(s1)
-; LP64F-NEXT:    flw ft0, 64(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 20(s1)
-; LP64F-NEXT:    flw ft0, 68(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, 16(s1)
-; LP64F-NEXT:    flw ft0, 72(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, %lo(var+12)(s0)
-; LP64F-NEXT:    flw ft0, 76(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, %lo(var+8)(s0)
-; LP64F-NEXT:    flw ft0, 80(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, %lo(var+4)(s0)
-; LP64F-NEXT:    flw ft0, 84(sp) # 4-byte Folded Reload
-; LP64F-NEXT:    fsw ft0, %lo(var)(s0)
+; LP64F-NEXT:    flw fa5, 8(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 76(s1)
+; LP64F-NEXT:    flw fa5, 12(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 72(s1)
+; LP64F-NEXT:    flw fa5, 16(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 68(s1)
+; LP64F-NEXT:    flw fa5, 20(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 64(s1)
+; LP64F-NEXT:    flw fa5, 24(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 60(s1)
+; LP64F-NEXT:    flw fa5, 28(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 56(s1)
+; LP64F-NEXT:    flw fa5, 32(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 52(s1)
+; LP64F-NEXT:    flw fa5, 36(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 48(s1)
+; LP64F-NEXT:    flw fa5, 40(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 44(s1)
+; LP64F-NEXT:    flw fa5, 44(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 40(s1)
+; LP64F-NEXT:    flw fa5, 48(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 36(s1)
+; LP64F-NEXT:    flw fa5, 52(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 32(s1)
+; LP64F-NEXT:    flw fa5, 56(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 28(s1)
+; LP64F-NEXT:    flw fa5, 60(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 24(s1)
+; LP64F-NEXT:    flw fa5, 64(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 20(s1)
+; LP64F-NEXT:    flw fa5, 68(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, 16(s1)
+; LP64F-NEXT:    flw fa5, 72(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, %lo(var+12)(s0)
+; LP64F-NEXT:    flw fa5, 76(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, %lo(var+8)(s0)
+; LP64F-NEXT:    flw fa5, 80(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, %lo(var+4)(s0)
+; LP64F-NEXT:    flw fa5, 84(sp) # 4-byte Folded Reload
+; LP64F-NEXT:    fsw fa5, %lo(var)(s0)
 ; LP64F-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
 ; LP64F-NEXT:    ld s0, 144(sp) # 8-byte Folded Reload
 ; LP64F-NEXT:    ld s1, 136(sp) # 8-byte Folded Reload
@@ -1148,47 +1148,47 @@ define void @caller() nounwind {
 ; ILP32D-NEXT:    fsd fs10, 88(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fsd fs11, 80(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    lui s0, %hi(var)
-; ILP32D-NEXT:    flw ft0, %lo(var)(s0)
-; ILP32D-NEXT:    fsw ft0, 76(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, %lo(var+4)(s0)
-; ILP32D-NEXT:    fsw ft0, 72(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, %lo(var+8)(s0)
-; ILP32D-NEXT:    fsw ft0, 68(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, %lo(var+12)(s0)
-; ILP32D-NEXT:    fsw ft0, 64(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, %lo(var)(s0)
+; ILP32D-NEXT:    fsw fa5, 76(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, %lo(var+4)(s0)
+; ILP32D-NEXT:    fsw fa5, 72(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, %lo(var+8)(s0)
+; ILP32D-NEXT:    fsw fa5, 68(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, %lo(var+12)(s0)
+; ILP32D-NEXT:    fsw fa5, 64(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    addi s1, s0, %lo(var)
-; ILP32D-NEXT:    flw ft0, 16(s1)
-; ILP32D-NEXT:    fsw ft0, 60(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 20(s1)
-; ILP32D-NEXT:    fsw ft0, 56(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 24(s1)
-; ILP32D-NEXT:    fsw ft0, 52(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 28(s1)
-; ILP32D-NEXT:    fsw ft0, 48(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 32(s1)
-; ILP32D-NEXT:    fsw ft0, 44(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 36(s1)
-; ILP32D-NEXT:    fsw ft0, 40(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 40(s1)
-; ILP32D-NEXT:    fsw ft0, 36(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 44(s1)
-; ILP32D-NEXT:    fsw ft0, 32(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 48(s1)
-; ILP32D-NEXT:    fsw ft0, 28(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 52(s1)
-; ILP32D-NEXT:    fsw ft0, 24(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 56(s1)
-; ILP32D-NEXT:    fsw ft0, 20(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 60(s1)
-; ILP32D-NEXT:    fsw ft0, 16(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 64(s1)
-; ILP32D-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 68(s1)
-; ILP32D-NEXT:    fsw ft0, 8(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 72(s1)
-; ILP32D-NEXT:    fsw ft0, 4(sp) # 4-byte Folded Spill
-; ILP32D-NEXT:    flw ft0, 76(s1)
-; ILP32D-NEXT:    fsw ft0, 0(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 16(s1)
+; ILP32D-NEXT:    fsw fa5, 60(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 20(s1)
+; ILP32D-NEXT:    fsw fa5, 56(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 24(s1)
+; ILP32D-NEXT:    fsw fa5, 52(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 28(s1)
+; ILP32D-NEXT:    fsw fa5, 48(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 32(s1)
+; ILP32D-NEXT:    fsw fa5, 44(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 36(s1)
+; ILP32D-NEXT:    fsw fa5, 40(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 40(s1)
+; ILP32D-NEXT:    fsw fa5, 36(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 44(s1)
+; ILP32D-NEXT:    fsw fa5, 32(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 48(s1)
+; ILP32D-NEXT:    fsw fa5, 28(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 52(s1)
+; ILP32D-NEXT:    fsw fa5, 24(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 56(s1)
+; ILP32D-NEXT:    fsw fa5, 20(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 60(s1)
+; ILP32D-NEXT:    fsw fa5, 16(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 64(s1)
+; ILP32D-NEXT:    fsw fa5, 12(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 68(s1)
+; ILP32D-NEXT:    fsw fa5, 8(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 72(s1)
+; ILP32D-NEXT:    fsw fa5, 4(sp) # 4-byte Folded Spill
+; ILP32D-NEXT:    flw fa5, 76(s1)
+; ILP32D-NEXT:    fsw fa5, 0(sp) # 4-byte Folded Spill
 ; ILP32D-NEXT:    flw fs8, 80(s1)
 ; ILP32D-NEXT:    flw fs9, 84(s1)
 ; ILP32D-NEXT:    flw fs10, 88(s1)
@@ -1214,46 +1214,46 @@ define void @caller() nounwind {
 ; ILP32D-NEXT:    fsw fs10, 88(s1)
 ; ILP32D-NEXT:    fsw fs9, 84(s1)
 ; ILP32D-NEXT:    fsw fs8, 80(s1)
-; ILP32D-NEXT:    flw ft0, 0(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 76(s1)
-; ILP32D-NEXT:    flw ft0, 4(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 72(s1)
-; ILP32D-NEXT:    flw ft0, 8(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 68(s1)
-; ILP32D-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 64(s1)
-; ILP32D-NEXT:    flw ft0, 16(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 60(s1)
-; ILP32D-NEXT:    flw ft0, 20(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 56(s1)
-; ILP32D-NEXT:    flw ft0, 24(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 52(s1)
-; ILP32D-NEXT:    flw ft0, 28(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 48(s1)
-; ILP32D-NEXT:    flw ft0, 32(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 44(s1)
-; ILP32D-NEXT:    flw ft0, 36(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 40(s1)
-; ILP32D-NEXT:    flw ft0, 40(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 36(s1)
-; ILP32D-NEXT:    flw ft0, 44(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 32(s1)
-; ILP32D-NEXT:    flw ft0, 48(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 28(s1)
-; ILP32D-NEXT:    flw ft0, 52(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 24(s1)
-; ILP32D-NEXT:    flw ft0, 56(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 20(s1)
-; ILP32D-NEXT:    flw ft0, 60(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, 16(s1)
-; ILP32D-NEXT:    flw ft0, 64(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, %lo(var+12)(s0)
-; ILP32D-NEXT:    flw ft0, 68(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, %lo(var+8)(s0)
-; ILP32D-NEXT:    flw ft0, 72(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, %lo(var+4)(s0)
-; ILP32D-NEXT:    flw ft0, 76(sp) # 4-byte Folded Reload
-; ILP32D-NEXT:    fsw ft0, %lo(var)(s0)
+; ILP32D-NEXT:    flw fa5, 0(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 76(s1)
+; ILP32D-NEXT:    flw fa5, 4(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 72(s1)
+; ILP32D-NEXT:    flw fa5, 8(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 68(s1)
+; ILP32D-NEXT:    flw fa5, 12(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 64(s1)
+; ILP32D-NEXT:    flw fa5, 16(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 60(s1)
+; ILP32D-NEXT:    flw fa5, 20(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 56(s1)
+; ILP32D-NEXT:    flw fa5, 24(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 52(s1)
+; ILP32D-NEXT:    flw fa5, 28(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 48(s1)
+; ILP32D-NEXT:    flw fa5, 32(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 44(s1)
+; ILP32D-NEXT:    flw fa5, 36(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 40(s1)
+; ILP32D-NEXT:    flw fa5, 40(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 36(s1)
+; ILP32D-NEXT:    flw fa5, 44(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 32(s1)
+; ILP32D-NEXT:    flw fa5, 48(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 28(s1)
+; ILP32D-NEXT:    flw fa5, 52(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 24(s1)
+; ILP32D-NEXT:    flw fa5, 56(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 20(s1)
+; ILP32D-NEXT:    flw fa5, 60(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, 16(s1)
+; ILP32D-NEXT:    flw fa5, 64(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, %lo(var+12)(s0)
+; ILP32D-NEXT:    flw fa5, 68(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, %lo(var+8)(s0)
+; ILP32D-NEXT:    flw fa5, 72(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, %lo(var+4)(s0)
+; ILP32D-NEXT:    flw fa5, 76(sp) # 4-byte Folded Reload
+; ILP32D-NEXT:    fsw fa5, %lo(var)(s0)
 ; ILP32D-NEXT:    lw ra, 188(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    lw s0, 184(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    lw s1, 180(sp) # 4-byte Folded Reload
@@ -1291,47 +1291,47 @@ define void @caller() nounwind {
 ; LP64D-NEXT:    fsd fs10, 96(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fsd fs11, 88(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    lui s0, %hi(var)
-; LP64D-NEXT:    flw ft0, %lo(var)(s0)
-; LP64D-NEXT:    fsw ft0, 84(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, %lo(var+4)(s0)
-; LP64D-NEXT:    fsw ft0, 80(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, %lo(var+8)(s0)
-; LP64D-NEXT:    fsw ft0, 76(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, %lo(var+12)(s0)
-; LP64D-NEXT:    fsw ft0, 72(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, %lo(var)(s0)
+; LP64D-NEXT:    fsw fa5, 84(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, %lo(var+4)(s0)
+; LP64D-NEXT:    fsw fa5, 80(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, %lo(var+8)(s0)
+; LP64D-NEXT:    fsw fa5, 76(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, %lo(var+12)(s0)
+; LP64D-NEXT:    fsw fa5, 72(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    addi s1, s0, %lo(var)
-; LP64D-NEXT:    flw ft0, 16(s1)
-; LP64D-NEXT:    fsw ft0, 68(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 20(s1)
-; LP64D-NEXT:    fsw ft0, 64(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 24(s1)
-; LP64D-NEXT:    fsw ft0, 60(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 28(s1)
-; LP64D-NEXT:    fsw ft0, 56(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 32(s1)
-; LP64D-NEXT:    fsw ft0, 52(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 36(s1)
-; LP64D-NEXT:    fsw ft0, 48(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 40(s1)
-; LP64D-NEXT:    fsw ft0, 44(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 44(s1)
-; LP64D-NEXT:    fsw ft0, 40(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 48(s1)
-; LP64D-NEXT:    fsw ft0, 36(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 52(s1)
-; LP64D-NEXT:    fsw ft0, 32(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 56(s1)
-; LP64D-NEXT:    fsw ft0, 28(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 60(s1)
-; LP64D-NEXT:    fsw ft0, 24(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 64(s1)
-; LP64D-NEXT:    fsw ft0, 20(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 68(s1)
-; LP64D-NEXT:    fsw ft0, 16(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 72(s1)
-; LP64D-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
-; LP64D-NEXT:    flw ft0, 76(s1)
-; LP64D-NEXT:    fsw ft0, 8(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 16(s1)
+; LP64D-NEXT:    fsw fa5, 68(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 20(s1)
+; LP64D-NEXT:    fsw fa5, 64(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 24(s1)
+; LP64D-NEXT:    fsw fa5, 60(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 28(s1)
+; LP64D-NEXT:    fsw fa5, 56(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 32(s1)
+; LP64D-NEXT:    fsw fa5, 52(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 36(s1)
+; LP64D-NEXT:    fsw fa5, 48(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 40(s1)
+; LP64D-NEXT:    fsw fa5, 44(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 44(s1)
+; LP64D-NEXT:    fsw fa5, 40(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 48(s1)
+; LP64D-NEXT:    fsw fa5, 36(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 52(s1)
+; LP64D-NEXT:    fsw fa5, 32(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 56(s1)
+; LP64D-NEXT:    fsw fa5, 28(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 60(s1)
+; LP64D-NEXT:    fsw fa5, 24(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 64(s1)
+; LP64D-NEXT:    fsw fa5, 20(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 68(s1)
+; LP64D-NEXT:    fsw fa5, 16(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 72(s1)
+; LP64D-NEXT:    fsw fa5, 12(sp) # 4-byte Folded Spill
+; LP64D-NEXT:    flw fa5, 76(s1)
+; LP64D-NEXT:    fsw fa5, 8(sp) # 4-byte Folded Spill
 ; LP64D-NEXT:    flw fs8, 80(s1)
 ; LP64D-NEXT:    flw fs9, 84(s1)
 ; LP64D-NEXT:    flw fs10, 88(s1)
@@ -1357,46 +1357,46 @@ define void @caller() nounwind {
 ; LP64D-NEXT:    fsw fs10, 88(s1)
 ; LP64D-NEXT:    fsw fs9, 84(s1)
 ; LP64D-NEXT:    fsw fs8, 80(s1)
-; LP64D-NEXT:    flw ft0, 8(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 76(s1)
-; LP64D-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 72(s1)
-; LP64D-NEXT:    flw ft0, 16(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 68(s1)
-; LP64D-NEXT:    flw ft0, 20(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 64(s1)
-; LP64D-NEXT:    flw ft0, 24(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 60(s1)
-; LP64D-NEXT:    flw ft0, 28(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 56(s1)
-; LP64D-NEXT:    flw ft0, 32(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 52(s1)
-; LP64D-NEXT:    flw ft0, 36(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 48(s1)
-; LP64D-NEXT:    flw ft0, 40(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 44(s1)
-; LP64D-NEXT:    flw ft0, 44(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 40(s1)
-; LP64D-NEXT:    flw ft0, 48(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 36(s1)
-; LP64D-NEXT:    flw ft0, 52(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 32(s1)
-; LP64D-NEXT:    flw ft0, 56(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 28(s1)
-; LP64D-NEXT:    flw ft0, 60(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 24(s1)
-; LP64D-NEXT:    flw ft0, 64(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 20(s1)
-; LP64D-NEXT:    flw ft0, 68(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, 16(s1)
-; LP64D-NEXT:    flw ft0, 72(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, %lo(var+12)(s0)
-; LP64D-NEXT:    flw ft0, 76(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, %lo(var+8)(s0)
-; LP64D-NEXT:    flw ft0, 80(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, %lo(var+4)(s0)
-; LP64D-NEXT:    flw ft0, 84(sp) # 4-byte Folded Reload
-; LP64D-NEXT:    fsw ft0, %lo(var)(s0)
+; LP64D-NEXT:    flw fa5, 8(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 76(s1)
+; LP64D-NEXT:    flw fa5, 12(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 72(s1)
+; LP64D-NEXT:    flw fa5, 16(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 68(s1)
+; LP64D-NEXT:    flw fa5, 20(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 64(s1)
+; LP64D-NEXT:    flw fa5, 24(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 60(s1)
+; LP64D-NEXT:    flw fa5, 28(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 56(s1)
+; LP64D-NEXT:    flw fa5, 32(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 52(s1)
+; LP64D-NEXT:    flw fa5, 36(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 48(s1)
+; LP64D-NEXT:    flw fa5, 40(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 44(s1)
+; LP64D-NEXT:    flw fa5, 44(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 40(s1)
+; LP64D-NEXT:    flw fa5, 48(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 36(s1)
+; LP64D-NEXT:    flw fa5, 52(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 32(s1)
+; LP64D-NEXT:    flw fa5, 56(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 28(s1)
+; LP64D-NEXT:    flw fa5, 60(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 24(s1)
+; LP64D-NEXT:    flw fa5, 64(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 20(s1)
+; LP64D-NEXT:    flw fa5, 68(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, 16(s1)
+; LP64D-NEXT:    flw fa5, 72(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, %lo(var+12)(s0)
+; LP64D-NEXT:    flw fa5, 76(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, %lo(var+8)(s0)
+; LP64D-NEXT:    flw fa5, 80(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, %lo(var+4)(s0)
+; LP64D-NEXT:    flw fa5, 84(sp) # 4-byte Folded Reload
+; LP64D-NEXT:    fsw fa5, %lo(var)(s0)
 ; LP64D-NEXT:    ld ra, 200(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    ld s0, 192(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    ld s1, 184(sp) # 8-byte Folded Reload

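The check-line churn in these tests (ft0 giving way to fa5 as the scratch register, and fa5..fa0 taking the low offsets ahead of ft0..ft7 in the callee-saved-fpr64s.ll diff that follows) falls out of the order in which the FPR register class members are listed in RISCVRegisterInfo.td. A minimal TableGen sketch of that kind of reordering -- illustrative only, assuming the file's usual (sequence "F%u_F", start, end) style rather than quoting the literal hunk:

    // Sketch only: listing fa5..fa0 (f15..f10) first makes the allocator
    // prefer them; the remaining groups keep their earlier relative order.
    def FPR32 : RegisterClass<"RISCV", [f32], 32, (add
        (sequence "F%u_F", 15, 10), // fa5..fa0
        (sequence "F%u_F", 0, 7),   // ft0..ft7
        (sequence "F%u_F", 16, 17), // fa6, fa7
        (sequence "F%u_F", 28, 31), // ft8..ft11
        (sequence "F%u_F", 8, 9),   // fs0, fs1 (callee-saved)
        (sequence "F%u_F", 18, 27)  // fs2..fs11 (callee-saved)
    )>;

This matches the allocation visible in the checks: fa5 is grabbed first for transient loads and stores, and ft0..ft7 are only reached once fa5..fa0 are live.
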
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll b/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
index 0e2ced0b17bf..40076316bca8 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
@@ -20,21 +20,21 @@ define void @callee() nounwind {
 ; ILP32-LABEL: callee:
 ; ILP32:       # %bb.0:
 ; ILP32-NEXT:    lui a0, %hi(var)
-; ILP32-NEXT:    fld ft0, %lo(var)(a0)
-; ILP32-NEXT:    fld ft1, %lo(var+8)(a0)
+; ILP32-NEXT:    fld fa5, %lo(var)(a0)
+; ILP32-NEXT:    fld fa4, %lo(var+8)(a0)
 ; ILP32-NEXT:    addi a1, a0, %lo(var)
-; ILP32-NEXT:    fld ft2, 16(a1)
-; ILP32-NEXT:    fld ft3, 24(a1)
-; ILP32-NEXT:    fld ft4, 32(a1)
-; ILP32-NEXT:    fld ft5, 40(a1)
-; ILP32-NEXT:    fld ft6, 48(a1)
-; ILP32-NEXT:    fld ft7, 56(a1)
-; ILP32-NEXT:    fld fa0, 64(a1)
-; ILP32-NEXT:    fld fa1, 72(a1)
-; ILP32-NEXT:    fld fa2, 80(a1)
-; ILP32-NEXT:    fld fa3, 88(a1)
-; ILP32-NEXT:    fld fa4, 96(a1)
-; ILP32-NEXT:    fld fa5, 104(a1)
+; ILP32-NEXT:    fld fa3, 16(a1)
+; ILP32-NEXT:    fld fa2, 24(a1)
+; ILP32-NEXT:    fld fa1, 32(a1)
+; ILP32-NEXT:    fld fa0, 40(a1)
+; ILP32-NEXT:    fld ft0, 48(a1)
+; ILP32-NEXT:    fld ft1, 56(a1)
+; ILP32-NEXT:    fld ft2, 64(a1)
+; ILP32-NEXT:    fld ft3, 72(a1)
+; ILP32-NEXT:    fld ft4, 80(a1)
+; ILP32-NEXT:    fld ft5, 88(a1)
+; ILP32-NEXT:    fld ft6, 96(a1)
+; ILP32-NEXT:    fld ft7, 104(a1)
 ; ILP32-NEXT:    fld fa6, 112(a1)
 ; ILP32-NEXT:    fld fa7, 120(a1)
 ; ILP32-NEXT:    fld ft8, 128(a1)
@@ -71,40 +71,40 @@ define void @callee() nounwind {
 ; ILP32-NEXT:    fsd ft8, 128(a1)
 ; ILP32-NEXT:    fsd fa7, 120(a1)
 ; ILP32-NEXT:    fsd fa6, 112(a1)
-; ILP32-NEXT:    fsd fa5, 104(a1)
-; ILP32-NEXT:    fsd fa4, 96(a1)
-; ILP32-NEXT:    fsd fa3, 88(a1)
-; ILP32-NEXT:    fsd fa2, 80(a1)
-; ILP32-NEXT:    fsd fa1, 72(a1)
-; ILP32-NEXT:    fsd fa0, 64(a1)
-; ILP32-NEXT:    fsd ft7, 56(a1)
-; ILP32-NEXT:    fsd ft6, 48(a1)
-; ILP32-NEXT:    fsd ft5, 40(a1)
-; ILP32-NEXT:    fsd ft4, 32(a1)
-; ILP32-NEXT:    fsd ft3, 24(a1)
-; ILP32-NEXT:    fsd ft2, 16(a1)
-; ILP32-NEXT:    fsd ft1, %lo(var+8)(a0)
-; ILP32-NEXT:    fsd ft0, %lo(var)(a0)
+; ILP32-NEXT:    fsd ft7, 104(a1)
+; ILP32-NEXT:    fsd ft6, 96(a1)
+; ILP32-NEXT:    fsd ft5, 88(a1)
+; ILP32-NEXT:    fsd ft4, 80(a1)
+; ILP32-NEXT:    fsd ft3, 72(a1)
+; ILP32-NEXT:    fsd ft2, 64(a1)
+; ILP32-NEXT:    fsd ft1, 56(a1)
+; ILP32-NEXT:    fsd ft0, 48(a1)
+; ILP32-NEXT:    fsd fa0, 40(a1)
+; ILP32-NEXT:    fsd fa1, 32(a1)
+; ILP32-NEXT:    fsd fa2, 24(a1)
+; ILP32-NEXT:    fsd fa3, 16(a1)
+; ILP32-NEXT:    fsd fa4, %lo(var+8)(a0)
+; ILP32-NEXT:    fsd fa5, %lo(var)(a0)
 ; ILP32-NEXT:    ret
 ;
 ; LP64-LABEL: callee:
 ; LP64:       # %bb.0:
 ; LP64-NEXT:    lui a0, %hi(var)
-; LP64-NEXT:    fld ft0, %lo(var)(a0)
-; LP64-NEXT:    fld ft1, %lo(var+8)(a0)
+; LP64-NEXT:    fld fa5, %lo(var)(a0)
+; LP64-NEXT:    fld fa4, %lo(var+8)(a0)
 ; LP64-NEXT:    addi a1, a0, %lo(var)
-; LP64-NEXT:    fld ft2, 16(a1)
-; LP64-NEXT:    fld ft3, 24(a1)
-; LP64-NEXT:    fld ft4, 32(a1)
-; LP64-NEXT:    fld ft5, 40(a1)
-; LP64-NEXT:    fld ft6, 48(a1)
-; LP64-NEXT:    fld ft7, 56(a1)
-; LP64-NEXT:    fld fa0, 64(a1)
-; LP64-NEXT:    fld fa1, 72(a1)
-; LP64-NEXT:    fld fa2, 80(a1)
-; LP64-NEXT:    fld fa3, 88(a1)
-; LP64-NEXT:    fld fa4, 96(a1)
-; LP64-NEXT:    fld fa5, 104(a1)
+; LP64-NEXT:    fld fa3, 16(a1)
+; LP64-NEXT:    fld fa2, 24(a1)
+; LP64-NEXT:    fld fa1, 32(a1)
+; LP64-NEXT:    fld fa0, 40(a1)
+; LP64-NEXT:    fld ft0, 48(a1)
+; LP64-NEXT:    fld ft1, 56(a1)
+; LP64-NEXT:    fld ft2, 64(a1)
+; LP64-NEXT:    fld ft3, 72(a1)
+; LP64-NEXT:    fld ft4, 80(a1)
+; LP64-NEXT:    fld ft5, 88(a1)
+; LP64-NEXT:    fld ft6, 96(a1)
+; LP64-NEXT:    fld ft7, 104(a1)
 ; LP64-NEXT:    fld fa6, 112(a1)
 ; LP64-NEXT:    fld fa7, 120(a1)
 ; LP64-NEXT:    fld ft8, 128(a1)
@@ -141,20 +141,20 @@ define void @callee() nounwind {
 ; LP64-NEXT:    fsd ft8, 128(a1)
 ; LP64-NEXT:    fsd fa7, 120(a1)
 ; LP64-NEXT:    fsd fa6, 112(a1)
-; LP64-NEXT:    fsd fa5, 104(a1)
-; LP64-NEXT:    fsd fa4, 96(a1)
-; LP64-NEXT:    fsd fa3, 88(a1)
-; LP64-NEXT:    fsd fa2, 80(a1)
-; LP64-NEXT:    fsd fa1, 72(a1)
-; LP64-NEXT:    fsd fa0, 64(a1)
-; LP64-NEXT:    fsd ft7, 56(a1)
-; LP64-NEXT:    fsd ft6, 48(a1)
-; LP64-NEXT:    fsd ft5, 40(a1)
-; LP64-NEXT:    fsd ft4, 32(a1)
-; LP64-NEXT:    fsd ft3, 24(a1)
-; LP64-NEXT:    fsd ft2, 16(a1)
-; LP64-NEXT:    fsd ft1, %lo(var+8)(a0)
-; LP64-NEXT:    fsd ft0, %lo(var)(a0)
+; LP64-NEXT:    fsd ft7, 104(a1)
+; LP64-NEXT:    fsd ft6, 96(a1)
+; LP64-NEXT:    fsd ft5, 88(a1)
+; LP64-NEXT:    fsd ft4, 80(a1)
+; LP64-NEXT:    fsd ft3, 72(a1)
+; LP64-NEXT:    fsd ft2, 64(a1)
+; LP64-NEXT:    fsd ft1, 56(a1)
+; LP64-NEXT:    fsd ft0, 48(a1)
+; LP64-NEXT:    fsd fa0, 40(a1)
+; LP64-NEXT:    fsd fa1, 32(a1)
+; LP64-NEXT:    fsd fa2, 24(a1)
+; LP64-NEXT:    fsd fa3, 16(a1)
+; LP64-NEXT:    fsd fa4, %lo(var+8)(a0)
+; LP64-NEXT:    fsd fa5, %lo(var)(a0)
 ; LP64-NEXT:    ret
 ;
 ; ILP32D-LABEL: callee:
@@ -173,21 +173,21 @@ define void @callee() nounwind {
 ; ILP32D-NEXT:    fsd fs10, 8(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fsd fs11, 0(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    lui a0, %hi(var)
-; ILP32D-NEXT:    fld ft0, %lo(var)(a0)
-; ILP32D-NEXT:    fld ft1, %lo(var+8)(a0)
+; ILP32D-NEXT:    fld fa5, %lo(var)(a0)
+; ILP32D-NEXT:    fld fa4, %lo(var+8)(a0)
 ; ILP32D-NEXT:    addi a1, a0, %lo(var)
-; ILP32D-NEXT:    fld ft2, 16(a1)
-; ILP32D-NEXT:    fld ft3, 24(a1)
-; ILP32D-NEXT:    fld ft4, 32(a1)
-; ILP32D-NEXT:    fld ft5, 40(a1)
-; ILP32D-NEXT:    fld ft6, 48(a1)
-; ILP32D-NEXT:    fld ft7, 56(a1)
-; ILP32D-NEXT:    fld fa0, 64(a1)
-; ILP32D-NEXT:    fld fa1, 72(a1)
-; ILP32D-NEXT:    fld fa2, 80(a1)
-; ILP32D-NEXT:    fld fa3, 88(a1)
-; ILP32D-NEXT:    fld fa4, 96(a1)
-; ILP32D-NEXT:    fld fa5, 104(a1)
+; ILP32D-NEXT:    fld fa3, 16(a1)
+; ILP32D-NEXT:    fld fa2, 24(a1)
+; ILP32D-NEXT:    fld fa1, 32(a1)
+; ILP32D-NEXT:    fld fa0, 40(a1)
+; ILP32D-NEXT:    fld ft0, 48(a1)
+; ILP32D-NEXT:    fld ft1, 56(a1)
+; ILP32D-NEXT:    fld ft2, 64(a1)
+; ILP32D-NEXT:    fld ft3, 72(a1)
+; ILP32D-NEXT:    fld ft4, 80(a1)
+; ILP32D-NEXT:    fld ft5, 88(a1)
+; ILP32D-NEXT:    fld ft6, 96(a1)
+; ILP32D-NEXT:    fld ft7, 104(a1)
 ; ILP32D-NEXT:    fld fa6, 112(a1)
 ; ILP32D-NEXT:    fld fa7, 120(a1)
 ; ILP32D-NEXT:    fld ft8, 128(a1)
@@ -224,20 +224,20 @@ define void @callee() nounwind {
 ; ILP32D-NEXT:    fsd ft8, 128(a1)
 ; ILP32D-NEXT:    fsd fa7, 120(a1)
 ; ILP32D-NEXT:    fsd fa6, 112(a1)
-; ILP32D-NEXT:    fsd fa5, 104(a1)
-; ILP32D-NEXT:    fsd fa4, 96(a1)
-; ILP32D-NEXT:    fsd fa3, 88(a1)
-; ILP32D-NEXT:    fsd fa2, 80(a1)
-; ILP32D-NEXT:    fsd fa1, 72(a1)
-; ILP32D-NEXT:    fsd fa0, 64(a1)
-; ILP32D-NEXT:    fsd ft7, 56(a1)
-; ILP32D-NEXT:    fsd ft6, 48(a1)
-; ILP32D-NEXT:    fsd ft5, 40(a1)
-; ILP32D-NEXT:    fsd ft4, 32(a1)
-; ILP32D-NEXT:    fsd ft3, 24(a1)
-; ILP32D-NEXT:    fsd ft2, 16(a1)
-; ILP32D-NEXT:    fsd ft1, %lo(var+8)(a0)
-; ILP32D-NEXT:    fsd ft0, %lo(var)(a0)
+; ILP32D-NEXT:    fsd ft7, 104(a1)
+; ILP32D-NEXT:    fsd ft6, 96(a1)
+; ILP32D-NEXT:    fsd ft5, 88(a1)
+; ILP32D-NEXT:    fsd ft4, 80(a1)
+; ILP32D-NEXT:    fsd ft3, 72(a1)
+; ILP32D-NEXT:    fsd ft2, 64(a1)
+; ILP32D-NEXT:    fsd ft1, 56(a1)
+; ILP32D-NEXT:    fsd ft0, 48(a1)
+; ILP32D-NEXT:    fsd fa0, 40(a1)
+; ILP32D-NEXT:    fsd fa1, 32(a1)
+; ILP32D-NEXT:    fsd fa2, 24(a1)
+; ILP32D-NEXT:    fsd fa3, 16(a1)
+; ILP32D-NEXT:    fsd fa4, %lo(var+8)(a0)
+; ILP32D-NEXT:    fsd fa5, %lo(var)(a0)
 ; ILP32D-NEXT:    fld fs0, 88(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fld fs1, 80(sp) # 8-byte Folded Reload
 ; ILP32D-NEXT:    fld fs2, 72(sp) # 8-byte Folded Reload
@@ -269,21 +269,21 @@ define void @callee() nounwind {
 ; LP64D-NEXT:    fsd fs10, 8(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fsd fs11, 0(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    lui a0, %hi(var)
-; LP64D-NEXT:    fld ft0, %lo(var)(a0)
-; LP64D-NEXT:    fld ft1, %lo(var+8)(a0)
+; LP64D-NEXT:    fld fa5, %lo(var)(a0)
+; LP64D-NEXT:    fld fa4, %lo(var+8)(a0)
 ; LP64D-NEXT:    addi a1, a0, %lo(var)
-; LP64D-NEXT:    fld ft2, 16(a1)
-; LP64D-NEXT:    fld ft3, 24(a1)
-; LP64D-NEXT:    fld ft4, 32(a1)
-; LP64D-NEXT:    fld ft5, 40(a1)
-; LP64D-NEXT:    fld ft6, 48(a1)
-; LP64D-NEXT:    fld ft7, 56(a1)
-; LP64D-NEXT:    fld fa0, 64(a1)
-; LP64D-NEXT:    fld fa1, 72(a1)
-; LP64D-NEXT:    fld fa2, 80(a1)
-; LP64D-NEXT:    fld fa3, 88(a1)
-; LP64D-NEXT:    fld fa4, 96(a1)
-; LP64D-NEXT:    fld fa5, 104(a1)
+; LP64D-NEXT:    fld fa3, 16(a1)
+; LP64D-NEXT:    fld fa2, 24(a1)
+; LP64D-NEXT:    fld fa1, 32(a1)
+; LP64D-NEXT:    fld fa0, 40(a1)
+; LP64D-NEXT:    fld ft0, 48(a1)
+; LP64D-NEXT:    fld ft1, 56(a1)
+; LP64D-NEXT:    fld ft2, 64(a1)
+; LP64D-NEXT:    fld ft3, 72(a1)
+; LP64D-NEXT:    fld ft4, 80(a1)
+; LP64D-NEXT:    fld ft5, 88(a1)
+; LP64D-NEXT:    fld ft6, 96(a1)
+; LP64D-NEXT:    fld ft7, 104(a1)
 ; LP64D-NEXT:    fld fa6, 112(a1)
 ; LP64D-NEXT:    fld fa7, 120(a1)
 ; LP64D-NEXT:    fld ft8, 128(a1)
@@ -320,20 +320,20 @@ define void @callee() nounwind {
 ; LP64D-NEXT:    fsd ft8, 128(a1)
 ; LP64D-NEXT:    fsd fa7, 120(a1)
 ; LP64D-NEXT:    fsd fa6, 112(a1)
-; LP64D-NEXT:    fsd fa5, 104(a1)
-; LP64D-NEXT:    fsd fa4, 96(a1)
-; LP64D-NEXT:    fsd fa3, 88(a1)
-; LP64D-NEXT:    fsd fa2, 80(a1)
-; LP64D-NEXT:    fsd fa1, 72(a1)
-; LP64D-NEXT:    fsd fa0, 64(a1)
-; LP64D-NEXT:    fsd ft7, 56(a1)
-; LP64D-NEXT:    fsd ft6, 48(a1)
-; LP64D-NEXT:    fsd ft5, 40(a1)
-; LP64D-NEXT:    fsd ft4, 32(a1)
-; LP64D-NEXT:    fsd ft3, 24(a1)
-; LP64D-NEXT:    fsd ft2, 16(a1)
-; LP64D-NEXT:    fsd ft1, %lo(var+8)(a0)
-; LP64D-NEXT:    fsd ft0, %lo(var)(a0)
+; LP64D-NEXT:    fsd ft7, 104(a1)
+; LP64D-NEXT:    fsd ft6, 96(a1)
+; LP64D-NEXT:    fsd ft5, 88(a1)
+; LP64D-NEXT:    fsd ft4, 80(a1)
+; LP64D-NEXT:    fsd ft3, 72(a1)
+; LP64D-NEXT:    fsd ft2, 64(a1)
+; LP64D-NEXT:    fsd ft1, 56(a1)
+; LP64D-NEXT:    fsd ft0, 48(a1)
+; LP64D-NEXT:    fsd fa0, 40(a1)
+; LP64D-NEXT:    fsd fa1, 32(a1)
+; LP64D-NEXT:    fsd fa2, 24(a1)
+; LP64D-NEXT:    fsd fa3, 16(a1)
+; LP64D-NEXT:    fsd fa4, %lo(var+8)(a0)
+; LP64D-NEXT:    fsd fa5, %lo(var)(a0)
 ; LP64D-NEXT:    fld fs0, 88(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fld fs1, 80(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    fld fs2, 72(sp) # 8-byte Folded Reload
@@ -368,136 +368,136 @@ define void @caller() nounwind {
 ; ILP32-NEXT:    sw s0, 264(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    sw s1, 260(sp) # 4-byte Folded Spill
 ; ILP32-NEXT:    lui s0, %hi(var)
-; ILP32-NEXT:    fld ft0, %lo(var)(s0)
-; ILP32-NEXT:    fsd ft0, 248(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, %lo(var+8)(s0)
-; ILP32-NEXT:    fsd ft0, 240(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, %lo(var)(s0)
+; ILP32-NEXT:    fsd fa5, 248(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, %lo(var+8)(s0)
+; ILP32-NEXT:    fsd fa5, 240(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    addi s1, s0, %lo(var)
-; ILP32-NEXT:    fld ft0, 16(s1)
-; ILP32-NEXT:    fsd ft0, 232(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 24(s1)
-; ILP32-NEXT:    fsd ft0, 224(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 32(s1)
-; ILP32-NEXT:    fsd ft0, 216(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 40(s1)
-; ILP32-NEXT:    fsd ft0, 208(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 48(s1)
-; ILP32-NEXT:    fsd ft0, 200(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 56(s1)
-; ILP32-NEXT:    fsd ft0, 192(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 64(s1)
-; ILP32-NEXT:    fsd ft0, 184(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 72(s1)
-; ILP32-NEXT:    fsd ft0, 176(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 80(s1)
-; ILP32-NEXT:    fsd ft0, 168(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 88(s1)
-; ILP32-NEXT:    fsd ft0, 160(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 96(s1)
-; ILP32-NEXT:    fsd ft0, 152(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 104(s1)
-; ILP32-NEXT:    fsd ft0, 144(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 112(s1)
-; ILP32-NEXT:    fsd ft0, 136(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 120(s1)
-; ILP32-NEXT:    fsd ft0, 128(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 128(s1)
-; ILP32-NEXT:    fsd ft0, 120(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 136(s1)
-; ILP32-NEXT:    fsd ft0, 112(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 144(s1)
-; ILP32-NEXT:    fsd ft0, 104(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 152(s1)
-; ILP32-NEXT:    fsd ft0, 96(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 160(s1)
-; ILP32-NEXT:    fsd ft0, 88(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 168(s1)
-; ILP32-NEXT:    fsd ft0, 80(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 176(s1)
-; ILP32-NEXT:    fsd ft0, 72(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 184(s1)
-; ILP32-NEXT:    fsd ft0, 64(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 192(s1)
-; ILP32-NEXT:    fsd ft0, 56(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 200(s1)
-; ILP32-NEXT:    fsd ft0, 48(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 208(s1)
-; ILP32-NEXT:    fsd ft0, 40(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 216(s1)
-; ILP32-NEXT:    fsd ft0, 32(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 224(s1)
-; ILP32-NEXT:    fsd ft0, 24(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 232(s1)
-; ILP32-NEXT:    fsd ft0, 16(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 240(s1)
-; ILP32-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
-; ILP32-NEXT:    fld ft0, 248(s1)
-; ILP32-NEXT:    fsd ft0, 0(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 16(s1)
+; ILP32-NEXT:    fsd fa5, 232(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 24(s1)
+; ILP32-NEXT:    fsd fa5, 224(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 32(s1)
+; ILP32-NEXT:    fsd fa5, 216(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 40(s1)
+; ILP32-NEXT:    fsd fa5, 208(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 48(s1)
+; ILP32-NEXT:    fsd fa5, 200(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 56(s1)
+; ILP32-NEXT:    fsd fa5, 192(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 64(s1)
+; ILP32-NEXT:    fsd fa5, 184(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 72(s1)
+; ILP32-NEXT:    fsd fa5, 176(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 80(s1)
+; ILP32-NEXT:    fsd fa5, 168(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 88(s1)
+; ILP32-NEXT:    fsd fa5, 160(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 96(s1)
+; ILP32-NEXT:    fsd fa5, 152(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 104(s1)
+; ILP32-NEXT:    fsd fa5, 144(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 112(s1)
+; ILP32-NEXT:    fsd fa5, 136(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 120(s1)
+; ILP32-NEXT:    fsd fa5, 128(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 128(s1)
+; ILP32-NEXT:    fsd fa5, 120(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 136(s1)
+; ILP32-NEXT:    fsd fa5, 112(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 144(s1)
+; ILP32-NEXT:    fsd fa5, 104(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 152(s1)
+; ILP32-NEXT:    fsd fa5, 96(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 160(s1)
+; ILP32-NEXT:    fsd fa5, 88(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 168(s1)
+; ILP32-NEXT:    fsd fa5, 80(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 176(s1)
+; ILP32-NEXT:    fsd fa5, 72(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 184(s1)
+; ILP32-NEXT:    fsd fa5, 64(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 192(s1)
+; ILP32-NEXT:    fsd fa5, 56(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 200(s1)
+; ILP32-NEXT:    fsd fa5, 48(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 208(s1)
+; ILP32-NEXT:    fsd fa5, 40(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 216(s1)
+; ILP32-NEXT:    fsd fa5, 32(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 224(s1)
+; ILP32-NEXT:    fsd fa5, 24(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 232(s1)
+; ILP32-NEXT:    fsd fa5, 16(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 240(s1)
+; ILP32-NEXT:    fsd fa5, 8(sp) # 8-byte Folded Spill
+; ILP32-NEXT:    fld fa5, 248(s1)
+; ILP32-NEXT:    fsd fa5, 0(sp) # 8-byte Folded Spill
 ; ILP32-NEXT:    call callee@plt
-; ILP32-NEXT:    fld ft0, 0(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 248(s1)
-; ILP32-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 240(s1)
-; ILP32-NEXT:    fld ft0, 16(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 232(s1)
-; ILP32-NEXT:    fld ft0, 24(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 224(s1)
-; ILP32-NEXT:    fld ft0, 32(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 216(s1)
-; ILP32-NEXT:    fld ft0, 40(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 208(s1)
-; ILP32-NEXT:    fld ft0, 48(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 200(s1)
-; ILP32-NEXT:    fld ft0, 56(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 192(s1)
-; ILP32-NEXT:    fld ft0, 64(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 184(s1)
-; ILP32-NEXT:    fld ft0, 72(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 176(s1)
-; ILP32-NEXT:    fld ft0, 80(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 168(s1)
-; ILP32-NEXT:    fld ft0, 88(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 160(s1)
-; ILP32-NEXT:    fld ft0, 96(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 152(s1)
-; ILP32-NEXT:    fld ft0, 104(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 144(s1)
-; ILP32-NEXT:    fld ft0, 112(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 136(s1)
-; ILP32-NEXT:    fld ft0, 120(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 128(s1)
-; ILP32-NEXT:    fld ft0, 128(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 120(s1)
-; ILP32-NEXT:    fld ft0, 136(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 112(s1)
-; ILP32-NEXT:    fld ft0, 144(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 104(s1)
-; ILP32-NEXT:    fld ft0, 152(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 96(s1)
-; ILP32-NEXT:    fld ft0, 160(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 88(s1)
-; ILP32-NEXT:    fld ft0, 168(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 80(s1)
-; ILP32-NEXT:    fld ft0, 176(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 72(s1)
-; ILP32-NEXT:    fld ft0, 184(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 64(s1)
-; ILP32-NEXT:    fld ft0, 192(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 56(s1)
-; ILP32-NEXT:    fld ft0, 200(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 48(s1)
-; ILP32-NEXT:    fld ft0, 208(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 40(s1)
-; ILP32-NEXT:    fld ft0, 216(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 32(s1)
-; ILP32-NEXT:    fld ft0, 224(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 24(s1)
-; ILP32-NEXT:    fld ft0, 232(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, 16(s1)
-; ILP32-NEXT:    fld ft0, 240(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, %lo(var+8)(s0)
-; ILP32-NEXT:    fld ft0, 248(sp) # 8-byte Folded Reload
-; ILP32-NEXT:    fsd ft0, %lo(var)(s0)
+; ILP32-NEXT:    fld fa5, 0(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 248(s1)
+; ILP32-NEXT:    fld fa5, 8(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 240(s1)
+; ILP32-NEXT:    fld fa5, 16(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 232(s1)
+; ILP32-NEXT:    fld fa5, 24(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 224(s1)
+; ILP32-NEXT:    fld fa5, 32(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 216(s1)
+; ILP32-NEXT:    fld fa5, 40(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 208(s1)
+; ILP32-NEXT:    fld fa5, 48(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 200(s1)
+; ILP32-NEXT:    fld fa5, 56(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 192(s1)
+; ILP32-NEXT:    fld fa5, 64(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 184(s1)
+; ILP32-NEXT:    fld fa5, 72(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 176(s1)
+; ILP32-NEXT:    fld fa5, 80(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 168(s1)
+; ILP32-NEXT:    fld fa5, 88(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 160(s1)
+; ILP32-NEXT:    fld fa5, 96(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 152(s1)
+; ILP32-NEXT:    fld fa5, 104(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 144(s1)
+; ILP32-NEXT:    fld fa5, 112(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 136(s1)
+; ILP32-NEXT:    fld fa5, 120(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 128(s1)
+; ILP32-NEXT:    fld fa5, 128(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 120(s1)
+; ILP32-NEXT:    fld fa5, 136(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 112(s1)
+; ILP32-NEXT:    fld fa5, 144(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 104(s1)
+; ILP32-NEXT:    fld fa5, 152(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 96(s1)
+; ILP32-NEXT:    fld fa5, 160(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 88(s1)
+; ILP32-NEXT:    fld fa5, 168(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 80(s1)
+; ILP32-NEXT:    fld fa5, 176(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 72(s1)
+; ILP32-NEXT:    fld fa5, 184(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 64(s1)
+; ILP32-NEXT:    fld fa5, 192(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 56(s1)
+; ILP32-NEXT:    fld fa5, 200(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 48(s1)
+; ILP32-NEXT:    fld fa5, 208(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 40(s1)
+; ILP32-NEXT:    fld fa5, 216(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 32(s1)
+; ILP32-NEXT:    fld fa5, 224(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 24(s1)
+; ILP32-NEXT:    fld fa5, 232(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, 16(s1)
+; ILP32-NEXT:    fld fa5, 240(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, %lo(var+8)(s0)
+; ILP32-NEXT:    fld fa5, 248(sp) # 8-byte Folded Reload
+; ILP32-NEXT:    fsd fa5, %lo(var)(s0)
 ; ILP32-NEXT:    lw ra, 268(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    lw s0, 264(sp) # 4-byte Folded Reload
 ; ILP32-NEXT:    lw s1, 260(sp) # 4-byte Folded Reload
@@ -511,136 +511,136 @@ define void @caller() nounwind {
 ; LP64-NEXT:    sd s0, 272(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    sd s1, 264(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    lui s0, %hi(var)
-; LP64-NEXT:    fld ft0, %lo(var)(s0)
-; LP64-NEXT:    fsd ft0, 256(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, %lo(var+8)(s0)
-; LP64-NEXT:    fsd ft0, 248(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, %lo(var)(s0)
+; LP64-NEXT:    fsd fa5, 256(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, %lo(var+8)(s0)
+; LP64-NEXT:    fsd fa5, 248(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    addi s1, s0, %lo(var)
-; LP64-NEXT:    fld ft0, 16(s1)
-; LP64-NEXT:    fsd ft0, 240(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 24(s1)
-; LP64-NEXT:    fsd ft0, 232(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 32(s1)
-; LP64-NEXT:    fsd ft0, 224(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 40(s1)
-; LP64-NEXT:    fsd ft0, 216(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 48(s1)
-; LP64-NEXT:    fsd ft0, 208(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 56(s1)
-; LP64-NEXT:    fsd ft0, 200(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 64(s1)
-; LP64-NEXT:    fsd ft0, 192(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 72(s1)
-; LP64-NEXT:    fsd ft0, 184(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 80(s1)
-; LP64-NEXT:    fsd ft0, 176(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 88(s1)
-; LP64-NEXT:    fsd ft0, 168(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 96(s1)
-; LP64-NEXT:    fsd ft0, 160(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 104(s1)
-; LP64-NEXT:    fsd ft0, 152(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 112(s1)
-; LP64-NEXT:    fsd ft0, 144(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 120(s1)
-; LP64-NEXT:    fsd ft0, 136(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 128(s1)
-; LP64-NEXT:    fsd ft0, 128(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 136(s1)
-; LP64-NEXT:    fsd ft0, 120(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 144(s1)
-; LP64-NEXT:    fsd ft0, 112(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 152(s1)
-; LP64-NEXT:    fsd ft0, 104(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 160(s1)
-; LP64-NEXT:    fsd ft0, 96(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 168(s1)
-; LP64-NEXT:    fsd ft0, 88(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 176(s1)
-; LP64-NEXT:    fsd ft0, 80(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 184(s1)
-; LP64-NEXT:    fsd ft0, 72(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 192(s1)
-; LP64-NEXT:    fsd ft0, 64(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 200(s1)
-; LP64-NEXT:    fsd ft0, 56(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 208(s1)
-; LP64-NEXT:    fsd ft0, 48(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 216(s1)
-; LP64-NEXT:    fsd ft0, 40(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 224(s1)
-; LP64-NEXT:    fsd ft0, 32(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 232(s1)
-; LP64-NEXT:    fsd ft0, 24(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 240(s1)
-; LP64-NEXT:    fsd ft0, 16(sp) # 8-byte Folded Spill
-; LP64-NEXT:    fld ft0, 248(s1)
-; LP64-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 16(s1)
+; LP64-NEXT:    fsd fa5, 240(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 24(s1)
+; LP64-NEXT:    fsd fa5, 232(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 32(s1)
+; LP64-NEXT:    fsd fa5, 224(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 40(s1)
+; LP64-NEXT:    fsd fa5, 216(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 48(s1)
+; LP64-NEXT:    fsd fa5, 208(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 56(s1)
+; LP64-NEXT:    fsd fa5, 200(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 64(s1)
+; LP64-NEXT:    fsd fa5, 192(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 72(s1)
+; LP64-NEXT:    fsd fa5, 184(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 80(s1)
+; LP64-NEXT:    fsd fa5, 176(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 88(s1)
+; LP64-NEXT:    fsd fa5, 168(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 96(s1)
+; LP64-NEXT:    fsd fa5, 160(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 104(s1)
+; LP64-NEXT:    fsd fa5, 152(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 112(s1)
+; LP64-NEXT:    fsd fa5, 144(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 120(s1)
+; LP64-NEXT:    fsd fa5, 136(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 128(s1)
+; LP64-NEXT:    fsd fa5, 128(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 136(s1)
+; LP64-NEXT:    fsd fa5, 120(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 144(s1)
+; LP64-NEXT:    fsd fa5, 112(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 152(s1)
+; LP64-NEXT:    fsd fa5, 104(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 160(s1)
+; LP64-NEXT:    fsd fa5, 96(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 168(s1)
+; LP64-NEXT:    fsd fa5, 88(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 176(s1)
+; LP64-NEXT:    fsd fa5, 80(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 184(s1)
+; LP64-NEXT:    fsd fa5, 72(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 192(s1)
+; LP64-NEXT:    fsd fa5, 64(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 200(s1)
+; LP64-NEXT:    fsd fa5, 56(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 208(s1)
+; LP64-NEXT:    fsd fa5, 48(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 216(s1)
+; LP64-NEXT:    fsd fa5, 40(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 224(s1)
+; LP64-NEXT:    fsd fa5, 32(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 232(s1)
+; LP64-NEXT:    fsd fa5, 24(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 240(s1)
+; LP64-NEXT:    fsd fa5, 16(sp) # 8-byte Folded Spill
+; LP64-NEXT:    fld fa5, 248(s1)
+; LP64-NEXT:    fsd fa5, 8(sp) # 8-byte Folded Spill
 ; LP64-NEXT:    call callee@plt
-; LP64-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 248(s1)
-; LP64-NEXT:    fld ft0, 16(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 240(s1)
-; LP64-NEXT:    fld ft0, 24(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 232(s1)
-; LP64-NEXT:    fld ft0, 32(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 224(s1)
-; LP64-NEXT:    fld ft0, 40(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 216(s1)
-; LP64-NEXT:    fld ft0, 48(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 208(s1)
-; LP64-NEXT:    fld ft0, 56(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 200(s1)
-; LP64-NEXT:    fld ft0, 64(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 192(s1)
-; LP64-NEXT:    fld ft0, 72(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 184(s1)
-; LP64-NEXT:    fld ft0, 80(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 176(s1)
-; LP64-NEXT:    fld ft0, 88(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 168(s1)
-; LP64-NEXT:    fld ft0, 96(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 160(s1)
-; LP64-NEXT:    fld ft0, 104(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 152(s1)
-; LP64-NEXT:    fld ft0, 112(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 144(s1)
-; LP64-NEXT:    fld ft0, 120(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 136(s1)
-; LP64-NEXT:    fld ft0, 128(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 128(s1)
-; LP64-NEXT:    fld ft0, 136(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 120(s1)
-; LP64-NEXT:    fld ft0, 144(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 112(s1)
-; LP64-NEXT:    fld ft0, 152(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 104(s1)
-; LP64-NEXT:    fld ft0, 160(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 96(s1)
-; LP64-NEXT:    fld ft0, 168(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 88(s1)
-; LP64-NEXT:    fld ft0, 176(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 80(s1)
-; LP64-NEXT:    fld ft0, 184(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 72(s1)
-; LP64-NEXT:    fld ft0, 192(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 64(s1)
-; LP64-NEXT:    fld ft0, 200(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 56(s1)
-; LP64-NEXT:    fld ft0, 208(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 48(s1)
-; LP64-NEXT:    fld ft0, 216(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 40(s1)
-; LP64-NEXT:    fld ft0, 224(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 32(s1)
-; LP64-NEXT:    fld ft0, 232(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 24(s1)
-; LP64-NEXT:    fld ft0, 240(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, 16(s1)
-; LP64-NEXT:    fld ft0, 248(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, %lo(var+8)(s0)
-; LP64-NEXT:    fld ft0, 256(sp) # 8-byte Folded Reload
-; LP64-NEXT:    fsd ft0, %lo(var)(s0)
+; LP64-NEXT:    fld fa5, 8(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 248(s1)
+; LP64-NEXT:    fld fa5, 16(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 240(s1)
+; LP64-NEXT:    fld fa5, 24(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 232(s1)
+; LP64-NEXT:    fld fa5, 32(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 224(s1)
+; LP64-NEXT:    fld fa5, 40(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 216(s1)
+; LP64-NEXT:    fld fa5, 48(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 208(s1)
+; LP64-NEXT:    fld fa5, 56(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 200(s1)
+; LP64-NEXT:    fld fa5, 64(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 192(s1)
+; LP64-NEXT:    fld fa5, 72(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 184(s1)
+; LP64-NEXT:    fld fa5, 80(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 176(s1)
+; LP64-NEXT:    fld fa5, 88(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 168(s1)
+; LP64-NEXT:    fld fa5, 96(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 160(s1)
+; LP64-NEXT:    fld fa5, 104(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 152(s1)
+; LP64-NEXT:    fld fa5, 112(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 144(s1)
+; LP64-NEXT:    fld fa5, 120(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 136(s1)
+; LP64-NEXT:    fld fa5, 128(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 128(s1)
+; LP64-NEXT:    fld fa5, 136(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 120(s1)
+; LP64-NEXT:    fld fa5, 144(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 112(s1)
+; LP64-NEXT:    fld fa5, 152(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 104(s1)
+; LP64-NEXT:    fld fa5, 160(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 96(s1)
+; LP64-NEXT:    fld fa5, 168(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 88(s1)
+; LP64-NEXT:    fld fa5, 176(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 80(s1)
+; LP64-NEXT:    fld fa5, 184(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 72(s1)
+; LP64-NEXT:    fld fa5, 192(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 64(s1)
+; LP64-NEXT:    fld fa5, 200(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 56(s1)
+; LP64-NEXT:    fld fa5, 208(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 48(s1)
+; LP64-NEXT:    fld fa5, 216(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 40(s1)
+; LP64-NEXT:    fld fa5, 224(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 32(s1)
+; LP64-NEXT:    fld fa5, 232(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 24(s1)
+; LP64-NEXT:    fld fa5, 240(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, 16(s1)
+; LP64-NEXT:    fld fa5, 248(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, %lo(var+8)(s0)
+; LP64-NEXT:    fld fa5, 256(sp) # 8-byte Folded Reload
+; LP64-NEXT:    fsd fa5, %lo(var)(s0)
 ; LP64-NEXT:    ld ra, 280(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    ld s0, 272(sp) # 8-byte Folded Reload
 ; LP64-NEXT:    ld s1, 264(sp) # 8-byte Folded Reload
@@ -666,47 +666,47 @@ define void @caller() nounwind {
 ; ILP32D-NEXT:    fsd fs10, 168(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fsd fs11, 160(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    lui s0, %hi(var)
-; ILP32D-NEXT:    fld ft0, %lo(var)(s0)
-; ILP32D-NEXT:    fsd ft0, 152(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, %lo(var+8)(s0)
-; ILP32D-NEXT:    fsd ft0, 144(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, %lo(var)(s0)
+; ILP32D-NEXT:    fsd fa5, 152(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, %lo(var+8)(s0)
+; ILP32D-NEXT:    fsd fa5, 144(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    addi s1, s0, %lo(var)
-; ILP32D-NEXT:    fld ft0, 16(s1)
-; ILP32D-NEXT:    fsd ft0, 136(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 24(s1)
-; ILP32D-NEXT:    fsd ft0, 128(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 32(s1)
-; ILP32D-NEXT:    fsd ft0, 120(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 40(s1)
-; ILP32D-NEXT:    fsd ft0, 112(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 48(s1)
-; ILP32D-NEXT:    fsd ft0, 104(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 56(s1)
-; ILP32D-NEXT:    fsd ft0, 96(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 64(s1)
-; ILP32D-NEXT:    fsd ft0, 88(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 72(s1)
-; ILP32D-NEXT:    fsd ft0, 80(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 80(s1)
-; ILP32D-NEXT:    fsd ft0, 72(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 88(s1)
-; ILP32D-NEXT:    fsd ft0, 64(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 96(s1)
-; ILP32D-NEXT:    fsd ft0, 56(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 104(s1)
-; ILP32D-NEXT:    fsd ft0, 48(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 112(s1)
-; ILP32D-NEXT:    fsd ft0, 40(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 120(s1)
-; ILP32D-NEXT:    fsd ft0, 32(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 128(s1)
-; ILP32D-NEXT:    fsd ft0, 24(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 136(s1)
-; ILP32D-NEXT:    fsd ft0, 16(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 144(s1)
-; ILP32D-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
-; ILP32D-NEXT:    fld ft0, 152(s1)
-; ILP32D-NEXT:    fsd ft0, 0(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 16(s1)
+; ILP32D-NEXT:    fsd fa5, 136(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 24(s1)
+; ILP32D-NEXT:    fsd fa5, 128(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 32(s1)
+; ILP32D-NEXT:    fsd fa5, 120(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 40(s1)
+; ILP32D-NEXT:    fsd fa5, 112(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 48(s1)
+; ILP32D-NEXT:    fsd fa5, 104(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 56(s1)
+; ILP32D-NEXT:    fsd fa5, 96(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 64(s1)
+; ILP32D-NEXT:    fsd fa5, 88(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 72(s1)
+; ILP32D-NEXT:    fsd fa5, 80(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 80(s1)
+; ILP32D-NEXT:    fsd fa5, 72(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 88(s1)
+; ILP32D-NEXT:    fsd fa5, 64(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 96(s1)
+; ILP32D-NEXT:    fsd fa5, 56(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 104(s1)
+; ILP32D-NEXT:    fsd fa5, 48(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 112(s1)
+; ILP32D-NEXT:    fsd fa5, 40(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 120(s1)
+; ILP32D-NEXT:    fsd fa5, 32(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 128(s1)
+; ILP32D-NEXT:    fsd fa5, 24(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 136(s1)
+; ILP32D-NEXT:    fsd fa5, 16(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 144(s1)
+; ILP32D-NEXT:    fsd fa5, 8(sp) # 8-byte Folded Spill
+; ILP32D-NEXT:    fld fa5, 152(s1)
+; ILP32D-NEXT:    fsd fa5, 0(sp) # 8-byte Folded Spill
 ; ILP32D-NEXT:    fld fs8, 160(s1)
 ; ILP32D-NEXT:    fld fs9, 168(s1)
 ; ILP32D-NEXT:    fld fs10, 176(s1)
@@ -732,46 +732,46 @@ define void @caller() nounwind {
 ; ILP32D-NEXT:    fsd fs10, 176(s1)
 ; ILP32D-NEXT:    fsd fs9, 168(s1)
 ; ILP32D-NEXT:    fsd fs8, 160(s1)
-; ILP32D-NEXT:    fld ft0, 0(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 152(s1)
-; ILP32D-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 144(s1)
-; ILP32D-NEXT:    fld ft0, 16(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 136(s1)
-; ILP32D-NEXT:    fld ft0, 24(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 128(s1)
-; ILP32D-NEXT:    fld ft0, 32(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 120(s1)
-; ILP32D-NEXT:    fld ft0, 40(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 112(s1)
-; ILP32D-NEXT:    fld ft0, 48(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 104(s1)
-; ILP32D-NEXT:    fld ft0, 56(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 96(s1)
-; ILP32D-NEXT:    fld ft0, 64(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 88(s1)
-; ILP32D-NEXT:    fld ft0, 72(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 80(s1)
-; ILP32D-NEXT:    fld ft0, 80(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 72(s1)
-; ILP32D-NEXT:    fld ft0, 88(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 64(s1)
-; ILP32D-NEXT:    fld ft0, 96(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 56(s1)
-; ILP32D-NEXT:    fld ft0, 104(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 48(s1)
-; ILP32D-NEXT:    fld ft0, 112(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 40(s1)
-; ILP32D-NEXT:    fld ft0, 120(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 32(s1)
-; ILP32D-NEXT:    fld ft0, 128(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 24(s1)
-; ILP32D-NEXT:    fld ft0, 136(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, 16(s1)
-; ILP32D-NEXT:    fld ft0, 144(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, %lo(var+8)(s0)
-; ILP32D-NEXT:    fld ft0, 152(sp) # 8-byte Folded Reload
-; ILP32D-NEXT:    fsd ft0, %lo(var)(s0)
+; ILP32D-NEXT:    fld fa5, 0(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 152(s1)
+; ILP32D-NEXT:    fld fa5, 8(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 144(s1)
+; ILP32D-NEXT:    fld fa5, 16(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 136(s1)
+; ILP32D-NEXT:    fld fa5, 24(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 128(s1)
+; ILP32D-NEXT:    fld fa5, 32(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 120(s1)
+; ILP32D-NEXT:    fld fa5, 40(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 112(s1)
+; ILP32D-NEXT:    fld fa5, 48(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 104(s1)
+; ILP32D-NEXT:    fld fa5, 56(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 96(s1)
+; ILP32D-NEXT:    fld fa5, 64(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 88(s1)
+; ILP32D-NEXT:    fld fa5, 72(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 80(s1)
+; ILP32D-NEXT:    fld fa5, 80(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 72(s1)
+; ILP32D-NEXT:    fld fa5, 88(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 64(s1)
+; ILP32D-NEXT:    fld fa5, 96(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 56(s1)
+; ILP32D-NEXT:    fld fa5, 104(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 48(s1)
+; ILP32D-NEXT:    fld fa5, 112(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 40(s1)
+; ILP32D-NEXT:    fld fa5, 120(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 32(s1)
+; ILP32D-NEXT:    fld fa5, 128(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 24(s1)
+; ILP32D-NEXT:    fld fa5, 136(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, 16(s1)
+; ILP32D-NEXT:    fld fa5, 144(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, %lo(var+8)(s0)
+; ILP32D-NEXT:    fld fa5, 152(sp) # 8-byte Folded Reload
+; ILP32D-NEXT:    fsd fa5, %lo(var)(s0)
 ; ILP32D-NEXT:    lw ra, 268(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    lw s0, 264(sp) # 4-byte Folded Reload
 ; ILP32D-NEXT:    lw s1, 260(sp) # 4-byte Folded Reload
@@ -809,47 +809,47 @@ define void @caller() nounwind {
 ; LP64D-NEXT:    fsd fs10, 176(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fsd fs11, 168(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    lui s0, %hi(var)
-; LP64D-NEXT:    fld ft0, %lo(var)(s0)
-; LP64D-NEXT:    fsd ft0, 160(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, %lo(var+8)(s0)
-; LP64D-NEXT:    fsd ft0, 152(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, %lo(var)(s0)
+; LP64D-NEXT:    fsd fa5, 160(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, %lo(var+8)(s0)
+; LP64D-NEXT:    fsd fa5, 152(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    addi s1, s0, %lo(var)
-; LP64D-NEXT:    fld ft0, 16(s1)
-; LP64D-NEXT:    fsd ft0, 144(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 24(s1)
-; LP64D-NEXT:    fsd ft0, 136(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 32(s1)
-; LP64D-NEXT:    fsd ft0, 128(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 40(s1)
-; LP64D-NEXT:    fsd ft0, 120(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 48(s1)
-; LP64D-NEXT:    fsd ft0, 112(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 56(s1)
-; LP64D-NEXT:    fsd ft0, 104(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 64(s1)
-; LP64D-NEXT:    fsd ft0, 96(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 72(s1)
-; LP64D-NEXT:    fsd ft0, 88(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 80(s1)
-; LP64D-NEXT:    fsd ft0, 80(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 88(s1)
-; LP64D-NEXT:    fsd ft0, 72(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 96(s1)
-; LP64D-NEXT:    fsd ft0, 64(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 104(s1)
-; LP64D-NEXT:    fsd ft0, 56(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 112(s1)
-; LP64D-NEXT:    fsd ft0, 48(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 120(s1)
-; LP64D-NEXT:    fsd ft0, 40(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 128(s1)
-; LP64D-NEXT:    fsd ft0, 32(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 136(s1)
-; LP64D-NEXT:    fsd ft0, 24(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 144(s1)
-; LP64D-NEXT:    fsd ft0, 16(sp) # 8-byte Folded Spill
-; LP64D-NEXT:    fld ft0, 152(s1)
-; LP64D-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 16(s1)
+; LP64D-NEXT:    fsd fa5, 144(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 24(s1)
+; LP64D-NEXT:    fsd fa5, 136(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 32(s1)
+; LP64D-NEXT:    fsd fa5, 128(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 40(s1)
+; LP64D-NEXT:    fsd fa5, 120(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 48(s1)
+; LP64D-NEXT:    fsd fa5, 112(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 56(s1)
+; LP64D-NEXT:    fsd fa5, 104(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 64(s1)
+; LP64D-NEXT:    fsd fa5, 96(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 72(s1)
+; LP64D-NEXT:    fsd fa5, 88(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 80(s1)
+; LP64D-NEXT:    fsd fa5, 80(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 88(s1)
+; LP64D-NEXT:    fsd fa5, 72(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 96(s1)
+; LP64D-NEXT:    fsd fa5, 64(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 104(s1)
+; LP64D-NEXT:    fsd fa5, 56(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 112(s1)
+; LP64D-NEXT:    fsd fa5, 48(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 120(s1)
+; LP64D-NEXT:    fsd fa5, 40(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 128(s1)
+; LP64D-NEXT:    fsd fa5, 32(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 136(s1)
+; LP64D-NEXT:    fsd fa5, 24(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 144(s1)
+; LP64D-NEXT:    fsd fa5, 16(sp) # 8-byte Folded Spill
+; LP64D-NEXT:    fld fa5, 152(s1)
+; LP64D-NEXT:    fsd fa5, 8(sp) # 8-byte Folded Spill
 ; LP64D-NEXT:    fld fs8, 160(s1)
 ; LP64D-NEXT:    fld fs9, 168(s1)
 ; LP64D-NEXT:    fld fs10, 176(s1)
@@ -875,46 +875,46 @@ define void @caller() nounwind {
 ; LP64D-NEXT:    fsd fs10, 176(s1)
 ; LP64D-NEXT:    fsd fs9, 168(s1)
 ; LP64D-NEXT:    fsd fs8, 160(s1)
-; LP64D-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 152(s1)
-; LP64D-NEXT:    fld ft0, 16(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 144(s1)
-; LP64D-NEXT:    fld ft0, 24(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 136(s1)
-; LP64D-NEXT:    fld ft0, 32(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 128(s1)
-; LP64D-NEXT:    fld ft0, 40(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 120(s1)
-; LP64D-NEXT:    fld ft0, 48(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 112(s1)
-; LP64D-NEXT:    fld ft0, 56(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 104(s1)
-; LP64D-NEXT:    fld ft0, 64(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 96(s1)
-; LP64D-NEXT:    fld ft0, 72(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 88(s1)
-; LP64D-NEXT:    fld ft0, 80(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 80(s1)
-; LP64D-NEXT:    fld ft0, 88(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 72(s1)
-; LP64D-NEXT:    fld ft0, 96(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 64(s1)
-; LP64D-NEXT:    fld ft0, 104(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 56(s1)
-; LP64D-NEXT:    fld ft0, 112(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 48(s1)
-; LP64D-NEXT:    fld ft0, 120(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 40(s1)
-; LP64D-NEXT:    fld ft0, 128(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 32(s1)
-; LP64D-NEXT:    fld ft0, 136(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 24(s1)
-; LP64D-NEXT:    fld ft0, 144(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, 16(s1)
-; LP64D-NEXT:    fld ft0, 152(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, %lo(var+8)(s0)
-; LP64D-NEXT:    fld ft0, 160(sp) # 8-byte Folded Reload
-; LP64D-NEXT:    fsd ft0, %lo(var)(s0)
+; LP64D-NEXT:    fld fa5, 8(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 152(s1)
+; LP64D-NEXT:    fld fa5, 16(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 144(s1)
+; LP64D-NEXT:    fld fa5, 24(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 136(s1)
+; LP64D-NEXT:    fld fa5, 32(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 128(s1)
+; LP64D-NEXT:    fld fa5, 40(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 120(s1)
+; LP64D-NEXT:    fld fa5, 48(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 112(s1)
+; LP64D-NEXT:    fld fa5, 56(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 104(s1)
+; LP64D-NEXT:    fld fa5, 64(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 96(s1)
+; LP64D-NEXT:    fld fa5, 72(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 88(s1)
+; LP64D-NEXT:    fld fa5, 80(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 80(s1)
+; LP64D-NEXT:    fld fa5, 88(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 72(s1)
+; LP64D-NEXT:    fld fa5, 96(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 64(s1)
+; LP64D-NEXT:    fld fa5, 104(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 56(s1)
+; LP64D-NEXT:    fld fa5, 112(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 48(s1)
+; LP64D-NEXT:    fld fa5, 120(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 40(s1)
+; LP64D-NEXT:    fld fa5, 128(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 32(s1)
+; LP64D-NEXT:    fld fa5, 136(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 24(s1)
+; LP64D-NEXT:    fld fa5, 144(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, 16(s1)
+; LP64D-NEXT:    fld fa5, 152(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, %lo(var+8)(s0)
+; LP64D-NEXT:    fld fa5, 160(sp) # 8-byte Folded Reload
+; LP64D-NEXT:    fsd fa5, %lo(var)(s0)
 ; LP64D-NEXT:    ld ra, 280(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    ld s0, 272(sp) # 8-byte Folded Reload
 ; LP64D-NEXT:    ld s1, 264(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
index 3c4088522f44..3b89158d7bfe 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
@@ -53,8 +53,8 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
 ; RV32IF-NEXT:    mv s0, a0
 ; RV32IF-NEXT:    mv a0, a1
 ; RV32IF-NEXT:    call __extendhfsf2@plt
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IF-NEXT:    add a0, s0, a0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -69,8 +69,8 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
 ; RV64IF-NEXT:    mv s0, a0
 ; RV64IF-NEXT:    mv a0, a1
 ; RV64IF-NEXT:    call __extendhfsf2@plt
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IF-NEXT:    addw a0, s0, a0
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
@@ -109,15 +109,15 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
 ;
 ; RV32-ILP32ZFHMIN-LABEL: callee_half_in_regs:
 ; RV32-ILP32ZFHMIN:       # %bb.0:
-; RV32-ILP32ZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32-ILP32ZFHMIN-NEXT:    fcvt.w.s a1, ft0, rtz
+; RV32-ILP32ZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32-ILP32ZFHMIN-NEXT:    fcvt.w.s a1, fa5, rtz
 ; RV32-ILP32ZFHMIN-NEXT:    add a0, a0, a1
 ; RV32-ILP32ZFHMIN-NEXT:    ret
 ;
 ; RV64-LP64ZFHMIN-LABEL: callee_half_in_regs:
 ; RV64-LP64ZFHMIN:       # %bb.0:
-; RV64-LP64ZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64-LP64ZFHMIN-NEXT:    fcvt.w.s a1, ft0, rtz
+; RV64-LP64ZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64-LP64ZFHMIN-NEXT:    fcvt.w.s a1, fa5, rtz
 ; RV64-LP64ZFHMIN-NEXT:    addw a0, a0, a1
 ; RV64-LP64ZFHMIN-NEXT:    ret
   %b_fptosi = fptosi half %b to i32
@@ -164,8 +164,8 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    lui a0, 1048564
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.x.w a1, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fmv.x.w a1, fa5
 ; RV64IF-NEXT:    li a0, 1
 ; RV64IF-NEXT:    call callee_half_in_regs@plt
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -262,8 +262,8 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
 ; RV32IF-NEXT:    lhu a0, 16(sp)
 ; RV32IF-NEXT:    mv s0, a7
 ; RV32IF-NEXT:    call __extendhfsf2@plt
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IF-NEXT:    add a0, s0, a0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -278,8 +278,8 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
 ; RV64IF-NEXT:    lhu a0, 16(sp)
 ; RV64IF-NEXT:    mv s0, a7
 ; RV64IF-NEXT:    call __extendhfsf2@plt
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IF-NEXT:    addw a0, s0, a0
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
@@ -318,15 +318,15 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
 ;
 ; RV32-ILP32ZFHMIN-LABEL: callee_half_on_stack:
 ; RV32-ILP32ZFHMIN:       # %bb.0:
-; RV32-ILP32ZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32-ILP32ZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-ILP32ZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32-ILP32ZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32-ILP32ZFHMIN-NEXT:    add a0, a7, a0
 ; RV32-ILP32ZFHMIN-NEXT:    ret
 ;
 ; RV64-LP64ZFHMIN-LABEL: callee_half_on_stack:
 ; RV64-LP64ZFHMIN:       # %bb.0:
-; RV64-LP64ZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64-LP64ZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV64-LP64ZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64-LP64ZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV64-LP64ZFHMIN-NEXT:    addw a0, a7, a0
 ; RV64-LP64ZFHMIN-NEXT:    ret
   %1 = fptosi half %i to i32
@@ -516,8 +516,8 @@ define half @callee_half_ret() nounwind {
 ; RV64IF-LABEL: callee_half_ret:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lui a0, %hi(.LCPI4_0)
-; RV64IF-NEXT:    flw ft0, %lo(.LCPI4_0)(a0)
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    flw fa5, %lo(.LCPI4_0)(a0)
+; RV64IF-NEXT:    fmv.x.w a0, fa5
 ; RV64IF-NEXT:    ret
 ;
 ; RV32-ILP32F-LABEL: callee_half_ret:
@@ -579,8 +579,8 @@ define i32 @caller_half_ret() nounwind {
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    call callee_half_ret@plt
 ; RV32IF-NEXT:    call __extendhfsf2@plt
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -591,8 +591,8 @@ define i32 @caller_half_ret() nounwind {
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    call callee_half_ret@plt
 ; RV64IF-NEXT:    call __extendhfsf2@plt
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
 ; RV64IF-NEXT:    ret
@@ -626,8 +626,8 @@ define i32 @caller_half_ret() nounwind {
 ; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32-ILP32ZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32ZFHMIN-NEXT:    call callee_half_ret@plt
-; RV32-ILP32ZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32-ILP32ZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-ILP32ZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32-ILP32ZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32-ILP32ZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32-ILP32ZFHMIN-NEXT:    ret
@@ -637,8 +637,8 @@ define i32 @caller_half_ret() nounwind {
 ; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64-LP64ZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-LP64ZFHMIN-NEXT:    call callee_half_ret@plt
-; RV64-LP64ZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64-LP64ZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV64-LP64ZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64-LP64ZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV64-LP64ZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, 16
 ; RV64-LP64ZFHMIN-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
index 3a4e4a94cc7f..4897170a82f2 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
@@ -79,9 +79,9 @@ define i32 @callee_double_in_gpr_exhausted_fprs(double %a, double %b, double %c,
 ; RV32-ILP32D-NEXT:    addi sp, sp, -16
 ; RV32-ILP32D-NEXT:    sw a0, 8(sp)
 ; RV32-ILP32D-NEXT:    sw a1, 12(sp)
-; RV32-ILP32D-NEXT:    fld ft0, 8(sp)
+; RV32-ILP32D-NEXT:    fld fa5, 8(sp)
 ; RV32-ILP32D-NEXT:    fcvt.w.d a0, fa7, rtz
-; RV32-ILP32D-NEXT:    fcvt.w.d a1, ft0, rtz
+; RV32-ILP32D-NEXT:    fcvt.w.d a1, fa5, rtz
 ; RV32-ILP32D-NEXT:    add a0, a0, a1
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
 ; RV32-ILP32D-NEXT:    ret
@@ -132,8 +132,8 @@ define i32 @callee_double_in_gpr_and_stack_almost_exhausted_gprs_fprs(i64 %a, do
 ; RV32-ILP32D-NEXT:    lw a0, 16(sp)
 ; RV32-ILP32D-NEXT:    sw a7, 8(sp)
 ; RV32-ILP32D-NEXT:    sw a0, 12(sp)
-; RV32-ILP32D-NEXT:    fld ft0, 8(sp)
-; RV32-ILP32D-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-ILP32D-NEXT:    fld fa5, 8(sp)
+; RV32-ILP32D-NEXT:    fcvt.w.d a0, fa5, rtz
 ; RV32-ILP32D-NEXT:    add a0, a6, a0
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
 ; RV32-ILP32D-NEXT:    ret
@@ -188,8 +188,8 @@ define i32 @caller_double_in_gpr_and_stack_almost_exhausted_gprs_fprs() nounwind
 define i32 @callee_double_on_stack_exhausted_gprs_fprs(i64 %a, double %b, i64 %c, double %d, i64 %e, double %f, i64 %g, double %h, double %i, double %j, double %k, double %l, double %m) nounwind {
 ; RV32-ILP32D-LABEL: callee_double_on_stack_exhausted_gprs_fprs:
 ; RV32-ILP32D:       # %bb.0:
-; RV32-ILP32D-NEXT:    fld ft0, 0(sp)
-; RV32-ILP32D-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-ILP32D-NEXT:    fld fa5, 0(sp)
+; RV32-ILP32D-NEXT:    fcvt.w.d a0, fa5, rtz
 ; RV32-ILP32D-NEXT:    add a0, a6, a0
 ; RV32-ILP32D-NEXT:    ret
   %g_trunc = trunc i64 %g to i32

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
index c773ebbb3bf2..bb51f71358ad 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
@@ -79,9 +79,9 @@ define i32 @caller_float_in_fpr_exhausted_gprs() nounwind {
 define i32 @callee_float_in_gpr_exhausted_fprs(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, float %i) nounwind {
 ; RV32-ILP32FD-LABEL: callee_float_in_gpr_exhausted_fprs:
 ; RV32-ILP32FD:       # %bb.0:
-; RV32-ILP32FD-NEXT:    fmv.w.x ft0, a0
+; RV32-ILP32FD-NEXT:    fmv.w.x fa5, a0
 ; RV32-ILP32FD-NEXT:    fcvt.w.s a0, fa7, rtz
-; RV32-ILP32FD-NEXT:    fcvt.w.s a1, ft0, rtz
+; RV32-ILP32FD-NEXT:    fcvt.w.s a1, fa5, rtz
 ; RV32-ILP32FD-NEXT:    add a0, a0, a1
 ; RV32-ILP32FD-NEXT:    ret
   %h_fptosi = fptosi float %h to i32
@@ -126,8 +126,8 @@ define i32 @caller_float_in_gpr_exhausted_fprs() nounwind {
 define i32 @callee_float_on_stack_exhausted_gprs_fprs(i64 %a, float %b, i64 %c, float %d, i64 %e, float %f, i64 %g, float %h, float %i, float %j, float %k, float %l, float %m) nounwind {
 ; RV32-ILP32FD-LABEL: callee_float_on_stack_exhausted_gprs_fprs:
 ; RV32-ILP32FD:       # %bb.0:
-; RV32-ILP32FD-NEXT:    flw ft0, 0(sp)
-; RV32-ILP32FD-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-ILP32FD-NEXT:    flw fa5, 0(sp)
+; RV32-ILP32FD-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32-ILP32FD-NEXT:    add a0, a6, a0
 ; RV32-ILP32FD-NEXT:    ret
   %g_trunc = trunc i64 %g to i32

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll b/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
index bbd08ae15817..3916bcdc61da 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
@@ -18,10 +18,10 @@ define float @onstack_f32_noop(i64 %a, i64 %b, i64 %c, i64 %d, float %e, float %
 define float @onstack_f32_fadd(i64 %a, i64 %b, i64 %c, i64 %d, float %e, float %f) nounwind {
 ; RV32IF-LABEL: onstack_f32_fadd:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    flw ft0, 4(sp)
-; RV32IF-NEXT:    flw ft1, 0(sp)
-; RV32IF-NEXT:    fadd.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    flw fa5, 4(sp)
+; RV32IF-NEXT:    flw fa4, 0(sp)
+; RV32IF-NEXT:    fadd.s fa5, fa4, fa5
+; RV32IF-NEXT:    fmv.x.w a0, fa5
 ; RV32IF-NEXT:    ret
   %1 = fadd float %e, %f
   ret float %1
@@ -56,16 +56,16 @@ define float @caller_onstack_f32_fadd(float %a, float %b) nounwind {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fadd.s ft2, ft1, ft0
-; RV32IF-NEXT:    fsub.s ft0, ft0, ft1
-; RV32IF-NEXT:    fsw ft0, 4(sp)
+; RV32IF-NEXT:    fmv.w.x fa5, a1
+; RV32IF-NEXT:    fmv.w.x fa4, a0
+; RV32IF-NEXT:    fadd.s fa3, fa4, fa5
+; RV32IF-NEXT:    fsub.s fa5, fa5, fa4
+; RV32IF-NEXT:    fsw fa5, 4(sp)
 ; RV32IF-NEXT:    li a0, 1
 ; RV32IF-NEXT:    li a2, 2
 ; RV32IF-NEXT:    li a4, 3
 ; RV32IF-NEXT:    li a6, 4
-; RV32IF-NEXT:    fsw ft2, 0(sp)
+; RV32IF-NEXT:    fsw fa3, 0(sp)
 ; RV32IF-NEXT:    li a1, 0
 ; RV32IF-NEXT:    li a3, 0
 ; RV32IF-NEXT:    li a5, 0

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll b/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll
index e4217bf92ac8..19a5d9ca03c6 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll
@@ -7,14 +7,14 @@
 define <2 x float> @callee_v2f32(<2 x float> %x, <2 x float> %y) {
 ; RV64-LABEL: callee_v2f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fmv.w.x ft0, a2
-; RV64-NEXT:    fmv.w.x ft1, a0
-; RV64-NEXT:    fmv.w.x ft2, a3
-; RV64-NEXT:    fmv.w.x ft3, a1
-; RV64-NEXT:    fadd.s ft2, ft3, ft2
-; RV64-NEXT:    fadd.s ft0, ft1, ft0
-; RV64-NEXT:    fmv.x.w a0, ft0
-; RV64-NEXT:    fmv.x.w a1, ft2
+; RV64-NEXT:    fmv.w.x fa5, a2
+; RV64-NEXT:    fmv.w.x fa4, a0
+; RV64-NEXT:    fmv.w.x fa3, a3
+; RV64-NEXT:    fmv.w.x fa2, a1
+; RV64-NEXT:    fadd.s fa3, fa2, fa3
+; RV64-NEXT:    fadd.s fa5, fa4, fa5
+; RV64-NEXT:    fmv.x.w a0, fa5
+; RV64-NEXT:    fmv.x.w a1, fa3
 ; RV64-NEXT:    ret
 ;
 ; RV64LP64F-LABEL: callee_v2f32:
@@ -29,34 +29,34 @@ define <2 x float> @callee_v2f32(<2 x float> %x, <2 x float> %y) {
 define <4 x float> @callee_v4f32(<4 x float> %x, <4 x float> %y) {
 ; RV64-LABEL: callee_v4f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fmv.w.x ft0, a4
-; RV64-NEXT:    fmv.w.x ft1, a7
-; RV64-NEXT:    fmv.w.x ft2, a3
-; RV64-NEXT:    fmv.w.x ft3, a6
-; RV64-NEXT:    fmv.w.x ft4, a2
-; RV64-NEXT:    fmv.w.x ft5, a5
-; RV64-NEXT:    fmv.w.x ft6, a1
-; RV64-NEXT:    flw ft7, 0(sp)
-; RV64-NEXT:    fadd.s ft5, ft6, ft5
-; RV64-NEXT:    fadd.s ft3, ft4, ft3
-; RV64-NEXT:    fadd.s ft1, ft2, ft1
-; RV64-NEXT:    fadd.s ft0, ft0, ft7
-; RV64-NEXT:    fsw ft0, 12(a0)
-; RV64-NEXT:    fsw ft1, 8(a0)
-; RV64-NEXT:    fsw ft3, 4(a0)
-; RV64-NEXT:    fsw ft5, 0(a0)
+; RV64-NEXT:    fmv.w.x fa5, a4
+; RV64-NEXT:    fmv.w.x fa4, a7
+; RV64-NEXT:    fmv.w.x fa3, a3
+; RV64-NEXT:    fmv.w.x fa2, a6
+; RV64-NEXT:    fmv.w.x fa1, a2
+; RV64-NEXT:    fmv.w.x fa0, a5
+; RV64-NEXT:    fmv.w.x ft0, a1
+; RV64-NEXT:    flw ft1, 0(sp)
+; RV64-NEXT:    fadd.s fa0, ft0, fa0
+; RV64-NEXT:    fadd.s fa2, fa1, fa2
+; RV64-NEXT:    fadd.s fa4, fa3, fa4
+; RV64-NEXT:    fadd.s fa5, fa5, ft1
+; RV64-NEXT:    fsw fa5, 12(a0)
+; RV64-NEXT:    fsw fa4, 8(a0)
+; RV64-NEXT:    fsw fa2, 4(a0)
+; RV64-NEXT:    fsw fa0, 0(a0)
 ; RV64-NEXT:    ret
 ;
 ; RV64LP64F-LABEL: callee_v4f32:
 ; RV64LP64F:       # %bb.0:
-; RV64LP64F-NEXT:    fadd.s ft0, fa0, fa4
-; RV64LP64F-NEXT:    fadd.s ft1, fa1, fa5
-; RV64LP64F-NEXT:    fadd.s ft2, fa2, fa6
-; RV64LP64F-NEXT:    fadd.s ft3, fa3, fa7
-; RV64LP64F-NEXT:    fsw ft3, 12(a0)
-; RV64LP64F-NEXT:    fsw ft2, 8(a0)
-; RV64LP64F-NEXT:    fsw ft1, 4(a0)
-; RV64LP64F-NEXT:    fsw ft0, 0(a0)
+; RV64LP64F-NEXT:    fadd.s fa4, fa0, fa4
+; RV64LP64F-NEXT:    fadd.s fa5, fa1, fa5
+; RV64LP64F-NEXT:    fadd.s fa2, fa2, fa6
+; RV64LP64F-NEXT:    fadd.s fa3, fa3, fa7
+; RV64LP64F-NEXT:    fsw fa3, 12(a0)
+; RV64LP64F-NEXT:    fsw fa2, 8(a0)
+; RV64LP64F-NEXT:    fsw fa5, 4(a0)
+; RV64LP64F-NEXT:    fsw fa4, 0(a0)
 ; RV64LP64F-NEXT:    ret
   %z = fadd <4 x float> %x, %y
   ret <4 x float> %z

diff --git a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
index 41aabbb26431..d3f822186a63 100644
--- a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
+++ b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
@@ -125,15 +125,15 @@ define float @lower_constantpool(float %a) nounwind {
 ; RV32I-SMALL-LABEL: lower_constantpool:
 ; RV32I-SMALL:       # %bb.0:
 ; RV32I-SMALL-NEXT:    lui a0, 260096
-; RV32I-SMALL-NEXT:    fmv.w.x ft0, a0
-; RV32I-SMALL-NEXT:    fadd.s fa0, fa0, ft0
+; RV32I-SMALL-NEXT:    fmv.w.x fa5, a0
+; RV32I-SMALL-NEXT:    fadd.s fa0, fa0, fa5
 ; RV32I-SMALL-NEXT:    ret
 ;
 ; RV32I-MEDIUM-LABEL: lower_constantpool:
 ; RV32I-MEDIUM:       # %bb.0:
 ; RV32I-MEDIUM-NEXT:    lui a0, 260096
-; RV32I-MEDIUM-NEXT:    fmv.w.x ft0, a0
-; RV32I-MEDIUM-NEXT:    fadd.s fa0, fa0, ft0
+; RV32I-MEDIUM-NEXT:    fmv.w.x fa5, a0
+; RV32I-MEDIUM-NEXT:    fadd.s fa0, fa0, fa5
 ; RV32I-MEDIUM-NEXT:    ret
   %1 = fadd float %a, 1.0
   ret float %1

diff --git a/llvm/test/CodeGen/RISCV/copysign-casts.ll b/llvm/test/CodeGen/RISCV/copysign-casts.ll
index 2cd05c7035d5..373edfaae782 100644
--- a/llvm/test/CodeGen/RISCV/copysign-casts.ll
+++ b/llvm/test/CodeGen/RISCV/copysign-casts.ll
@@ -67,14 +67,14 @@ define double @fold_promote_d_s(double %a, float %b) nounwind {
 ;
 ; RV32IFD-LABEL: fold_promote_d_s:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.d.s ft0, fa1
-; RV32IFD-NEXT:    fsgnj.d fa0, fa0, ft0
+; RV32IFD-NEXT:    fcvt.d.s fa5, fa1
+; RV32IFD-NEXT:    fsgnj.d fa0, fa0, fa5
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fold_promote_d_s:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.d.s ft0, fa1
-; RV64IFD-NEXT:    fsgnj.d fa0, fa0, ft0
+; RV64IFD-NEXT:    fcvt.d.s fa5, fa1
+; RV64IFD-NEXT:    fsgnj.d fa0, fa0, fa5
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32IFZFH-LABEL: fold_promote_d_s:
@@ -89,14 +89,14 @@ define double @fold_promote_d_s(double %a, float %b) nounwind {
 ;
 ; RV32IFDZFH-LABEL: fold_promote_d_s:
 ; RV32IFDZFH:       # %bb.0:
-; RV32IFDZFH-NEXT:    fcvt.d.s ft0, fa1
-; RV32IFDZFH-NEXT:    fsgnj.d fa0, fa0, ft0
+; RV32IFDZFH-NEXT:    fcvt.d.s fa5, fa1
+; RV32IFDZFH-NEXT:    fsgnj.d fa0, fa0, fa5
 ; RV32IFDZFH-NEXT:    ret
 ;
 ; RV64IFDZFH-LABEL: fold_promote_d_s:
 ; RV64IFDZFH:       # %bb.0:
-; RV64IFDZFH-NEXT:    fcvt.d.s ft0, fa1
-; RV64IFDZFH-NEXT:    fsgnj.d fa0, fa0, ft0
+; RV64IFDZFH-NEXT:    fcvt.d.s fa5, fa1
+; RV64IFDZFH-NEXT:    fsgnj.d fa0, fa0, fa5
 ; RV64IFDZFH-NEXT:    ret
 ;
 ; RV32IFZFHMIN-LABEL: fold_promote_d_s:
@@ -111,14 +111,14 @@ define double @fold_promote_d_s(double %a, float %b) nounwind {
 ;
 ; RV32IFDZFHMIN-LABEL: fold_promote_d_s:
 ; RV32IFDZFHMIN:       # %bb.0:
-; RV32IFDZFHMIN-NEXT:    fcvt.d.s ft0, fa1
-; RV32IFDZFHMIN-NEXT:    fsgnj.d fa0, fa0, ft0
+; RV32IFDZFHMIN-NEXT:    fcvt.d.s fa5, fa1
+; RV32IFDZFHMIN-NEXT:    fsgnj.d fa0, fa0, fa5
 ; RV32IFDZFHMIN-NEXT:    ret
 ;
 ; RV64IFDZFHMIN-LABEL: fold_promote_d_s:
 ; RV64IFDZFHMIN:       # %bb.0:
-; RV64IFDZFHMIN-NEXT:    fcvt.d.s ft0, fa1
-; RV64IFDZFHMIN-NEXT:    fsgnj.d fa0, fa0, ft0
+; RV64IFDZFHMIN-NEXT:    fcvt.d.s fa5, fa1
+; RV64IFDZFHMIN-NEXT:    fsgnj.d fa0, fa0, fa5
 ; RV64IFDZFHMIN-NEXT:    ret
   %c = fpext float %b to double
   %t = call double @llvm.copysign.f64(double %a, double %c)
@@ -165,8 +165,8 @@ define double @fold_promote_d_h(double %a, half %b) nounwind {
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    fmv.x.w a0, fa1
 ; RV32IFD-NEXT:    call __extendhfsf2@plt
-; RV32IFD-NEXT:    fcvt.d.s ft0, fa0
-; RV32IFD-NEXT:    fsgnj.d fa0, fs0, ft0
+; RV32IFD-NEXT:    fcvt.d.s fa5, fa0
+; RV32IFD-NEXT:    fsgnj.d fa0, fs0, fa5
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -180,8 +180,8 @@ define double @fold_promote_d_h(double %a, half %b) nounwind {
 ; RV64IFD-NEXT:    fmv.d fs0, fa0
 ; RV64IFD-NEXT:    fmv.x.w a0, fa1
 ; RV64IFD-NEXT:    call __extendhfsf2@plt
-; RV64IFD-NEXT:    fcvt.d.s ft0, fa0
-; RV64IFD-NEXT:    fsgnj.d fa0, fs0, ft0
+; RV64IFD-NEXT:    fcvt.d.s fa5, fa0
+; RV64IFD-NEXT:    fsgnj.d fa0, fs0, fa5
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -200,14 +200,14 @@ define double @fold_promote_d_h(double %a, half %b) nounwind {
 ;
 ; RV32IFDZFH-LABEL: fold_promote_d_h:
 ; RV32IFDZFH:       # %bb.0:
-; RV32IFDZFH-NEXT:    fcvt.d.h ft0, fa1
-; RV32IFDZFH-NEXT:    fsgnj.d fa0, fa0, ft0
+; RV32IFDZFH-NEXT:    fcvt.d.h fa5, fa1
+; RV32IFDZFH-NEXT:    fsgnj.d fa0, fa0, fa5
 ; RV32IFDZFH-NEXT:    ret
 ;
 ; RV64IFDZFH-LABEL: fold_promote_d_h:
 ; RV64IFDZFH:       # %bb.0:
-; RV64IFDZFH-NEXT:    fcvt.d.h ft0, fa1
-; RV64IFDZFH-NEXT:    fsgnj.d fa0, fa0, ft0
+; RV64IFDZFH-NEXT:    fcvt.d.h fa5, fa1
+; RV64IFDZFH-NEXT:    fsgnj.d fa0, fa0, fa5
 ; RV64IFDZFH-NEXT:    ret
 ;
 ; RV32IFZFHMIN-LABEL: fold_promote_d_h:
@@ -223,14 +223,14 @@ define double @fold_promote_d_h(double %a, half %b) nounwind {
 ;
 ; RV32IFDZFHMIN-LABEL: fold_promote_d_h:
 ; RV32IFDZFHMIN:       # %bb.0:
-; RV32IFDZFHMIN-NEXT:    fcvt.d.h ft0, fa1
-; RV32IFDZFHMIN-NEXT:    fsgnj.d fa0, fa0, ft0
+; RV32IFDZFHMIN-NEXT:    fcvt.d.h fa5, fa1
+; RV32IFDZFHMIN-NEXT:    fsgnj.d fa0, fa0, fa5
 ; RV32IFDZFHMIN-NEXT:    ret
 ;
 ; RV64IFDZFHMIN-LABEL: fold_promote_d_h:
 ; RV64IFDZFHMIN:       # %bb.0:
-; RV64IFDZFHMIN-NEXT:    fcvt.d.h ft0, fa1
-; RV64IFDZFHMIN-NEXT:    fsgnj.d fa0, fa0, ft0
+; RV64IFDZFHMIN-NEXT:    fcvt.d.h fa5, fa1
+; RV64IFDZFHMIN-NEXT:    fsgnj.d fa0, fa0, fa5
 ; RV64IFDZFHMIN-NEXT:    ret
   %c = fpext half %b to double
   %t = call double @llvm.copysign.f64(double %a, double %c)
@@ -302,38 +302,38 @@ define float @fold_promote_f_h(float %a, half %b) nounwind {
 ;
 ; RV32IFZFH-LABEL: fold_promote_f_h:
 ; RV32IFZFH:       # %bb.0:
-; RV32IFZFH-NEXT:    fcvt.s.h ft0, fa1
-; RV32IFZFH-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV32IFZFH-NEXT:    fcvt.s.h fa5, fa1
+; RV32IFZFH-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV32IFZFH-NEXT:    ret
 ;
 ; RV32IFDZFH-LABEL: fold_promote_f_h:
 ; RV32IFDZFH:       # %bb.0:
-; RV32IFDZFH-NEXT:    fcvt.s.h ft0, fa1
-; RV32IFDZFH-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV32IFDZFH-NEXT:    fcvt.s.h fa5, fa1
+; RV32IFDZFH-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV32IFDZFH-NEXT:    ret
 ;
 ; RV64IFDZFH-LABEL: fold_promote_f_h:
 ; RV64IFDZFH:       # %bb.0:
-; RV64IFDZFH-NEXT:    fcvt.s.h ft0, fa1
-; RV64IFDZFH-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV64IFDZFH-NEXT:    fcvt.s.h fa5, fa1
+; RV64IFDZFH-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV64IFDZFH-NEXT:    ret
 ;
 ; RV32IFZFHMIN-LABEL: fold_promote_f_h:
 ; RV32IFZFHMIN:       # %bb.0:
-; RV32IFZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV32IFZFHMIN-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV32IFZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV32IFZFHMIN-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV32IFZFHMIN-NEXT:    ret
 ;
 ; RV32IFDZFHMIN-LABEL: fold_promote_f_h:
 ; RV32IFDZFHMIN:       # %bb.0:
-; RV32IFDZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV32IFDZFHMIN-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV32IFDZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV32IFDZFHMIN-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV32IFDZFHMIN-NEXT:    ret
 ;
 ; RV64IFDZFHMIN-LABEL: fold_promote_f_h:
 ; RV64IFDZFHMIN:       # %bb.0:
-; RV64IFDZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IFDZFHMIN-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV64IFDZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IFDZFHMIN-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV64IFDZFHMIN-NEXT:    ret
   %c = fpext half %b to float
   %t = call float @llvm.copysign.f32(float %a, float %c)
@@ -362,56 +362,56 @@ define float @fold_demote_s_d(float %a, double %b) nounwind {
 ;
 ; RV32IF-LABEL: fold_demote_s_d:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a1
+; RV32IF-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV32IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: fold_demote_s_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.s.d ft0, fa1
-; RV32IFD-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV32IFD-NEXT:    fcvt.s.d fa5, fa1
+; RV32IFD-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fold_demote_s_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.s.d ft0, fa1
-; RV64IFD-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV64IFD-NEXT:    fcvt.s.d fa5, fa1
+; RV64IFD-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32IFZFH-LABEL: fold_demote_s_d:
 ; RV32IFZFH:       # %bb.0:
-; RV32IFZFH-NEXT:    fmv.w.x ft0, a1
-; RV32IFZFH-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV32IFZFH-NEXT:    fmv.w.x fa5, a1
+; RV32IFZFH-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV32IFZFH-NEXT:    ret
 ;
 ; RV32IFDZFH-LABEL: fold_demote_s_d:
 ; RV32IFDZFH:       # %bb.0:
-; RV32IFDZFH-NEXT:    fcvt.s.d ft0, fa1
-; RV32IFDZFH-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV32IFDZFH-NEXT:    fcvt.s.d fa5, fa1
+; RV32IFDZFH-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV32IFDZFH-NEXT:    ret
 ;
 ; RV64IFDZFH-LABEL: fold_demote_s_d:
 ; RV64IFDZFH:       # %bb.0:
-; RV64IFDZFH-NEXT:    fcvt.s.d ft0, fa1
-; RV64IFDZFH-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV64IFDZFH-NEXT:    fcvt.s.d fa5, fa1
+; RV64IFDZFH-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV64IFDZFH-NEXT:    ret
 ;
 ; RV32IFZFHMIN-LABEL: fold_demote_s_d:
 ; RV32IFZFHMIN:       # %bb.0:
-; RV32IFZFHMIN-NEXT:    fmv.w.x ft0, a1
-; RV32IFZFHMIN-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV32IFZFHMIN-NEXT:    fmv.w.x fa5, a1
+; RV32IFZFHMIN-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV32IFZFHMIN-NEXT:    ret
 ;
 ; RV32IFDZFHMIN-LABEL: fold_demote_s_d:
 ; RV32IFDZFHMIN:       # %bb.0:
-; RV32IFDZFHMIN-NEXT:    fcvt.s.d ft0, fa1
-; RV32IFDZFHMIN-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV32IFDZFHMIN-NEXT:    fcvt.s.d fa5, fa1
+; RV32IFDZFHMIN-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV32IFDZFHMIN-NEXT:    ret
 ;
 ; RV64IFDZFHMIN-LABEL: fold_demote_s_d:
 ; RV64IFDZFHMIN:       # %bb.0:
-; RV64IFDZFHMIN-NEXT:    fcvt.s.d ft0, fa1
-; RV64IFDZFHMIN-NEXT:    fsgnj.s fa0, fa0, ft0
+; RV64IFDZFHMIN-NEXT:    fcvt.s.d fa5, fa1
+; RV64IFDZFHMIN-NEXT:    fsgnj.s fa0, fa0, fa5
 ; RV64IFDZFHMIN-NEXT:    ret
   %c = fptrunc double %b to float
   %t = call float @llvm.copysign.f32(float %a, float %c)
@@ -485,20 +485,20 @@ define half @fold_demote_h_s(half %a, float %b) nounwind {
 ;
 ; RV32IFZFH-LABEL: fold_demote_h_s:
 ; RV32IFZFH:       # %bb.0:
-; RV32IFZFH-NEXT:    fcvt.h.s ft0, fa1
-; RV32IFZFH-NEXT:    fsgnj.h fa0, fa0, ft0
+; RV32IFZFH-NEXT:    fcvt.h.s fa5, fa1
+; RV32IFZFH-NEXT:    fsgnj.h fa0, fa0, fa5
 ; RV32IFZFH-NEXT:    ret
 ;
 ; RV32IFDZFH-LABEL: fold_demote_h_s:
 ; RV32IFDZFH:       # %bb.0:
-; RV32IFDZFH-NEXT:    fcvt.h.s ft0, fa1
-; RV32IFDZFH-NEXT:    fsgnj.h fa0, fa0, ft0
+; RV32IFDZFH-NEXT:    fcvt.h.s fa5, fa1
+; RV32IFDZFH-NEXT:    fsgnj.h fa0, fa0, fa5
 ; RV32IFDZFH-NEXT:    ret
 ;
 ; RV64IFDZFH-LABEL: fold_demote_h_s:
 ; RV64IFDZFH:       # %bb.0:
-; RV64IFDZFH-NEXT:    fcvt.h.s ft0, fa1
-; RV64IFDZFH-NEXT:    fsgnj.h fa0, fa0, ft0
+; RV64IFDZFH-NEXT:    fcvt.h.s fa5, fa1
+; RV64IFDZFH-NEXT:    fsgnj.h fa0, fa0, fa5
 ; RV64IFDZFH-NEXT:    ret
 ;
 ; RV32IFZFHMIN-LABEL: fold_demote_h_s:
@@ -623,20 +623,20 @@ define half @fold_demote_h_d(half %a, double %b) nounwind {
 ; RV32IFZFH-LABEL: fold_demote_h_d:
 ; RV32IFZFH:       # %bb.0:
 ; RV32IFZFH-NEXT:    srli a1, a1, 16
-; RV32IFZFH-NEXT:    fmv.h.x ft0, a1
-; RV32IFZFH-NEXT:    fsgnj.h fa0, fa0, ft0
+; RV32IFZFH-NEXT:    fmv.h.x fa5, a1
+; RV32IFZFH-NEXT:    fsgnj.h fa0, fa0, fa5
 ; RV32IFZFH-NEXT:    ret
 ;
 ; RV32IFDZFH-LABEL: fold_demote_h_d:
 ; RV32IFDZFH:       # %bb.0:
-; RV32IFDZFH-NEXT:    fcvt.h.d ft0, fa1
-; RV32IFDZFH-NEXT:    fsgnj.h fa0, fa0, ft0
+; RV32IFDZFH-NEXT:    fcvt.h.d fa5, fa1
+; RV32IFDZFH-NEXT:    fsgnj.h fa0, fa0, fa5
 ; RV32IFDZFH-NEXT:    ret
 ;
 ; RV64IFDZFH-LABEL: fold_demote_h_d:
 ; RV64IFDZFH:       # %bb.0:
-; RV64IFDZFH-NEXT:    fcvt.h.d ft0, fa1
-; RV64IFDZFH-NEXT:    fsgnj.h fa0, fa0, ft0
+; RV64IFDZFH-NEXT:    fcvt.h.d fa5, fa1
+; RV64IFDZFH-NEXT:    fsgnj.h fa0, fa0, fa5
 ; RV64IFDZFH-NEXT:    ret
 ;
 ; RV32IFZFHMIN-LABEL: fold_demote_h_d:
@@ -644,8 +644,8 @@ define half @fold_demote_h_d(half %a, double %b) nounwind {
 ; RV32IFZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IFZFHMIN-NEXT:    fsh fa0, 8(sp)
 ; RV32IFZFHMIN-NEXT:    srli a1, a1, 16
-; RV32IFZFHMIN-NEXT:    fmv.h.x ft0, a1
-; RV32IFZFHMIN-NEXT:    fsh ft0, 12(sp)
+; RV32IFZFHMIN-NEXT:    fmv.h.x fa5, a1
+; RV32IFZFHMIN-NEXT:    fsh fa5, 12(sp)
 ; RV32IFZFHMIN-NEXT:    lbu a0, 9(sp)
 ; RV32IFZFHMIN-NEXT:    lbu a1, 13(sp)
 ; RV32IFZFHMIN-NEXT:    andi a0, a0, 127

diff --git a/llvm/test/CodeGen/RISCV/double-arith-strict.ll b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
index 7bd315439951..aa278b7513e4 100644
--- a/llvm/test/CodeGen/RISCV/double-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
@@ -263,16 +263,16 @@ declare double @llvm.experimental.constrained.fma.f64(double, double, double, me
 define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32IFD-LABEL: fmsub_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV32IFD-NEXT:    fmsub.d fa0, fa0, fa1, ft0
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV32IFD-NEXT:    fmsub.d fa0, fa0, fa1, fa5
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmsub_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, zero
-; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV64IFD-NEXT:    fmsub.d fa0, fa0, fa1, ft0
+; RV64IFD-NEXT:    fmv.d.x fa5, zero
+; RV64IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV64IFD-NEXT:    fmsub.d fa0, fa0, fa1, fa5
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmsub_d:
@@ -339,18 +339,18 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
 define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32IFD-LABEL: fnmadd_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fadd.d ft1, fa0, ft0
-; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV32IFD-NEXT:    fnmadd.d fa0, ft1, fa1, ft0
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fadd.d fa4, fa0, fa5
+; RV32IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV32IFD-NEXT:    fnmadd.d fa0, fa4, fa1, fa5
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmadd_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, zero
-; RV64IFD-NEXT:    fadd.d ft1, fa0, ft0
-; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV64IFD-NEXT:    fnmadd.d fa0, ft1, fa1, ft0
+; RV64IFD-NEXT:    fmv.d.x fa5, zero
+; RV64IFD-NEXT:    fadd.d fa4, fa0, fa5
+; RV64IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV64IFD-NEXT:    fnmadd.d fa0, fa4, fa1, fa5
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_d:
@@ -435,18 +435,18 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
 define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
 ; RV32IFD-LABEL: fnmadd_d_2:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fadd.d ft1, fa1, ft0
-; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV32IFD-NEXT:    fnmadd.d fa0, ft1, fa0, ft0
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fadd.d fa4, fa1, fa5
+; RV32IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV32IFD-NEXT:    fnmadd.d fa0, fa4, fa0, fa5
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmadd_d_2:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, zero
-; RV64IFD-NEXT:    fadd.d ft1, fa1, ft0
-; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV64IFD-NEXT:    fnmadd.d fa0, ft1, fa0, ft0
+; RV64IFD-NEXT:    fmv.d.x fa5, zero
+; RV64IFD-NEXT:    fadd.d fa4, fa1, fa5
+; RV64IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV64IFD-NEXT:    fnmadd.d fa0, fa4, fa0, fa5
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_d_2:
@@ -532,16 +532,16 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
 define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV32IFD-LABEL: fnmsub_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fadd.d ft0, fa0, ft0
-; RV32IFD-NEXT:    fnmsub.d fa0, ft0, fa1, fa2
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fadd.d fa5, fa0, fa5
+; RV32IFD-NEXT:    fnmsub.d fa0, fa5, fa1, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmsub_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, zero
-; RV64IFD-NEXT:    fadd.d ft0, fa0, ft0
-; RV64IFD-NEXT:    fnmsub.d fa0, ft0, fa1, fa2
+; RV64IFD-NEXT:    fmv.d.x fa5, zero
+; RV64IFD-NEXT:    fadd.d fa5, fa0, fa5
+; RV64IFD-NEXT:    fnmsub.d fa0, fa5, fa1, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_d:
@@ -604,16 +604,16 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp {
 define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
 ; RV32IFD-LABEL: fnmsub_d_2:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fadd.d ft0, fa1, ft0
-; RV32IFD-NEXT:    fnmsub.d fa0, ft0, fa0, fa2
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fadd.d fa5, fa1, fa5
+; RV32IFD-NEXT:    fnmsub.d fa0, fa5, fa0, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmsub_d_2:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, zero
-; RV64IFD-NEXT:    fadd.d ft0, fa1, ft0
-; RV64IFD-NEXT:    fnmsub.d fa0, ft0, fa0, fa2
+; RV64IFD-NEXT:    fmv.d.x fa5, zero
+; RV64IFD-NEXT:    fadd.d fa5, fa1, fa5
+; RV64IFD-NEXT:    fnmsub.d fa0, fa5, fa0, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_d_2:

diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index f2d94460d2dd..708271c0f43f 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -184,9 +184,9 @@ define double @fsgnj_d(double %a, double %b) nounwind {
 define i32 @fneg_d(double %a, double %b) nounwind {
 ; CHECKIFD-LABEL: fneg_d:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    fadd.d ft0, fa0, fa0
-; CHECKIFD-NEXT:    fneg.d ft1, ft0
-; CHECKIFD-NEXT:    feq.d a0, ft0, ft1
+; CHECKIFD-NEXT:    fadd.d fa5, fa0, fa0
+; CHECKIFD-NEXT:    fneg.d fa4, fa5
+; CHECKIFD-NEXT:    feq.d a0, fa5, fa4
 ; CHECKIFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fneg_d:
@@ -266,9 +266,9 @@ declare double @llvm.fabs.f64(double)
 define double @fabs_d(double %a, double %b) nounwind {
 ; CHECKIFD-LABEL: fabs_d:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    fadd.d ft0, fa0, fa1
-; CHECKIFD-NEXT:    fabs.d ft1, ft0
-; CHECKIFD-NEXT:    fadd.d fa0, ft1, ft0
+; CHECKIFD-NEXT:    fadd.d fa5, fa0, fa1
+; CHECKIFD-NEXT:    fabs.d fa4, fa5
+; CHECKIFD-NEXT:    fadd.d fa0, fa4, fa5
 ; CHECKIFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fabs_d:
@@ -393,16 +393,16 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind {
 define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fmsub_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV32IFD-NEXT:    fmsub.d fa0, fa0, fa1, ft0
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV32IFD-NEXT:    fmsub.d fa0, fa0, fa1, fa5
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmsub_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, zero
-; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV64IFD-NEXT:    fmsub.d fa0, fa0, fa1, ft0
+; RV64IFD-NEXT:    fmv.d.x fa5, zero
+; RV64IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV64IFD-NEXT:    fmsub.d fa0, fa0, fa1, fa5
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmsub_d:
@@ -469,18 +469,18 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fnmadd_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fadd.d ft1, fa0, ft0
-; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV32IFD-NEXT:    fnmadd.d fa0, ft1, fa1, ft0
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fadd.d fa4, fa0, fa5
+; RV32IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV32IFD-NEXT:    fnmadd.d fa0, fa4, fa1, fa5
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmadd_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, zero
-; RV64IFD-NEXT:    fadd.d ft1, fa0, ft0
-; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV64IFD-NEXT:    fnmadd.d fa0, ft1, fa1, ft0
+; RV64IFD-NEXT:    fmv.d.x fa5, zero
+; RV64IFD-NEXT:    fadd.d fa4, fa0, fa5
+; RV64IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV64IFD-NEXT:    fnmadd.d fa0, fa4, fa1, fa5
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_d:
@@ -565,18 +565,18 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fnmadd_d_2:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fadd.d ft1, fa1, ft0
-; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV32IFD-NEXT:    fnmadd.d fa0, ft1, fa0, ft0
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fadd.d fa4, fa1, fa5
+; RV32IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV32IFD-NEXT:    fnmadd.d fa0, fa4, fa0, fa5
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmadd_d_2:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, zero
-; RV64IFD-NEXT:    fadd.d ft1, fa1, ft0
-; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV64IFD-NEXT:    fnmadd.d fa0, ft1, fa0, ft0
+; RV64IFD-NEXT:    fmv.d.x fa5, zero
+; RV64IFD-NEXT:    fadd.d fa4, fa1, fa5
+; RV64IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV64IFD-NEXT:    fnmadd.d fa0, fa4, fa0, fa5
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_d_2:
@@ -662,8 +662,8 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 define double @fnmadd_d_3(double %a, double %b, double %c) nounwind {
 ; CHECKIFD-LABEL: fnmadd_d_3:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    fmadd.d ft0, fa0, fa1, fa2
-; CHECKIFD-NEXT:    fneg.d fa0, ft0
+; CHECKIFD-NEXT:    fmadd.d fa5, fa0, fa1, fa2
+; CHECKIFD-NEXT:    fneg.d fa0, fa5
 ; CHECKIFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_d_3:
@@ -730,16 +730,16 @@ define double @fnmadd_nsz(double %a, double %b, double %c) nounwind {
 define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fnmsub_d:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fadd.d ft0, fa0, ft0
-; RV32IFD-NEXT:    fnmsub.d fa0, ft0, fa1, fa2
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fadd.d fa5, fa0, fa5
+; RV32IFD-NEXT:    fnmsub.d fa0, fa5, fa1, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmsub_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, zero
-; RV64IFD-NEXT:    fadd.d ft0, fa0, ft0
-; RV64IFD-NEXT:    fnmsub.d fa0, ft0, fa1, fa2
+; RV64IFD-NEXT:    fmv.d.x fa5, zero
+; RV64IFD-NEXT:    fadd.d fa5, fa0, fa5
+; RV64IFD-NEXT:    fnmsub.d fa0, fa5, fa1, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_d:
@@ -802,16 +802,16 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fnmsub_d_2:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fadd.d ft0, fa1, ft0
-; RV32IFD-NEXT:    fnmsub.d fa0, ft0, fa0, fa2
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fadd.d fa5, fa1, fa5
+; RV32IFD-NEXT:    fnmsub.d fa0, fa5, fa0, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmsub_d_2:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, zero
-; RV64IFD-NEXT:    fadd.d ft0, fa1, ft0
-; RV64IFD-NEXT:    fnmsub.d fa0, ft0, fa0, fa2
+; RV64IFD-NEXT:    fmv.d.x fa5, zero
+; RV64IFD-NEXT:    fadd.d fa5, fa1, fa5
+; RV64IFD-NEXT:    fnmsub.d fa0, fa5, fa0, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_d_2:
@@ -920,16 +920,16 @@ define double @fmadd_d_contract(double %a, double %b, double %c) nounwind {
 define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fmsub_d_contract:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV32IFD-NEXT:    fmsub.d fa0, fa0, fa1, ft0
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV32IFD-NEXT:    fmsub.d fa0, fa0, fa1, fa5
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmsub_d_contract:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, zero
-; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV64IFD-NEXT:    fmsub.d fa0, fa0, fa1, ft0
+; RV64IFD-NEXT:    fmv.d.x fa5, zero
+; RV64IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV64IFD-NEXT:    fmsub.d fa0, fa0, fa1, fa5
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmsub_d_contract:
@@ -1004,20 +1004,20 @@ define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
 define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fnmadd_d_contract:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fadd.d ft1, fa0, ft0
-; RV32IFD-NEXT:    fadd.d ft2, fa1, ft0
-; RV32IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV32IFD-NEXT:    fnmadd.d fa0, ft1, ft2, ft0
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fadd.d fa4, fa0, fa5
+; RV32IFD-NEXT:    fadd.d fa3, fa1, fa5
+; RV32IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV32IFD-NEXT:    fnmadd.d fa0, fa4, fa3, fa5
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmadd_d_contract:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, zero
-; RV64IFD-NEXT:    fadd.d ft1, fa0, ft0
-; RV64IFD-NEXT:    fadd.d ft2, fa1, ft0
-; RV64IFD-NEXT:    fadd.d ft0, fa2, ft0
-; RV64IFD-NEXT:    fnmadd.d fa0, ft1, ft2, ft0
+; RV64IFD-NEXT:    fmv.d.x fa5, zero
+; RV64IFD-NEXT:    fadd.d fa4, fa0, fa5
+; RV64IFD-NEXT:    fadd.d fa3, fa1, fa5
+; RV64IFD-NEXT:    fadd.d fa5, fa2, fa5
+; RV64IFD-NEXT:    fnmadd.d fa0, fa4, fa3, fa5
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_d_contract:
@@ -1119,18 +1119,18 @@ define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
 define double @fnmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fnmsub_d_contract:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fadd.d ft1, fa0, ft0
-; RV32IFD-NEXT:    fadd.d ft0, fa1, ft0
-; RV32IFD-NEXT:    fnmsub.d fa0, ft1, ft0, fa2
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fadd.d fa4, fa0, fa5
+; RV32IFD-NEXT:    fadd.d fa5, fa1, fa5
+; RV32IFD-NEXT:    fnmsub.d fa0, fa4, fa5, fa2
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fnmsub_d_contract:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, zero
-; RV64IFD-NEXT:    fadd.d ft1, fa0, ft0
-; RV64IFD-NEXT:    fadd.d ft0, fa1, ft0
-; RV64IFD-NEXT:    fnmsub.d fa0, ft1, ft0, fa2
+; RV64IFD-NEXT:    fmv.d.x fa5, zero
+; RV64IFD-NEXT:    fadd.d fa4, fa0, fa5
+; RV64IFD-NEXT:    fadd.d fa5, fa1, fa5
+; RV64IFD-NEXT:    fnmsub.d fa0, fa4, fa5, fa2
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_d_contract:

diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
index 25ffd32f5ccb..5bf27f5dded3 100644
--- a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
@@ -99,12 +99,12 @@ define double @fcopysign_fneg(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw a2, 8(sp)
 ; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    fld fa5, 8(sp)
 ; RV32IFD-NEXT:    sw a0, 8(sp)
 ; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fsgnjn.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    fld fa4, 8(sp)
+; RV32IFD-NEXT:    fsgnjn.d fa5, fa4, fa5
+; RV32IFD-NEXT:    fsd fa5, 8(sp)
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -125,10 +125,10 @@ define double @fcopysign_fneg(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    li a2, -1
 ; RV64IFD-NEXT:    slli a2, a2, 63
 ; RV64IFD-NEXT:    xor a1, a1, a2
-; RV64IFD-NEXT:    fmv.d.x ft0, a1
-; RV64IFD-NEXT:    fmv.d.x ft1, a0
-; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x fa5, a1
+; RV64IFD-NEXT:    fmv.d.x fa4, a0
+; RV64IFD-NEXT:    fsgnj.d fa5, fa4, fa5
+; RV64IFD-NEXT:    fmv.x.d a0, fa5
 ; RV64IFD-NEXT:    ret
   %1 = fneg double %b
   %2 = call double @llvm.copysign.f64(double %a, double %1)

diff --git a/llvm/test/CodeGen/RISCV/double-calling-conv.ll b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
index 570d9d7f13e7..d1ecc3dc71f7 100644
--- a/llvm/test/CodeGen/RISCV/double-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
@@ -13,12 +13,12 @@ define double @callee_double_inreg(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw a2, 8(sp)
 ; RV32IFD-NEXT:    sw a3, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    fld fa5, 8(sp)
 ; RV32IFD-NEXT:    sw a0, 8(sp)
 ; RV32IFD-NEXT:    sw a1, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    fld fa4, 8(sp)
+; RV32IFD-NEXT:    fadd.d fa5, fa4, fa5
+; RV32IFD-NEXT:    fsd fa5, 8(sp)
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -56,12 +56,12 @@ define double @callee_double_split_reg_stack(i32 %a, i64 %b, i64 %c, double %d,
 ; RV32IFD-NEXT:    lw a0, 16(sp)
 ; RV32IFD-NEXT:    sw a7, 8(sp)
 ; RV32IFD-NEXT:    sw a0, 12(sp)
-; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    fld fa5, 8(sp)
 ; RV32IFD-NEXT:    sw a5, 8(sp)
 ; RV32IFD-NEXT:    sw a6, 12(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    fld fa4, 8(sp)
+; RV32IFD-NEXT:    fadd.d fa5, fa4, fa5
+; RV32IFD-NEXT:    fsd fa5, 8(sp)
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -100,10 +100,10 @@ define double @callee_double_stack(i64 %a, i64 %b, i64 %c, i64 %d, double %e, do
 ; RV32IFD-LABEL: callee_double_stack:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fld ft0, 24(sp)
-; RV32IFD-NEXT:    fld ft1, 16(sp)
-; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    fld fa5, 24(sp)
+; RV32IFD-NEXT:    fld fa4, 16(sp)
+; RV32IFD-NEXT:    fadd.d fa5, fa4, fa5
+; RV32IFD-NEXT:    fsd fa5, 8(sp)
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16

diff --git a/llvm/test/CodeGen/RISCV/double-convert-strict.ll b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
index b353d425098c..1c8291d3ac27 100644
--- a/llvm/test/CodeGen/RISCV/double-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
@@ -556,15 +556,15 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32IFD-LABEL: fcvt_d_w_demanded_bits:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi a0, a0, 1
-; RV32IFD-NEXT:    fcvt.d.w ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 0(a1)
+; RV32IFD-NEXT:    fcvt.d.w fa5, a0
+; RV32IFD-NEXT:    fsd fa5, 0(a1)
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_w_demanded_bits:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addiw a0, a0, 1
-; RV64IFD-NEXT:    fcvt.d.w ft0, a0
-; RV64IFD-NEXT:    fsd ft0, 0(a1)
+; RV64IFD-NEXT:    fcvt.d.w fa5, a0
+; RV64IFD-NEXT:    fsd fa5, 0(a1)
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_w_demanded_bits:
@@ -614,15 +614,15 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32IFD-LABEL: fcvt_d_wu_demanded_bits:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi a0, a0, 1
-; RV32IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 0(a1)
+; RV32IFD-NEXT:    fcvt.d.wu fa5, a0
+; RV32IFD-NEXT:    fsd fa5, 0(a1)
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_wu_demanded_bits:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addiw a0, a0, 1
-; RV64IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV64IFD-NEXT:    fsd ft0, 0(a1)
+; RV64IFD-NEXT:    fcvt.d.wu fa5, a0
+; RV64IFD-NEXT:    fsd fa5, 0(a1)
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_wu_demanded_bits:

diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index e84be956b567..b4f9e3b9e2a7 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -534,9 +534,9 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI12_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
-; RV32IFD-NEXT:    fle.d s0, ft0, fa0
+; RV32IFD-NEXT:    fle.d s0, fa5, fa0
 ; RV32IFD-NEXT:    call __fixdfdi@plt
 ; RV32IFD-NEXT:    lui a3, 524288
 ; RV32IFD-NEXT:    bnez s0, .LBB12_2
@@ -544,8 +544,8 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32IFD-NEXT:    lui a1, 524288
 ; RV32IFD-NEXT:  .LBB12_2: # %start
 ; RV32IFD-NEXT:    lui a2, %hi(.LCPI12_1)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI12_1)(a2)
-; RV32IFD-NEXT:    flt.d a2, ft0, fs0
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI12_1)(a2)
+; RV32IFD-NEXT:    flt.d a2, fa5, fs0
 ; RV32IFD-NEXT:    beqz a2, .LBB12_4
 ; RV32IFD-NEXT:  # %bb.3:
 ; RV32IFD-NEXT:    addi a1, a3, -1
@@ -750,14 +750,14 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fle.d a0, ft0, fa0
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fle.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s0, a0
 ; RV32IFD-NEXT:    call __fixunsdfdi@plt
 ; RV32IFD-NEXT:    lui a2, %hi(.LCPI14_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI14_0)(a2)
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI14_0)(a2)
 ; RV32IFD-NEXT:    and a0, s0, a0
-; RV32IFD-NEXT:    flt.d a2, ft0, fs0
+; RV32IFD-NEXT:    flt.d a2, fa5, fs0
 ; RV32IFD-NEXT:    neg a2, a2
 ; RV32IFD-NEXT:    or a0, a2, a0
 ; RV32IFD-NEXT:    and a1, s0, a1
@@ -873,8 +873,8 @@ define i64 @fmv_x_d(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fmv_x_d:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    fadd.d ft0, fa0, fa1
-; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    fadd.d fa5, fa0, fa1
+; RV32IFD-NEXT:    fsd fa5, 8(sp)
 ; RV32IFD-NEXT:    lw a0, 8(sp)
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -882,8 +882,8 @@ define i64 @fmv_x_d(double %a, double %b) nounwind {
 ;
 ; RV64IFD-LABEL: fmv_x_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fadd.d ft0, fa0, fa1
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fadd.d fa5, fa0, fa1
+; RV64IFD-NEXT:    fmv.x.d a0, fa5
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmv_x_d:
@@ -989,17 +989,17 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
 ; RV32IFD-NEXT:    sw a2, 0(sp)
 ; RV32IFD-NEXT:    sw a1, 12(sp)
 ; RV32IFD-NEXT:    sw a0, 8(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp)
-; RV32IFD-NEXT:    fadd.d fa0, ft1, ft0
+; RV32IFD-NEXT:    fld fa5, 0(sp)
+; RV32IFD-NEXT:    fld fa4, 8(sp)
+; RV32IFD-NEXT:    fadd.d fa0, fa4, fa5
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fmv_d_x:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fadd.d fa0, ft0, ft1
+; RV64IFD-NEXT:    fmv.d.x fa5, a0
+; RV64IFD-NEXT:    fmv.d.x fa4, a1
+; RV64IFD-NEXT:    fadd.d fa0, fa5, fa4
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fmv_d_x:
@@ -1138,15 +1138,15 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32IFD-LABEL: fcvt_d_w_demanded_bits:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi a0, a0, 1
-; RV32IFD-NEXT:    fcvt.d.w ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 0(a1)
+; RV32IFD-NEXT:    fcvt.d.w fa5, a0
+; RV32IFD-NEXT:    fsd fa5, 0(a1)
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_w_demanded_bits:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addiw a0, a0, 1
-; RV64IFD-NEXT:    fcvt.d.w ft0, a0
-; RV64IFD-NEXT:    fsd ft0, 0(a1)
+; RV64IFD-NEXT:    fcvt.d.w fa5, a0
+; RV64IFD-NEXT:    fsd fa5, 0(a1)
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_w_demanded_bits:
@@ -1196,15 +1196,15 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32IFD-LABEL: fcvt_d_wu_demanded_bits:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi a0, a0, 1
-; RV32IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV32IFD-NEXT:    fsd ft0, 0(a1)
+; RV32IFD-NEXT:    fcvt.d.wu fa5, a0
+; RV32IFD-NEXT:    fsd fa5, 0(a1)
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_d_wu_demanded_bits:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addiw a0, a0, 1
-; RV64IFD-NEXT:    fcvt.d.wu ft0, a0
-; RV64IFD-NEXT:    fsd ft0, 0(a1)
+; RV64IFD-NEXT:    fcvt.d.wu fa5, a0
+; RV64IFD-NEXT:    fsd fa5, 0(a1)
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_d_wu_demanded_bits:
@@ -1285,12 +1285,12 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_w_s_sat_i16:
 ; RV32IFD:       # %bb.0: # %start
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI26_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI26_0)(a0)
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI26_1)
-; RV32IFD-NEXT:    fld ft1, %lo(.LCPI26_1)(a0)
-; RV32IFD-NEXT:    fmax.d ft0, fa0, ft0
-; RV32IFD-NEXT:    fmin.d ft0, ft0, ft1
-; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32IFD-NEXT:    fld fa4, %lo(.LCPI26_1)(a0)
+; RV32IFD-NEXT:    fmax.d fa5, fa0, fa5
+; RV32IFD-NEXT:    fmin.d fa5, fa5, fa4
+; RV32IFD-NEXT:    fcvt.w.d a0, fa5, rtz
 ; RV32IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV32IFD-NEXT:    seqz a1, a1
 ; RV32IFD-NEXT:    addi a1, a1, -1
@@ -1300,12 +1300,12 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
 ; RV64IFD-LABEL: fcvt_w_s_sat_i16:
 ; RV64IFD:       # %bb.0: # %start
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI26_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI26_0)(a0)
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI26_1)
-; RV64IFD-NEXT:    fld ft1, %lo(.LCPI26_1)(a0)
-; RV64IFD-NEXT:    fmax.d ft0, fa0, ft0
-; RV64IFD-NEXT:    fmin.d ft0, ft0, ft1
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT:    fld fa4, %lo(.LCPI26_1)(a0)
+; RV64IFD-NEXT:    fmax.d fa5, fa0, fa5
+; RV64IFD-NEXT:    fmin.d fa5, fa5, fa4
+; RV64IFD-NEXT:    fcvt.l.d a0, fa5, rtz
 ; RV64IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV64IFD-NEXT:    seqz a1, a1
 ; RV64IFD-NEXT:    addi a1, a1, -1
@@ -1452,21 +1452,21 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_wu_s_sat_i16:
 ; RV32IFD:       # %bb.0: # %start
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI28_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI28_0)(a0)
-; RV32IFD-NEXT:    fcvt.d.w ft1, zero
-; RV32IFD-NEXT:    fmax.d ft1, fa0, ft1
-; RV32IFD-NEXT:    fmin.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI28_0)(a0)
+; RV32IFD-NEXT:    fcvt.d.w fa4, zero
+; RV32IFD-NEXT:    fmax.d fa4, fa0, fa4
+; RV32IFD-NEXT:    fmin.d fa5, fa4, fa5
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa5, rtz
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_wu_s_sat_i16:
 ; RV64IFD:       # %bb.0: # %start
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI28_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI28_0)(a0)
-; RV64IFD-NEXT:    fmv.d.x ft1, zero
-; RV64IFD-NEXT:    fmax.d ft1, fa0, ft1
-; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI28_0)(a0)
+; RV64IFD-NEXT:    fmv.d.x fa4, zero
+; RV64IFD-NEXT:    fmax.d fa4, fa0, fa4
+; RV64IFD-NEXT:    fmin.d fa5, fa4, fa5
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa5, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s_sat_i16:
@@ -1592,12 +1592,12 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_w_s_sat_i8:
 ; RV32IFD:       # %bb.0: # %start
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI30_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI30_0)(a0)
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI30_0)(a0)
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI30_1)
-; RV32IFD-NEXT:    fld ft1, %lo(.LCPI30_1)(a0)
-; RV32IFD-NEXT:    fmax.d ft0, fa0, ft0
-; RV32IFD-NEXT:    fmin.d ft0, ft0, ft1
-; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32IFD-NEXT:    fld fa4, %lo(.LCPI30_1)(a0)
+; RV32IFD-NEXT:    fmax.d fa5, fa0, fa5
+; RV32IFD-NEXT:    fmin.d fa5, fa5, fa4
+; RV32IFD-NEXT:    fcvt.w.d a0, fa5, rtz
 ; RV32IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV32IFD-NEXT:    seqz a1, a1
 ; RV32IFD-NEXT:    addi a1, a1, -1
@@ -1607,12 +1607,12 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
 ; RV64IFD-LABEL: fcvt_w_s_sat_i8:
 ; RV64IFD:       # %bb.0: # %start
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI30_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI30_0)(a0)
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI30_0)(a0)
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI30_1)
-; RV64IFD-NEXT:    fld ft1, %lo(.LCPI30_1)(a0)
-; RV64IFD-NEXT:    fmax.d ft0, fa0, ft0
-; RV64IFD-NEXT:    fmin.d ft0, ft0, ft1
-; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT:    fld fa4, %lo(.LCPI30_1)(a0)
+; RV64IFD-NEXT:    fmax.d fa5, fa0, fa5
+; RV64IFD-NEXT:    fmin.d fa5, fa5, fa4
+; RV64IFD-NEXT:    fcvt.l.d a0, fa5, rtz
 ; RV64IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV64IFD-NEXT:    seqz a1, a1
 ; RV64IFD-NEXT:    addi a1, a1, -1
@@ -1758,21 +1758,21 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_wu_s_sat_i8:
 ; RV32IFD:       # %bb.0: # %start
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI32_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI32_0)(a0)
-; RV32IFD-NEXT:    fcvt.d.w ft1, zero
-; RV32IFD-NEXT:    fmax.d ft1, fa0, ft1
-; RV32IFD-NEXT:    fmin.d ft0, ft1, ft0
-; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI32_0)(a0)
+; RV32IFD-NEXT:    fcvt.d.w fa4, zero
+; RV32IFD-NEXT:    fmax.d fa4, fa0, fa4
+; RV32IFD-NEXT:    fmin.d fa5, fa4, fa5
+; RV32IFD-NEXT:    fcvt.wu.d a0, fa5, rtz
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_wu_s_sat_i8:
 ; RV64IFD:       # %bb.0: # %start
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI32_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI32_0)(a0)
-; RV64IFD-NEXT:    fmv.d.x ft1, zero
-; RV64IFD-NEXT:    fmax.d ft1, fa0, ft1
-; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
-; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI32_0)(a0)
+; RV64IFD-NEXT:    fmv.d.x fa4, zero
+; RV64IFD-NEXT:    fmax.d fa4, fa0, fa4
+; RV64IFD-NEXT:    fmin.d fa5, fa4, fa5
+; RV64IFD-NEXT:    fcvt.lu.d a0, fa5, rtz
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s_sat_i8:

diff --git a/llvm/test/CodeGen/RISCV/double-imm.ll b/llvm/test/CodeGen/RISCV/double-imm.ll
index 9e62920fd03d..d0b0ddd1ca02 100644
--- a/llvm/test/CodeGen/RISCV/double-imm.ll
+++ b/llvm/test/CodeGen/RISCV/double-imm.ll
@@ -17,8 +17,8 @@ define double @double_imm_op(double %a) nounwind {
 ; CHECK-LABEL: double_imm_op:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI1_0)(a0)
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
+; CHECK-NEXT:    fld fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT:    fadd.d fa0, fa0, fa5
 ; CHECK-NEXT:    ret
   %1 = fadd double %a, 1.0
   ret double %1

diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index 569f95e017a1..0980ef6b8d9f 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -603,14 +603,14 @@ define double @floor_f64(double %a) nounwind {
 ; RV64IFD-LABEL: floor_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI17_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI17_0)(a0)
-; RV64IFD-NEXT:    fabs.d ft1, fa0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
+; RV64IFD-NEXT:    fabs.d fa4, fa0
+; RV64IFD-NEXT:    flt.d a0, fa4, fa5
 ; RV64IFD-NEXT:    beqz a0, .LBB17_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
-; RV64IFD-NEXT:    fcvt.d.l ft0, a0, rdn
-; RV64IFD-NEXT:    fsgnj.d fa0, ft0, fa0
+; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rdn
+; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB17_2:
 ; RV64IFD-NEXT:    ret
 ;
@@ -645,14 +645,14 @@ define double @ceil_f64(double %a) nounwind {
 ; RV64IFD-LABEL: ceil_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI18_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI18_0)(a0)
-; RV64IFD-NEXT:    fabs.d ft1, fa0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
+; RV64IFD-NEXT:    fabs.d fa4, fa0
+; RV64IFD-NEXT:    flt.d a0, fa4, fa5
 ; RV64IFD-NEXT:    beqz a0, .LBB18_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
-; RV64IFD-NEXT:    fcvt.d.l ft0, a0, rup
-; RV64IFD-NEXT:    fsgnj.d fa0, ft0, fa0
+; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rup
+; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB18_2:
 ; RV64IFD-NEXT:    ret
 ;
@@ -687,14 +687,14 @@ define double @trunc_f64(double %a) nounwind {
 ; RV64IFD-LABEL: trunc_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI19_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI19_0)(a0)
-; RV64IFD-NEXT:    fabs.d ft1, fa0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
+; RV64IFD-NEXT:    fabs.d fa4, fa0
+; RV64IFD-NEXT:    flt.d a0, fa4, fa5
 ; RV64IFD-NEXT:    beqz a0, .LBB19_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
-; RV64IFD-NEXT:    fcvt.d.l ft0, a0, rtz
-; RV64IFD-NEXT:    fsgnj.d fa0, ft0, fa0
+; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rtz
+; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB19_2:
 ; RV64IFD-NEXT:    ret
 ;
@@ -729,14 +729,14 @@ define double @rint_f64(double %a) nounwind {
 ; RV64IFD-LABEL: rint_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI20_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI20_0)(a0)
-; RV64IFD-NEXT:    fabs.d ft1, fa0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI20_0)(a0)
+; RV64IFD-NEXT:    fabs.d fa4, fa0
+; RV64IFD-NEXT:    flt.d a0, fa4, fa5
 ; RV64IFD-NEXT:    beqz a0, .LBB20_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0
-; RV64IFD-NEXT:    fcvt.d.l ft0, a0
-; RV64IFD-NEXT:    fsgnj.d fa0, ft0, fa0
+; RV64IFD-NEXT:    fcvt.d.l fa5, a0
+; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB20_2:
 ; RV64IFD-NEXT:    ret
 ;
@@ -799,14 +799,14 @@ define double @round_f64(double %a) nounwind {
 ; RV64IFD-LABEL: round_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI22_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI22_0)(a0)
-; RV64IFD-NEXT:    fabs.d ft1, fa0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI22_0)(a0)
+; RV64IFD-NEXT:    fabs.d fa4, fa0
+; RV64IFD-NEXT:    flt.d a0, fa4, fa5
 ; RV64IFD-NEXT:    beqz a0, .LBB22_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
-; RV64IFD-NEXT:    fcvt.d.l ft0, a0, rmm
-; RV64IFD-NEXT:    fsgnj.d fa0, ft0, fa0
+; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rmm
+; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB22_2:
 ; RV64IFD-NEXT:    ret
 ;
@@ -841,14 +841,14 @@ define double @roundeven_f64(double %a) nounwind {
 ; RV64IFD-LABEL: roundeven_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI23_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI23_0)(a0)
-; RV64IFD-NEXT:    fabs.d ft1, fa0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
+; RV64IFD-NEXT:    fabs.d fa4, fa0
+; RV64IFD-NEXT:    flt.d a0, fa4, fa5
 ; RV64IFD-NEXT:    beqz a0, .LBB23_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
-; RV64IFD-NEXT:    fcvt.d.l ft0, a0, rne
-; RV64IFD-NEXT:    fsgnj.d fa0, ft0, fa0
+; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rne
+; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB23_2:
 ; RV64IFD-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll
index 4867001c31af..7b029de61770 100644
--- a/llvm/test/CodeGen/RISCV/double-mem.ll
+++ b/llvm/test/CodeGen/RISCV/double-mem.ll
@@ -7,9 +7,9 @@
 define dso_local double @fld(ptr %a) nounwind {
 ; CHECKIFD-LABEL: fld:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    fld ft0, 0(a0)
-; CHECKIFD-NEXT:    fld ft1, 24(a0)
-; CHECKIFD-NEXT:    fadd.d fa0, ft0, ft1
+; CHECKIFD-NEXT:    fld fa5, 0(a0)
+; CHECKIFD-NEXT:    fld fa4, 24(a0)
+; CHECKIFD-NEXT:    fadd.d fa0, fa5, fa4
 ; CHECKIFD-NEXT:    ret
   %1 = load double, ptr %a
   %2 = getelementptr double, ptr %a, i32 3
@@ -23,9 +23,9 @@ define dso_local double @fld(ptr %a) nounwind {
 define dso_local void @fsd(ptr %a, double %b, double %c) nounwind {
 ; CHECKIFD-LABEL: fsd:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    fadd.d ft0, fa0, fa1
-; CHECKIFD-NEXT:    fsd ft0, 0(a0)
-; CHECKIFD-NEXT:    fsd ft0, 64(a0)
+; CHECKIFD-NEXT:    fadd.d fa5, fa0, fa1
+; CHECKIFD-NEXT:    fsd fa5, 0(a0)
+; CHECKIFD-NEXT:    fsd fa5, 64(a0)
 ; CHECKIFD-NEXT:    ret
 ; Use %b and %c in an FP op to ensure floating point registers are used, even
 ; for the soft float ABI
@@ -44,10 +44,10 @@ define dso_local double @fld_fsd_global(double %a, double %b) nounwind {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    fadd.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    lui a0, %hi(G)
-; CHECKIFD-NEXT:    fld ft0, %lo(G)(a0)
+; CHECKIFD-NEXT:    fld fa5, %lo(G)(a0)
 ; CHECKIFD-NEXT:    addi a1, a0, %lo(G)
 ; CHECKIFD-NEXT:    fsd fa0, %lo(G)(a0)
-; CHECKIFD-NEXT:    fld ft0, 72(a1)
+; CHECKIFD-NEXT:    fld fa5, 72(a1)
 ; CHECKIFD-NEXT:    fsd fa0, 72(a1)
 ; CHECKIFD-NEXT:    ret
 ; Use %a and %b in an FP op to ensure floating point registers are used, even
@@ -66,8 +66,8 @@ define dso_local double @fld_fsd_constant(double %a) nounwind {
 ; RV32IFD-LABEL: fld_fsd_constant:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    lui a0, 912092
-; RV32IFD-NEXT:    fld ft0, -273(a0)
-; RV32IFD-NEXT:    fadd.d fa0, fa0, ft0
+; RV32IFD-NEXT:    fld fa5, -273(a0)
+; RV32IFD-NEXT:    fadd.d fa0, fa0, fa5
 ; RV32IFD-NEXT:    fsd fa0, -273(a0)
 ; RV32IFD-NEXT:    ret
 ;
@@ -75,8 +75,8 @@ define dso_local double @fld_fsd_constant(double %a) nounwind {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lui a0, 228023
 ; RV64IFD-NEXT:    slli a0, a0, 2
-; RV64IFD-NEXT:    fld ft0, -273(a0)
-; RV64IFD-NEXT:    fadd.d fa0, fa0, ft0
+; RV64IFD-NEXT:    fld fa5, -273(a0)
+; RV64IFD-NEXT:    fadd.d fa0, fa0, fa5
 ; RV64IFD-NEXT:    fsd fa0, -273(a0)
 ; RV64IFD-NEXT:    ret
   %1 = inttoptr i32 3735928559 to ptr
@@ -97,8 +97,8 @@ define dso_local double @fld_stack(double %a) nounwind {
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
 ; RV32IFD-NEXT:    addi a0, sp, 8
 ; RV32IFD-NEXT:    call notdead@plt
-; RV32IFD-NEXT:    fld ft0, 8(sp)
-; RV32IFD-NEXT:    fadd.d fa0, ft0, fs0
+; RV32IFD-NEXT:    fld fa5, 8(sp)
+; RV32IFD-NEXT:    fadd.d fa0, fa5, fs0
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 32
@@ -112,8 +112,8 @@ define dso_local double @fld_stack(double %a) nounwind {
 ; RV64IFD-NEXT:    fmv.d fs0, fa0
 ; RV64IFD-NEXT:    addi a0, sp, 8
 ; RV64IFD-NEXT:    call notdead@plt
-; RV64IFD-NEXT:    fld ft0, 8(sp)
-; RV64IFD-NEXT:    fadd.d fa0, ft0, fs0
+; RV64IFD-NEXT:    fld fa5, 8(sp)
+; RV64IFD-NEXT:    fadd.d fa0, fa5, fs0
 ; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 32
@@ -130,8 +130,8 @@ define dso_local void @fsd_stack(double %a, double %b) nounwind {
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    fadd.d ft0, fa0, fa1
-; RV32IFD-NEXT:    fsd ft0, 0(sp)
+; RV32IFD-NEXT:    fadd.d fa5, fa0, fa1
+; RV32IFD-NEXT:    fsd fa5, 0(sp)
 ; RV32IFD-NEXT:    mv a0, sp
 ; RV32IFD-NEXT:    call notdead@plt
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -142,8 +142,8 @@ define dso_local void @fsd_stack(double %a, double %b) nounwind {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    fadd.d ft0, fa0, fa1
-; RV64IFD-NEXT:    fsd ft0, 0(sp)
+; RV64IFD-NEXT:    fadd.d fa5, fa0, fa1
+; RV64IFD-NEXT:    fsd fa5, 0(sp)
 ; RV64IFD-NEXT:    mv a0, sp
 ; RV64IFD-NEXT:    call notdead@plt
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -160,8 +160,8 @@ define dso_local void @fsd_stack(double %a, double %b) nounwind {
 define dso_local void @fsd_trunc(ptr %a, double %b) nounwind noinline optnone {
 ; CHECKIFD-LABEL: fsd_trunc:
 ; CHECKIFD:       # %bb.0:
-; CHECKIFD-NEXT:    fcvt.s.d ft0, fa0
-; CHECKIFD-NEXT:    fsw ft0, 0(a0)
+; CHECKIFD-NEXT:    fcvt.s.d fa5, fa0
+; CHECKIFD-NEXT:    fsw fa5, 0(a0)
 ; CHECKIFD-NEXT:    ret
   %1 = fptrunc double %b to float
   store float %1, ptr %a, align 4

diff --git a/llvm/test/CodeGen/RISCV/double-previous-failure.ll b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
index 09af162ee4a6..1362f089d4e6 100644
--- a/llvm/test/CodeGen/RISCV/double-previous-failure.ll
+++ b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
@@ -22,13 +22,13 @@ define i32 @main() nounwind {
 ; RV32IFD-NEXT:    call test@plt
 ; RV32IFD-NEXT:    sw a0, 0(sp)
 ; RV32IFD-NEXT:    sw a1, 4(sp)
-; RV32IFD-NEXT:    fld ft0, 0(sp)
+; RV32IFD-NEXT:    fld fa5, 0(sp)
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV32IFD-NEXT:    fld ft1, %lo(.LCPI1_0)(a0)
+; RV32IFD-NEXT:    fld fa4, %lo(.LCPI1_0)(a0)
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI1_1)
-; RV32IFD-NEXT:    fld ft2, %lo(.LCPI1_1)(a0)
-; RV32IFD-NEXT:    flt.d a0, ft0, ft1
-; RV32IFD-NEXT:    flt.d a1, ft2, ft0
+; RV32IFD-NEXT:    fld fa3, %lo(.LCPI1_1)(a0)
+; RV32IFD-NEXT:    flt.d a0, fa5, fa4
+; RV32IFD-NEXT:    flt.d a1, fa3, fa5
 ; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    beqz a0, .LBB1_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.then

diff --git a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
index cd3a7ace04fe..fa67e2a4616b 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
@@ -27,9 +27,9 @@ define i64 @test_floor_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    call floor@plt
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI1_0)(a0)
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI1_0)(a0)
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
-; RV32IFD-NEXT:    fle.d s0, ft0, fa0
+; RV32IFD-NEXT:    fle.d s0, fa5, fa0
 ; RV32IFD-NEXT:    call __fixdfdi@plt
 ; RV32IFD-NEXT:    lui a3, 524288
 ; RV32IFD-NEXT:    bnez s0, .LBB1_2
@@ -37,8 +37,8 @@ define i64 @test_floor_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    lui a1, 524288
 ; RV32IFD-NEXT:  .LBB1_2:
 ; RV32IFD-NEXT:    lui a2, %hi(.LCPI1_1)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI1_1)(a2)
-; RV32IFD-NEXT:    flt.d a2, ft0, fs0
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI1_1)(a2)
+; RV32IFD-NEXT:    flt.d a2, fa5, fs0
 ; RV32IFD-NEXT:    beqz a2, .LBB1_4
 ; RV32IFD-NEXT:  # %bb.3:
 ; RV32IFD-NEXT:    addi a1, a3, -1
@@ -94,11 +94,11 @@ define i64 @test_floor_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call floor@plt
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI3_0)(a0)
-; RV32IFD-NEXT:    flt.d a0, ft0, fa0
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI3_0)(a0)
+; RV32IFD-NEXT:    flt.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s0, a0
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fle.d a0, ft0, fa0
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fle.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s1, a0
 ; RV32IFD-NEXT:    call __fixunsdfdi@plt
 ; RV32IFD-NEXT:    and a0, s1, a0
@@ -147,9 +147,9 @@ define i64 @test_ceil_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    call ceil@plt
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI5_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI5_0)(a0)
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI5_0)(a0)
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
-; RV32IFD-NEXT:    fle.d s0, ft0, fa0
+; RV32IFD-NEXT:    fle.d s0, fa5, fa0
 ; RV32IFD-NEXT:    call __fixdfdi@plt
 ; RV32IFD-NEXT:    lui a3, 524288
 ; RV32IFD-NEXT:    bnez s0, .LBB5_2
@@ -157,8 +157,8 @@ define i64 @test_ceil_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    lui a1, 524288
 ; RV32IFD-NEXT:  .LBB5_2:
 ; RV32IFD-NEXT:    lui a2, %hi(.LCPI5_1)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI5_1)(a2)
-; RV32IFD-NEXT:    flt.d a2, ft0, fs0
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI5_1)(a2)
+; RV32IFD-NEXT:    flt.d a2, fa5, fs0
 ; RV32IFD-NEXT:    beqz a2, .LBB5_4
 ; RV32IFD-NEXT:  # %bb.3:
 ; RV32IFD-NEXT:    addi a1, a3, -1
@@ -214,11 +214,11 @@ define i64 @test_ceil_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call ceil@plt
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI7_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI7_0)(a0)
-; RV32IFD-NEXT:    flt.d a0, ft0, fa0
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI7_0)(a0)
+; RV32IFD-NEXT:    flt.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s0, a0
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fle.d a0, ft0, fa0
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fle.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s1, a0
 ; RV32IFD-NEXT:    call __fixunsdfdi@plt
 ; RV32IFD-NEXT:    and a0, s1, a0
@@ -267,9 +267,9 @@ define i64 @test_trunc_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    call trunc@plt
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI9_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI9_0)(a0)
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI9_0)(a0)
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
-; RV32IFD-NEXT:    fle.d s0, ft0, fa0
+; RV32IFD-NEXT:    fle.d s0, fa5, fa0
 ; RV32IFD-NEXT:    call __fixdfdi@plt
 ; RV32IFD-NEXT:    lui a3, 524288
 ; RV32IFD-NEXT:    bnez s0, .LBB9_2
@@ -277,8 +277,8 @@ define i64 @test_trunc_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    lui a1, 524288
 ; RV32IFD-NEXT:  .LBB9_2:
 ; RV32IFD-NEXT:    lui a2, %hi(.LCPI9_1)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI9_1)(a2)
-; RV32IFD-NEXT:    flt.d a2, ft0, fs0
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI9_1)(a2)
+; RV32IFD-NEXT:    flt.d a2, fa5, fs0
 ; RV32IFD-NEXT:    beqz a2, .LBB9_4
 ; RV32IFD-NEXT:  # %bb.3:
 ; RV32IFD-NEXT:    addi a1, a3, -1
@@ -334,11 +334,11 @@ define i64 @test_trunc_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call trunc@plt
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI11_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
-; RV32IFD-NEXT:    flt.d a0, ft0, fa0
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
+; RV32IFD-NEXT:    flt.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s0, a0
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fle.d a0, ft0, fa0
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fle.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s1, a0
 ; RV32IFD-NEXT:    call __fixunsdfdi@plt
 ; RV32IFD-NEXT:    and a0, s1, a0
@@ -387,9 +387,9 @@ define i64 @test_round_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    call round@plt
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI13_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
-; RV32IFD-NEXT:    fle.d s0, ft0, fa0
+; RV32IFD-NEXT:    fle.d s0, fa5, fa0
 ; RV32IFD-NEXT:    call __fixdfdi@plt
 ; RV32IFD-NEXT:    lui a3, 524288
 ; RV32IFD-NEXT:    bnez s0, .LBB13_2
@@ -397,8 +397,8 @@ define i64 @test_round_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    lui a1, 524288
 ; RV32IFD-NEXT:  .LBB13_2:
 ; RV32IFD-NEXT:    lui a2, %hi(.LCPI13_1)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI13_1)(a2)
-; RV32IFD-NEXT:    flt.d a2, ft0, fs0
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI13_1)(a2)
+; RV32IFD-NEXT:    flt.d a2, fa5, fs0
 ; RV32IFD-NEXT:    beqz a2, .LBB13_4
 ; RV32IFD-NEXT:  # %bb.3:
 ; RV32IFD-NEXT:    addi a1, a3, -1
@@ -454,11 +454,11 @@ define i64 @test_round_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call round@plt
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI15_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI15_0)(a0)
-; RV32IFD-NEXT:    flt.d a0, ft0, fa0
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI15_0)(a0)
+; RV32IFD-NEXT:    flt.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s0, a0
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fle.d a0, ft0, fa0
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fle.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s1, a0
 ; RV32IFD-NEXT:    call __fixunsdfdi@plt
 ; RV32IFD-NEXT:    and a0, s1, a0
@@ -507,9 +507,9 @@ define i64 @test_roundeven_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    call roundeven@plt
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI17_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI17_0)(a0)
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
 ; RV32IFD-NEXT:    fmv.d fs0, fa0
-; RV32IFD-NEXT:    fle.d s0, ft0, fa0
+; RV32IFD-NEXT:    fle.d s0, fa5, fa0
 ; RV32IFD-NEXT:    call __fixdfdi@plt
 ; RV32IFD-NEXT:    lui a3, 524288
 ; RV32IFD-NEXT:    bnez s0, .LBB17_2
@@ -517,8 +517,8 @@ define i64 @test_roundeven_si64(double %x) nounwind {
 ; RV32IFD-NEXT:    lui a1, 524288
 ; RV32IFD-NEXT:  .LBB17_2:
 ; RV32IFD-NEXT:    lui a2, %hi(.LCPI17_1)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI17_1)(a2)
-; RV32IFD-NEXT:    flt.d a2, ft0, fs0
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI17_1)(a2)
+; RV32IFD-NEXT:    flt.d a2, fa5, fs0
 ; RV32IFD-NEXT:    beqz a2, .LBB17_4
 ; RV32IFD-NEXT:  # %bb.3:
 ; RV32IFD-NEXT:    addi a1, a3, -1
@@ -574,11 +574,11 @@ define i64 @test_roundeven_ui64(double %x) nounwind {
 ; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    call roundeven@plt
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI19_0)
-; RV32IFD-NEXT:    fld ft0, %lo(.LCPI19_0)(a0)
-; RV32IFD-NEXT:    flt.d a0, ft0, fa0
+; RV32IFD-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
+; RV32IFD-NEXT:    flt.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s0, a0
-; RV32IFD-NEXT:    fcvt.d.w ft0, zero
-; RV32IFD-NEXT:    fle.d a0, ft0, fa0
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fle.d a0, fa5, fa0
 ; RV32IFD-NEXT:    neg s1, a0
 ; RV32IFD-NEXT:    call __fixunsdfdi@plt
 ; RV32IFD-NEXT:    and a0, s1, a0

diff --git a/llvm/test/CodeGen/RISCV/double-round-conv.ll b/llvm/test/CodeGen/RISCV/double-round-conv.ll
index 44b55030f27a..c67b94a972bd 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv.ll
@@ -632,14 +632,14 @@ define double @test_floor_double(double %x) {
 ; RV64IFD-LABEL: test_floor_double:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI40_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI40_0)(a0)
-; RV64IFD-NEXT:    fabs.d ft1, fa0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
+; RV64IFD-NEXT:    fabs.d fa4, fa0
+; RV64IFD-NEXT:    flt.d a0, fa4, fa5
 ; RV64IFD-NEXT:    beqz a0, .LBB40_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
-; RV64IFD-NEXT:    fcvt.d.l ft0, a0, rdn
-; RV64IFD-NEXT:    fsgnj.d fa0, ft0, fa0
+; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rdn
+; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB40_2:
 ; RV64IFD-NEXT:    ret
   %a = call double @llvm.floor.f64(double %x)
@@ -654,14 +654,14 @@ define double @test_ceil_double(double %x) {
 ; RV64IFD-LABEL: test_ceil_double:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI41_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI41_0)(a0)
-; RV64IFD-NEXT:    fabs.d ft1, fa0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
+; RV64IFD-NEXT:    fabs.d fa4, fa0
+; RV64IFD-NEXT:    flt.d a0, fa4, fa5
 ; RV64IFD-NEXT:    beqz a0, .LBB41_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
-; RV64IFD-NEXT:    fcvt.d.l ft0, a0, rup
-; RV64IFD-NEXT:    fsgnj.d fa0, ft0, fa0
+; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rup
+; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB41_2:
 ; RV64IFD-NEXT:    ret
   %a = call double @llvm.ceil.f64(double %x)
@@ -676,14 +676,14 @@ define double @test_trunc_double(double %x) {
 ; RV64IFD-LABEL: test_trunc_double:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI42_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI42_0)(a0)
-; RV64IFD-NEXT:    fabs.d ft1, fa0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
+; RV64IFD-NEXT:    fabs.d fa4, fa0
+; RV64IFD-NEXT:    flt.d a0, fa4, fa5
 ; RV64IFD-NEXT:    beqz a0, .LBB42_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
-; RV64IFD-NEXT:    fcvt.d.l ft0, a0, rtz
-; RV64IFD-NEXT:    fsgnj.d fa0, ft0, fa0
+; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rtz
+; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB42_2:
 ; RV64IFD-NEXT:    ret
   %a = call double @llvm.trunc.f64(double %x)
@@ -698,14 +698,14 @@ define double @test_round_double(double %x) {
 ; RV64IFD-LABEL: test_round_double:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI43_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI43_0)(a0)
-; RV64IFD-NEXT:    fabs.d ft1, fa0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
+; RV64IFD-NEXT:    fabs.d fa4, fa0
+; RV64IFD-NEXT:    flt.d a0, fa4, fa5
 ; RV64IFD-NEXT:    beqz a0, .LBB43_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
-; RV64IFD-NEXT:    fcvt.d.l ft0, a0, rmm
-; RV64IFD-NEXT:    fsgnj.d fa0, ft0, fa0
+; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rmm
+; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB43_2:
 ; RV64IFD-NEXT:    ret
   %a = call double @llvm.round.f64(double %x)
@@ -720,14 +720,14 @@ define double @test_roundeven_double(double %x) {
 ; RV64IFD-LABEL: test_roundeven_double:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    lui a0, %hi(.LCPI44_0)
-; RV64IFD-NEXT:    fld ft0, %lo(.LCPI44_0)(a0)
-; RV64IFD-NEXT:    fabs.d ft1, fa0
-; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
+; RV64IFD-NEXT:    fabs.d fa4, fa0
+; RV64IFD-NEXT:    flt.d a0, fa4, fa5
 ; RV64IFD-NEXT:    beqz a0, .LBB44_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
-; RV64IFD-NEXT:    fcvt.d.l ft0, a0, rne
-; RV64IFD-NEXT:    fsgnj.d fa0, ft0, fa0
+; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rne
+; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB44_2:
 ; RV64IFD-NEXT:    ret
   %a = call double @llvm.roundeven.f64(double %x)

diff --git a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
index 88a91d05f9ac..95438fda9a70 100644
--- a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
+++ b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
@@ -11,22 +11,22 @@ define double @func(double %d, i32 %n) nounwind {
 ; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    sw a0, 16(sp)
 ; RV32IFD-NEXT:    sw a1, 20(sp)
-; RV32IFD-NEXT:    fld ft0, 16(sp)
+; RV32IFD-NEXT:    fld fa5, 16(sp)
 ; RV32IFD-NEXT:    beqz a2, .LBB0_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    addi a2, a2, -1
-; RV32IFD-NEXT:    fsd ft0, 16(sp)
+; RV32IFD-NEXT:    fsd fa5, 16(sp)
 ; RV32IFD-NEXT:    lw a0, 16(sp)
 ; RV32IFD-NEXT:    lw a1, 20(sp)
-; RV32IFD-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
+; RV32IFD-NEXT:    fsd fa5, 8(sp) # 8-byte Folded Spill
 ; RV32IFD-NEXT:    call func@plt
 ; RV32IFD-NEXT:    sw a0, 16(sp)
 ; RV32IFD-NEXT:    sw a1, 20(sp)
-; RV32IFD-NEXT:    fld ft0, 16(sp)
-; RV32IFD-NEXT:    fld ft1, 8(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT:    fadd.d ft0, ft0, ft1
+; RV32IFD-NEXT:    fld fa5, 16(sp)
+; RV32IFD-NEXT:    fld fa4, 8(sp) # 8-byte Folded Reload
+; RV32IFD-NEXT:    fadd.d fa5, fa5, fa4
 ; RV32IFD-NEXT:  .LBB0_2: # %return
-; RV32IFD-NEXT:    fsd ft0, 16(sp)
+; RV32IFD-NEXT:    fsd fa5, 16(sp)
 ; RV32IFD-NEXT:    lw a0, 16(sp)
 ; RV32IFD-NEXT:    lw a1, 20(sp)
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -36,24 +36,24 @@ define double @func(double %d, i32 %n) nounwind {
 ; RV64IFD-LABEL: func:
 ; RV64IFD:       # %bb.0: # %entry
 ; RV64IFD-NEXT:    sext.w a2, a1
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x fa5, a0
 ; RV64IFD-NEXT:    beqz a2, .LBB0_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    addiw a1, a1, -1
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
-; RV64IFD-NEXT:    fsd ft0, 0(sp) # 8-byte Folded Spill
+; RV64IFD-NEXT:    fmv.x.d a0, fa5
+; RV64IFD-NEXT:    fsd fa5, 0(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call func@plt
-; RV64IFD-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-NEXT:    fld ft1, 0(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.d.x fa5, a0
+; RV64IFD-NEXT:    fld fa4, 0(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT:    fadd.d fa5, fa5, fa4
+; RV64IFD-NEXT:    fmv.x.d a0, fa5
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB0_2: # %return
-; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, fa5
 ; RV64IFD-NEXT:    ret
 entry:
   %cmp = icmp eq i32 %n, 0

diff --git a/llvm/test/CodeGen/RISCV/double_reduct.ll b/llvm/test/CodeGen/RISCV/double_reduct.ll
index d5bde8bad7c3..40d11808c862 100644
--- a/llvm/test/CodeGen/RISCV/double_reduct.ll
+++ b/llvm/test/CodeGen/RISCV/double_reduct.ll
@@ -27,13 +27,13 @@ define float @fmul_f32(<4 x float> %a, <4 x float> %b) {
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
 ; CHECK-NEXT:    vrgather.vi v10, v8, 1
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 2
 ; CHECK-NEXT:    vfmul.vv v8, v9, v8
 ; CHECK-NEXT:    vrgather.vi v9, v8, 1
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    vfmv.f.s ft1, v8
-; CHECK-NEXT:    fmul.s fa0, ft0, ft1
+; CHECK-NEXT:    vfmv.f.s fa4, v8
+; CHECK-NEXT:    fmul.s fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %r1 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %a)
   %r2 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %b)
@@ -45,14 +45,14 @@ define float @fmin_f32(<4 x float> %a, <4 x float> %b) {
 ; CHECK-LABEL: fmin_f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT:    flw fa5, %lo(.LCPI2_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v10, ft0
+; CHECK-NEXT:    vfmv.s.f v10, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v10
-; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    vfmv.f.s ft1, v8
-; CHECK-NEXT:    fmin.s fa0, ft0, ft1
+; CHECK-NEXT:    vfmv.f.s fa4, v8
+; CHECK-NEXT:    fmin.s fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %r1 = call fast float @llvm.vector.reduce.fmin.v4f32(<4 x float> %a)
   %r2 = call fast float @llvm.vector.reduce.fmin.v4f32(<4 x float> %b)
@@ -64,14 +64,14 @@ define float @fmax_f32(<4 x float> %a, <4 x float> %b) {
 ; CHECK-LABEL: fmax_f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT:    flw fa5, %lo(.LCPI3_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v10, ft0
+; CHECK-NEXT:    vfmv.s.f v10, fa5
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v10
-; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    vfmv.f.s ft1, v8
-; CHECK-NEXT:    fmax.s fa0, ft0, ft1
+; CHECK-NEXT:    vfmv.f.s fa4, v8
+; CHECK-NEXT:    fmax.s fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %r1 = call fast float @llvm.vector.reduce.fmax.v4f32(<4 x float> %a)
   %r2 = call fast float @llvm.vector.reduce.fmax.v4f32(<4 x float> %b)

diff --git a/llvm/test/CodeGen/RISCV/float-arith-strict.ll b/llvm/test/CodeGen/RISCV/float-arith-strict.ll
index 80158daea446..3c6aceca529a 100644
--- a/llvm/test/CodeGen/RISCV/float-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith-strict.ll
@@ -263,9 +263,9 @@ declare float @llvm.experimental.constrained.fma.f32(float, float, float, metada
 define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp {
 ; CHECKIF-LABEL: fmsub_s:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmv.w.x ft0, zero
-; CHECKIF-NEXT:    fadd.s ft0, fa2, ft0
-; CHECKIF-NEXT:    fmsub.s fa0, fa0, fa1, ft0
+; CHECKIF-NEXT:    fmv.w.x fa5, zero
+; CHECKIF-NEXT:    fadd.s fa5, fa2, fa5
+; CHECKIF-NEXT:    fmsub.s fa0, fa0, fa1, fa5
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmsub_s:
@@ -320,10 +320,10 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp {
 define float @fnmadd_s(float %a, float %b, float %c) nounwind strictfp {
 ; CHECKIF-LABEL: fnmadd_s:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmv.w.x ft0, zero
-; CHECKIF-NEXT:    fadd.s ft1, fa0, ft0
-; CHECKIF-NEXT:    fadd.s ft0, fa2, ft0
-; CHECKIF-NEXT:    fnmadd.s fa0, ft1, fa1, ft0
+; CHECKIF-NEXT:    fmv.w.x fa5, zero
+; CHECKIF-NEXT:    fadd.s fa4, fa0, fa5
+; CHECKIF-NEXT:    fadd.s fa5, fa2, fa5
+; CHECKIF-NEXT:    fnmadd.s fa0, fa4, fa1, fa5
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s:
@@ -392,10 +392,10 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind strictfp {
 define float @fnmadd_s_2(float %a, float %b, float %c) nounwind strictfp {
 ; CHECKIF-LABEL: fnmadd_s_2:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmv.w.x ft0, zero
-; CHECKIF-NEXT:    fadd.s ft1, fa1, ft0
-; CHECKIF-NEXT:    fadd.s ft0, fa2, ft0
-; CHECKIF-NEXT:    fnmadd.s fa0, ft1, fa0, ft0
+; CHECKIF-NEXT:    fmv.w.x fa5, zero
+; CHECKIF-NEXT:    fadd.s fa4, fa1, fa5
+; CHECKIF-NEXT:    fadd.s fa5, fa2, fa5
+; CHECKIF-NEXT:    fnmadd.s fa0, fa4, fa0, fa5
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s_2:
@@ -464,9 +464,9 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind strictfp {
 define float @fnmsub_s(float %a, float %b, float %c) nounwind strictfp {
 ; CHECKIF-LABEL: fnmsub_s:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmv.w.x ft0, zero
-; CHECKIF-NEXT:    fadd.s ft0, fa0, ft0
-; CHECKIF-NEXT:    fnmsub.s fa0, ft0, fa1, fa2
+; CHECKIF-NEXT:    fmv.w.x fa5, zero
+; CHECKIF-NEXT:    fadd.s fa5, fa0, fa5
+; CHECKIF-NEXT:    fnmsub.s fa0, fa5, fa1, fa2
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_s:
@@ -519,9 +519,9 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind strictfp {
 define float @fnmsub_s_2(float %a, float %b, float %c) nounwind strictfp {
 ; CHECKIF-LABEL: fnmsub_s_2:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmv.w.x ft0, zero
-; CHECKIF-NEXT:    fadd.s ft0, fa1, ft0
-; CHECKIF-NEXT:    fnmsub.s fa0, ft0, fa0, fa2
+; CHECKIF-NEXT:    fmv.w.x fa5, zero
+; CHECKIF-NEXT:    fadd.s fa5, fa1, fa5
+; CHECKIF-NEXT:    fnmsub.s fa0, fa5, fa0, fa2
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_s_2:

diff --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll
index 63fcfcfaa009..d6bb92736f01 100644
--- a/llvm/test/CodeGen/RISCV/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith.ll
@@ -182,9 +182,9 @@ define float @fsgnj_s(float %a, float %b) nounwind {
 define i32 @fneg_s(float %a, float %b) nounwind {
 ; CHECKIF-LABEL: fneg_s:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fadd.s ft0, fa0, fa0
-; CHECKIF-NEXT:    fneg.s ft1, ft0
-; CHECKIF-NEXT:    feq.s a0, ft0, ft1
+; CHECKIF-NEXT:    fadd.s fa5, fa0, fa0
+; CHECKIF-NEXT:    fneg.s fa4, fa5
+; CHECKIF-NEXT:    feq.s a0, fa5, fa4
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fneg_s:
@@ -224,8 +224,8 @@ define i32 @fneg_s(float %a, float %b) nounwind {
 define float @fsgnjn_s(float %a, float %b) nounwind {
 ; CHECKIF-LABEL: fsgnjn_s:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fadd.s ft0, fa0, fa1
-; CHECKIF-NEXT:    fsgnjn.s fa0, fa0, ft0
+; CHECKIF-NEXT:    fadd.s fa5, fa0, fa1
+; CHECKIF-NEXT:    fsgnjn.s fa0, fa0, fa5
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fsgnjn_s:
@@ -274,9 +274,9 @@ declare float @llvm.fabs.f32(float)
 define float @fabs_s(float %a, float %b) nounwind {
 ; CHECKIF-LABEL: fabs_s:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fadd.s ft0, fa0, fa1
-; CHECKIF-NEXT:    fabs.s ft1, ft0
-; CHECKIF-NEXT:    fadd.s fa0, ft1, ft0
+; CHECKIF-NEXT:    fadd.s fa5, fa0, fa1
+; CHECKIF-NEXT:    fabs.s fa4, fa5
+; CHECKIF-NEXT:    fadd.s fa0, fa4, fa5
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fabs_s:
@@ -400,9 +400,9 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind {
 define float @fmsub_s(float %a, float %b, float %c) nounwind {
 ; CHECKIF-LABEL: fmsub_s:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmv.w.x ft0, zero
-; CHECKIF-NEXT:    fadd.s ft0, fa2, ft0
-; CHECKIF-NEXT:    fmsub.s fa0, fa0, fa1, ft0
+; CHECKIF-NEXT:    fmv.w.x fa5, zero
+; CHECKIF-NEXT:    fadd.s fa5, fa2, fa5
+; CHECKIF-NEXT:    fmsub.s fa0, fa0, fa1, fa5
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmsub_s:
@@ -457,10 +457,10 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
 define float @fnmadd_s(float %a, float %b, float %c) nounwind {
 ; CHECKIF-LABEL: fnmadd_s:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmv.w.x ft0, zero
-; CHECKIF-NEXT:    fadd.s ft1, fa0, ft0
-; CHECKIF-NEXT:    fadd.s ft0, fa2, ft0
-; CHECKIF-NEXT:    fnmadd.s fa0, ft1, fa1, ft0
+; CHECKIF-NEXT:    fmv.w.x fa5, zero
+; CHECKIF-NEXT:    fadd.s fa4, fa0, fa5
+; CHECKIF-NEXT:    fadd.s fa5, fa2, fa5
+; CHECKIF-NEXT:    fnmadd.s fa0, fa4, fa1, fa5
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s:
@@ -529,10 +529,10 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind {
 define float @fnmadd_s_2(float %a, float %b, float %c) nounwind {
 ; CHECKIF-LABEL: fnmadd_s_2:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmv.w.x ft0, zero
-; CHECKIF-NEXT:    fadd.s ft1, fa1, ft0
-; CHECKIF-NEXT:    fadd.s ft0, fa2, ft0
-; CHECKIF-NEXT:    fnmadd.s fa0, ft1, fa0, ft0
+; CHECKIF-NEXT:    fmv.w.x fa5, zero
+; CHECKIF-NEXT:    fadd.s fa4, fa1, fa5
+; CHECKIF-NEXT:    fadd.s fa5, fa2, fa5
+; CHECKIF-NEXT:    fnmadd.s fa0, fa4, fa0, fa5
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s_2:
@@ -613,8 +613,8 @@ define float @fnmadd_s_3(float %a, float %b, float %c) nounwind {
 ;
 ; CHECKIF-LABEL: fnmadd_s_3:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmadd.s ft0, fa0, fa1, fa2
-; CHECKIF-NEXT:    fneg.s fa0, ft0
+; CHECKIF-NEXT:    fmadd.s fa5, fa0, fa1, fa2
+; CHECKIF-NEXT:    fneg.s fa0, fa5
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s_3:
@@ -688,9 +688,9 @@ define float @fnmadd_nsz(float %a, float %b, float %c) nounwind {
 define float @fnmsub_s(float %a, float %b, float %c) nounwind {
 ; CHECKIF-LABEL: fnmsub_s:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmv.w.x ft0, zero
-; CHECKIF-NEXT:    fadd.s ft0, fa0, ft0
-; CHECKIF-NEXT:    fnmsub.s fa0, ft0, fa1, fa2
+; CHECKIF-NEXT:    fmv.w.x fa5, zero
+; CHECKIF-NEXT:    fadd.s fa5, fa0, fa5
+; CHECKIF-NEXT:    fnmsub.s fa0, fa5, fa1, fa2
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_s:
@@ -743,9 +743,9 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind {
 define float @fnmsub_s_2(float %a, float %b, float %c) nounwind {
 ; CHECKIF-LABEL: fnmsub_s_2:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmv.w.x ft0, zero
-; CHECKIF-NEXT:    fadd.s ft0, fa1, ft0
-; CHECKIF-NEXT:    fnmsub.s fa0, ft0, fa0, fa2
+; CHECKIF-NEXT:    fmv.w.x fa5, zero
+; CHECKIF-NEXT:    fadd.s fa5, fa1, fa5
+; CHECKIF-NEXT:    fnmsub.s fa0, fa5, fa0, fa2
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_s_2:
@@ -838,9 +838,9 @@ define float @fmadd_s_contract(float %a, float %b, float %c) nounwind {
 define float @fmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; CHECKIF-LABEL: fmsub_s_contract:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmv.w.x ft0, zero
-; CHECKIF-NEXT:    fadd.s ft0, fa2, ft0
-; CHECKIF-NEXT:    fmsub.s fa0, fa0, fa1, ft0
+; CHECKIF-NEXT:    fmv.w.x fa5, zero
+; CHECKIF-NEXT:    fadd.s fa5, fa2, fa5
+; CHECKIF-NEXT:    fmsub.s fa0, fa0, fa1, fa5
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmsub_s_contract:
@@ -901,11 +901,11 @@ define float @fmsub_s_contract(float %a, float %b, float %c) nounwind {
 define float @fnmadd_s_contract(float %a, float %b, float %c) nounwind {
 ; CHECKIF-LABEL: fnmadd_s_contract:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmv.w.x ft0, zero
-; CHECKIF-NEXT:    fadd.s ft1, fa0, ft0
-; CHECKIF-NEXT:    fadd.s ft2, fa1, ft0
-; CHECKIF-NEXT:    fadd.s ft0, fa2, ft0
-; CHECKIF-NEXT:    fnmadd.s fa0, ft1, ft2, ft0
+; CHECKIF-NEXT:    fmv.w.x fa5, zero
+; CHECKIF-NEXT:    fadd.s fa4, fa0, fa5
+; CHECKIF-NEXT:    fadd.s fa3, fa1, fa5
+; CHECKIF-NEXT:    fadd.s fa5, fa2, fa5
+; CHECKIF-NEXT:    fnmadd.s fa0, fa4, fa3, fa5
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s_contract:
@@ -987,10 +987,10 @@ define float @fnmadd_s_contract(float %a, float %b, float %c) nounwind {
 define float @fnmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; CHECKIF-LABEL: fnmsub_s_contract:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmv.w.x ft0, zero
-; CHECKIF-NEXT:    fadd.s ft1, fa0, ft0
-; CHECKIF-NEXT:    fadd.s ft0, fa1, ft0
-; CHECKIF-NEXT:    fnmsub.s fa0, ft1, ft0, fa2
+; CHECKIF-NEXT:    fmv.w.x fa5, zero
+; CHECKIF-NEXT:    fadd.s fa4, fa0, fa5
+; CHECKIF-NEXT:    fadd.s fa5, fa1, fa5
+; CHECKIF-NEXT:    fnmsub.s fa0, fa4, fa5, fa2
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_s_contract:

diff --git a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
index f1ce0ccece51..f0c34317a562 100644
--- a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
@@ -17,42 +17,42 @@
 define float @bitcast_and(float %a1, float %a2) nounwind {
 ; RV32F-LABEL: bitcast_and:
 ; RV32F:       # %bb.0:
-; RV32F-NEXT:    fmv.w.x ft0, a1
-; RV32F-NEXT:    fmv.w.x ft1, a0
-; RV32F-NEXT:    fadd.s ft0, ft1, ft0
-; RV32F-NEXT:    fabs.s ft0, ft0
-; RV32F-NEXT:    fadd.s ft0, ft1, ft0
-; RV32F-NEXT:    fmv.x.w a0, ft0
+; RV32F-NEXT:    fmv.w.x fa5, a1
+; RV32F-NEXT:    fmv.w.x fa4, a0
+; RV32F-NEXT:    fadd.s fa5, fa4, fa5
+; RV32F-NEXT:    fabs.s fa5, fa5
+; RV32F-NEXT:    fadd.s fa5, fa4, fa5
+; RV32F-NEXT:    fmv.x.w a0, fa5
 ; RV32F-NEXT:    ret
 ;
 ; RV32FD-LABEL: bitcast_and:
 ; RV32FD:       # %bb.0:
-; RV32FD-NEXT:    fmv.w.x ft0, a1
-; RV32FD-NEXT:    fmv.w.x ft1, a0
-; RV32FD-NEXT:    fadd.s ft0, ft1, ft0
-; RV32FD-NEXT:    fabs.s ft0, ft0
-; RV32FD-NEXT:    fadd.s ft0, ft1, ft0
-; RV32FD-NEXT:    fmv.x.w a0, ft0
+; RV32FD-NEXT:    fmv.w.x fa5, a1
+; RV32FD-NEXT:    fmv.w.x fa4, a0
+; RV32FD-NEXT:    fadd.s fa5, fa4, fa5
+; RV32FD-NEXT:    fabs.s fa5, fa5
+; RV32FD-NEXT:    fadd.s fa5, fa4, fa5
+; RV32FD-NEXT:    fmv.x.w a0, fa5
 ; RV32FD-NEXT:    ret
 ;
 ; RV64F-LABEL: bitcast_and:
 ; RV64F:       # %bb.0:
-; RV64F-NEXT:    fmv.w.x ft0, a1
-; RV64F-NEXT:    fmv.w.x ft1, a0
-; RV64F-NEXT:    fadd.s ft0, ft1, ft0
-; RV64F-NEXT:    fabs.s ft0, ft0
-; RV64F-NEXT:    fadd.s ft0, ft1, ft0
-; RV64F-NEXT:    fmv.x.w a0, ft0
+; RV64F-NEXT:    fmv.w.x fa5, a1
+; RV64F-NEXT:    fmv.w.x fa4, a0
+; RV64F-NEXT:    fadd.s fa5, fa4, fa5
+; RV64F-NEXT:    fabs.s fa5, fa5
+; RV64F-NEXT:    fadd.s fa5, fa4, fa5
+; RV64F-NEXT:    fmv.x.w a0, fa5
 ; RV64F-NEXT:    ret
 ;
 ; RV64FD-LABEL: bitcast_and:
 ; RV64FD:       # %bb.0:
-; RV64FD-NEXT:    fmv.w.x ft0, a1
-; RV64FD-NEXT:    fmv.w.x ft1, a0
-; RV64FD-NEXT:    fadd.s ft0, ft1, ft0
-; RV64FD-NEXT:    fabs.s ft0, ft0
-; RV64FD-NEXT:    fadd.s ft0, ft1, ft0
-; RV64FD-NEXT:    fmv.x.w a0, ft0
+; RV64FD-NEXT:    fmv.w.x fa5, a1
+; RV64FD-NEXT:    fmv.w.x fa4, a0
+; RV64FD-NEXT:    fadd.s fa5, fa4, fa5
+; RV64FD-NEXT:    fabs.s fa5, fa5
+; RV64FD-NEXT:    fadd.s fa5, fa4, fa5
+; RV64FD-NEXT:    fmv.x.w a0, fa5
 ; RV64FD-NEXT:    ret
   %a3 = fadd float %a1, %a2
   %bc1 = bitcast float %a3 to i32
@@ -89,14 +89,14 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
 ; RV32FD-NEXT:    addi sp, sp, -16
 ; RV32FD-NEXT:    sw a2, 8(sp)
 ; RV32FD-NEXT:    sw a3, 12(sp)
-; RV32FD-NEXT:    fld ft0, 8(sp)
+; RV32FD-NEXT:    fld fa5, 8(sp)
 ; RV32FD-NEXT:    sw a0, 8(sp)
 ; RV32FD-NEXT:    sw a1, 12(sp)
-; RV32FD-NEXT:    fld ft1, 8(sp)
-; RV32FD-NEXT:    fadd.d ft0, ft1, ft0
-; RV32FD-NEXT:    fabs.d ft0, ft0
-; RV32FD-NEXT:    fadd.d ft0, ft1, ft0
-; RV32FD-NEXT:    fsd ft0, 8(sp)
+; RV32FD-NEXT:    fld fa4, 8(sp)
+; RV32FD-NEXT:    fadd.d fa5, fa4, fa5
+; RV32FD-NEXT:    fabs.d fa5, fa5
+; RV32FD-NEXT:    fadd.d fa5, fa4, fa5
+; RV32FD-NEXT:    fsd fa5, 8(sp)
 ; RV32FD-NEXT:    lw a0, 8(sp)
 ; RV32FD-NEXT:    lw a1, 12(sp)
 ; RV32FD-NEXT:    addi sp, sp, 16
@@ -120,12 +120,12 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
 ;
 ; RV64FD-LABEL: bitcast_double_and:
 ; RV64FD:       # %bb.0:
-; RV64FD-NEXT:    fmv.d.x ft0, a1
-; RV64FD-NEXT:    fmv.d.x ft1, a0
-; RV64FD-NEXT:    fadd.d ft0, ft1, ft0
-; RV64FD-NEXT:    fabs.d ft0, ft0
-; RV64FD-NEXT:    fadd.d ft0, ft1, ft0
-; RV64FD-NEXT:    fmv.x.d a0, ft0
+; RV64FD-NEXT:    fmv.d.x fa5, a1
+; RV64FD-NEXT:    fmv.d.x fa4, a0
+; RV64FD-NEXT:    fadd.d fa5, fa4, fa5
+; RV64FD-NEXT:    fabs.d fa5, fa5
+; RV64FD-NEXT:    fadd.d fa5, fa4, fa5
+; RV64FD-NEXT:    fmv.x.d a0, fa5
 ; RV64FD-NEXT:    ret
   %a3 = fadd double %a1, %a2
   %bc1 = bitcast double %a3 to i64
@@ -139,42 +139,42 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
 define float @bitcast_xor(float %a1, float %a2) nounwind {
 ; RV32F-LABEL: bitcast_xor:
 ; RV32F:       # %bb.0:
-; RV32F-NEXT:    fmv.w.x ft0, a1
-; RV32F-NEXT:    fmv.w.x ft1, a0
-; RV32F-NEXT:    fmul.s ft0, ft1, ft0
-; RV32F-NEXT:    fneg.s ft0, ft0
-; RV32F-NEXT:    fmul.s ft0, ft1, ft0
-; RV32F-NEXT:    fmv.x.w a0, ft0
+; RV32F-NEXT:    fmv.w.x fa5, a1
+; RV32F-NEXT:    fmv.w.x fa4, a0
+; RV32F-NEXT:    fmul.s fa5, fa4, fa5
+; RV32F-NEXT:    fneg.s fa5, fa5
+; RV32F-NEXT:    fmul.s fa5, fa4, fa5
+; RV32F-NEXT:    fmv.x.w a0, fa5
 ; RV32F-NEXT:    ret
 ;
 ; RV32FD-LABEL: bitcast_xor:
 ; RV32FD:       # %bb.0:
-; RV32FD-NEXT:    fmv.w.x ft0, a1
-; RV32FD-NEXT:    fmv.w.x ft1, a0
-; RV32FD-NEXT:    fmul.s ft0, ft1, ft0
-; RV32FD-NEXT:    fneg.s ft0, ft0
-; RV32FD-NEXT:    fmul.s ft0, ft1, ft0
-; RV32FD-NEXT:    fmv.x.w a0, ft0
+; RV32FD-NEXT:    fmv.w.x fa5, a1
+; RV32FD-NEXT:    fmv.w.x fa4, a0
+; RV32FD-NEXT:    fmul.s fa5, fa4, fa5
+; RV32FD-NEXT:    fneg.s fa5, fa5
+; RV32FD-NEXT:    fmul.s fa5, fa4, fa5
+; RV32FD-NEXT:    fmv.x.w a0, fa5
 ; RV32FD-NEXT:    ret
 ;
 ; RV64F-LABEL: bitcast_xor:
 ; RV64F:       # %bb.0:
-; RV64F-NEXT:    fmv.w.x ft0, a1
-; RV64F-NEXT:    fmv.w.x ft1, a0
-; RV64F-NEXT:    fmul.s ft0, ft1, ft0
-; RV64F-NEXT:    fneg.s ft0, ft0
-; RV64F-NEXT:    fmul.s ft0, ft1, ft0
-; RV64F-NEXT:    fmv.x.w a0, ft0
+; RV64F-NEXT:    fmv.w.x fa5, a1
+; RV64F-NEXT:    fmv.w.x fa4, a0
+; RV64F-NEXT:    fmul.s fa5, fa4, fa5
+; RV64F-NEXT:    fneg.s fa5, fa5
+; RV64F-NEXT:    fmul.s fa5, fa4, fa5
+; RV64F-NEXT:    fmv.x.w a0, fa5
 ; RV64F-NEXT:    ret
 ;
 ; RV64FD-LABEL: bitcast_xor:
 ; RV64FD:       # %bb.0:
-; RV64FD-NEXT:    fmv.w.x ft0, a1
-; RV64FD-NEXT:    fmv.w.x ft1, a0
-; RV64FD-NEXT:    fmul.s ft0, ft1, ft0
-; RV64FD-NEXT:    fneg.s ft0, ft0
-; RV64FD-NEXT:    fmul.s ft0, ft1, ft0
-; RV64FD-NEXT:    fmv.x.w a0, ft0
+; RV64FD-NEXT:    fmv.w.x fa5, a1
+; RV64FD-NEXT:    fmv.w.x fa4, a0
+; RV64FD-NEXT:    fmul.s fa5, fa4, fa5
+; RV64FD-NEXT:    fneg.s fa5, fa5
+; RV64FD-NEXT:    fmul.s fa5, fa4, fa5
+; RV64FD-NEXT:    fmv.x.w a0, fa5
 ; RV64FD-NEXT:    ret
   %a3 = fmul float %a1, %a2
   %bc1 = bitcast float %a3 to i32
@@ -211,14 +211,14 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
 ; RV32FD-NEXT:    addi sp, sp, -16
 ; RV32FD-NEXT:    sw a2, 8(sp)
 ; RV32FD-NEXT:    sw a3, 12(sp)
-; RV32FD-NEXT:    fld ft0, 8(sp)
+; RV32FD-NEXT:    fld fa5, 8(sp)
 ; RV32FD-NEXT:    sw a0, 8(sp)
 ; RV32FD-NEXT:    sw a1, 12(sp)
-; RV32FD-NEXT:    fld ft1, 8(sp)
-; RV32FD-NEXT:    fmul.d ft0, ft1, ft0
-; RV32FD-NEXT:    fneg.d ft0, ft0
-; RV32FD-NEXT:    fmul.d ft0, ft1, ft0
-; RV32FD-NEXT:    fsd ft0, 8(sp)
+; RV32FD-NEXT:    fld fa4, 8(sp)
+; RV32FD-NEXT:    fmul.d fa5, fa4, fa5
+; RV32FD-NEXT:    fneg.d fa5, fa5
+; RV32FD-NEXT:    fmul.d fa5, fa4, fa5
+; RV32FD-NEXT:    fsd fa5, 8(sp)
 ; RV32FD-NEXT:    lw a0, 8(sp)
 ; RV32FD-NEXT:    lw a1, 12(sp)
 ; RV32FD-NEXT:    addi sp, sp, 16
@@ -243,12 +243,12 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
 ;
 ; RV64FD-LABEL: bitcast_double_xor:
 ; RV64FD:       # %bb.0:
-; RV64FD-NEXT:    fmv.d.x ft0, a1
-; RV64FD-NEXT:    fmv.d.x ft1, a0
-; RV64FD-NEXT:    fmul.d ft0, ft1, ft0
-; RV64FD-NEXT:    fneg.d ft0, ft0
-; RV64FD-NEXT:    fmul.d ft0, ft1, ft0
-; RV64FD-NEXT:    fmv.x.d a0, ft0
+; RV64FD-NEXT:    fmv.d.x fa5, a1
+; RV64FD-NEXT:    fmv.d.x fa4, a0
+; RV64FD-NEXT:    fmul.d fa5, fa4, fa5
+; RV64FD-NEXT:    fneg.d fa5, fa5
+; RV64FD-NEXT:    fmul.d fa5, fa4, fa5
+; RV64FD-NEXT:    fmv.x.d a0, fa5
 ; RV64FD-NEXT:    ret
   %a3 = fmul double %a1, %a2
   %bc1 = bitcast double %a3 to i64
@@ -261,46 +261,46 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
 define float @bitcast_or(float %a1, float %a2) nounwind {
 ; RV32F-LABEL: bitcast_or:
 ; RV32F:       # %bb.0:
-; RV32F-NEXT:    fmv.w.x ft0, a1
-; RV32F-NEXT:    fmv.w.x ft1, a0
-; RV32F-NEXT:    fmul.s ft0, ft1, ft0
-; RV32F-NEXT:    fabs.s ft0, ft0
-; RV32F-NEXT:    fneg.s ft0, ft0
-; RV32F-NEXT:    fmul.s ft0, ft1, ft0
-; RV32F-NEXT:    fmv.x.w a0, ft0
+; RV32F-NEXT:    fmv.w.x fa5, a1
+; RV32F-NEXT:    fmv.w.x fa4, a0
+; RV32F-NEXT:    fmul.s fa5, fa4, fa5
+; RV32F-NEXT:    fabs.s fa5, fa5
+; RV32F-NEXT:    fneg.s fa5, fa5
+; RV32F-NEXT:    fmul.s fa5, fa4, fa5
+; RV32F-NEXT:    fmv.x.w a0, fa5
 ; RV32F-NEXT:    ret
 ;
 ; RV32FD-LABEL: bitcast_or:
 ; RV32FD:       # %bb.0:
-; RV32FD-NEXT:    fmv.w.x ft0, a1
-; RV32FD-NEXT:    fmv.w.x ft1, a0
-; RV32FD-NEXT:    fmul.s ft0, ft1, ft0
-; RV32FD-NEXT:    fabs.s ft0, ft0
-; RV32FD-NEXT:    fneg.s ft0, ft0
-; RV32FD-NEXT:    fmul.s ft0, ft1, ft0
-; RV32FD-NEXT:    fmv.x.w a0, ft0
+; RV32FD-NEXT:    fmv.w.x fa5, a1
+; RV32FD-NEXT:    fmv.w.x fa4, a0
+; RV32FD-NEXT:    fmul.s fa5, fa4, fa5
+; RV32FD-NEXT:    fabs.s fa5, fa5
+; RV32FD-NEXT:    fneg.s fa5, fa5
+; RV32FD-NEXT:    fmul.s fa5, fa4, fa5
+; RV32FD-NEXT:    fmv.x.w a0, fa5
 ; RV32FD-NEXT:    ret
 ;
 ; RV64F-LABEL: bitcast_or:
 ; RV64F:       # %bb.0:
-; RV64F-NEXT:    fmv.w.x ft0, a1
-; RV64F-NEXT:    fmv.w.x ft1, a0
-; RV64F-NEXT:    fmul.s ft0, ft1, ft0
-; RV64F-NEXT:    fabs.s ft0, ft0
-; RV64F-NEXT:    fneg.s ft0, ft0
-; RV64F-NEXT:    fmul.s ft0, ft1, ft0
-; RV64F-NEXT:    fmv.x.w a0, ft0
+; RV64F-NEXT:    fmv.w.x fa5, a1
+; RV64F-NEXT:    fmv.w.x fa4, a0
+; RV64F-NEXT:    fmul.s fa5, fa4, fa5
+; RV64F-NEXT:    fabs.s fa5, fa5
+; RV64F-NEXT:    fneg.s fa5, fa5
+; RV64F-NEXT:    fmul.s fa5, fa4, fa5
+; RV64F-NEXT:    fmv.x.w a0, fa5
 ; RV64F-NEXT:    ret
 ;
 ; RV64FD-LABEL: bitcast_or:
 ; RV64FD:       # %bb.0:
-; RV64FD-NEXT:    fmv.w.x ft0, a1
-; RV64FD-NEXT:    fmv.w.x ft1, a0
-; RV64FD-NEXT:    fmul.s ft0, ft1, ft0
-; RV64FD-NEXT:    fabs.s ft0, ft0
-; RV64FD-NEXT:    fneg.s ft0, ft0
-; RV64FD-NEXT:    fmul.s ft0, ft1, ft0
-; RV64FD-NEXT:    fmv.x.w a0, ft0
+; RV64FD-NEXT:    fmv.w.x fa5, a1
+; RV64FD-NEXT:    fmv.w.x fa4, a0
+; RV64FD-NEXT:    fmul.s fa5, fa4, fa5
+; RV64FD-NEXT:    fabs.s fa5, fa5
+; RV64FD-NEXT:    fneg.s fa5, fa5
+; RV64FD-NEXT:    fmul.s fa5, fa4, fa5
+; RV64FD-NEXT:    fmv.x.w a0, fa5
 ; RV64FD-NEXT:    ret
   %a3 = fmul float %a1, %a2
   %bc1 = bitcast float %a3 to i32
@@ -337,15 +337,15 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
 ; RV32FD-NEXT:    addi sp, sp, -16
 ; RV32FD-NEXT:    sw a2, 8(sp)
 ; RV32FD-NEXT:    sw a3, 12(sp)
-; RV32FD-NEXT:    fld ft0, 8(sp)
+; RV32FD-NEXT:    fld fa5, 8(sp)
 ; RV32FD-NEXT:    sw a0, 8(sp)
 ; RV32FD-NEXT:    sw a1, 12(sp)
-; RV32FD-NEXT:    fld ft1, 8(sp)
-; RV32FD-NEXT:    fmul.d ft0, ft1, ft0
-; RV32FD-NEXT:    fabs.d ft0, ft0
-; RV32FD-NEXT:    fneg.d ft0, ft0
-; RV32FD-NEXT:    fmul.d ft0, ft1, ft0
-; RV32FD-NEXT:    fsd ft0, 8(sp)
+; RV32FD-NEXT:    fld fa4, 8(sp)
+; RV32FD-NEXT:    fmul.d fa5, fa4, fa5
+; RV32FD-NEXT:    fabs.d fa5, fa5
+; RV32FD-NEXT:    fneg.d fa5, fa5
+; RV32FD-NEXT:    fmul.d fa5, fa4, fa5
+; RV32FD-NEXT:    fsd fa5, 8(sp)
 ; RV32FD-NEXT:    lw a0, 8(sp)
 ; RV32FD-NEXT:    lw a1, 12(sp)
 ; RV32FD-NEXT:    addi sp, sp, 16
@@ -370,13 +370,13 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
 ;
 ; RV64FD-LABEL: bitcast_double_or:
 ; RV64FD:       # %bb.0:
-; RV64FD-NEXT:    fmv.d.x ft0, a1
-; RV64FD-NEXT:    fmv.d.x ft1, a0
-; RV64FD-NEXT:    fmul.d ft0, ft1, ft0
-; RV64FD-NEXT:    fabs.d ft0, ft0
-; RV64FD-NEXT:    fneg.d ft0, ft0
-; RV64FD-NEXT:    fmul.d ft0, ft1, ft0
-; RV64FD-NEXT:    fmv.x.d a0, ft0
+; RV64FD-NEXT:    fmv.d.x fa5, a1
+; RV64FD-NEXT:    fmv.d.x fa4, a0
+; RV64FD-NEXT:    fmul.d fa5, fa4, fa5
+; RV64FD-NEXT:    fabs.d fa5, fa5
+; RV64FD-NEXT:    fneg.d fa5, fa5
+; RV64FD-NEXT:    fmul.d fa5, fa4, fa5
+; RV64FD-NEXT:    fmv.x.d a0, fa5
 ; RV64FD-NEXT:    ret
   %a3 = fmul double %a1, %a2
   %bc1 = bitcast double %a3 to i64

diff --git a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
index d1d3f5ed379e..3741e8648040 100644
--- a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
@@ -93,10 +93,10 @@ define float @fcopysign_fneg(float %a, float %b) nounwind {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a2, 524288
 ; RV32IF-NEXT:    xor a1, a1, a2
-; RV32IF-NEXT:    fmv.w.x ft0, a1
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fsgnj.s ft0, ft1, ft0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a1
+; RV32IF-NEXT:    fmv.w.x fa4, a0
+; RV32IF-NEXT:    fsgnj.s fa5, fa4, fa5
+; RV32IF-NEXT:    fmv.x.w a0, fa5
 ; RV32IF-NEXT:    ret
 ;
 ; RV64I-LABEL: fcopysign_fneg:
@@ -111,10 +111,10 @@ define float @fcopysign_fneg(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: fcopysign_fneg:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a1
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fsgnjn.s ft0, ft1, ft0
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, a1
+; RV64IF-NEXT:    fmv.w.x fa4, a0
+; RV64IF-NEXT:    fsgnjn.s fa5, fa4, fa5
+; RV64IF-NEXT:    fmv.x.w a0, fa5
 ; RV64IF-NEXT:    ret
   %1 = fneg float %b
   %2 = call float @llvm.copysign.f32(float %a, float %1)

diff --git a/llvm/test/CodeGen/RISCV/float-convert-strict.ll b/llvm/test/CodeGen/RISCV/float-convert-strict.ll
index 9bf675f3a681..47ebb49a8923 100644
--- a/llvm/test/CodeGen/RISCV/float-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert-strict.ll
@@ -498,15 +498,15 @@ define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32IF-LABEL: fcvt_s_w_demanded_bits:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi a0, a0, 1
-; RV32IF-NEXT:    fcvt.s.w ft0, a0
-; RV32IF-NEXT:    fsw ft0, 0(a1)
+; RV32IF-NEXT:    fcvt.s.w fa5, a0
+; RV32IF-NEXT:    fsw fa5, 0(a1)
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_w_demanded_bits:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addiw a0, a0, 1
-; RV64IF-NEXT:    fcvt.s.w ft0, a0
-; RV64IF-NEXT:    fsw ft0, 0(a1)
+; RV64IF-NEXT:    fcvt.s.w fa5, a0
+; RV64IF-NEXT:    fsw fa5, 0(a1)
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_w_demanded_bits:
@@ -555,15 +555,15 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32IF-LABEL: fcvt_s_wu_demanded_bits:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi a0, a0, 1
-; RV32IF-NEXT:    fcvt.s.wu ft0, a0
-; RV32IF-NEXT:    fsw ft0, 0(a1)
+; RV32IF-NEXT:    fcvt.s.wu fa5, a0
+; RV32IF-NEXT:    fsw fa5, 0(a1)
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_wu_demanded_bits:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addiw a0, a0, 1
-; RV64IF-NEXT:    fcvt.s.wu ft0, a0
-; RV64IF-NEXT:    fsw ft0, 0(a1)
+; RV64IF-NEXT:    fcvt.s.wu fa5, a0
+; RV64IF-NEXT:    fsw fa5, 0(a1)
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_wu_demanded_bits:

diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index e0ce301dad3a..fac48ff0ea82 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -289,8 +289,8 @@ declare i32 @llvm.fptoui.sat.i32.f32(float)
 define i32 @fmv_x_w(float %a, float %b) nounwind {
 ; CHECKIF-LABEL: fmv_x_w:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fadd.s ft0, fa0, fa1
-; CHECKIF-NEXT:    fmv.x.w a0, ft0
+; CHECKIF-NEXT:    fadd.s fa5, fa0, fa1
+; CHECKIF-NEXT:    fmv.x.w a0, fa5
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmv_x_w:
@@ -443,9 +443,9 @@ define float @fcvt_s_wu_load(ptr %p) nounwind {
 define float @fmv_w_x(i32 %a, i32 %b) nounwind {
 ; CHECKIF-LABEL: fmv_w_x:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fmv.w.x ft0, a0
-; CHECKIF-NEXT:    fmv.w.x ft1, a1
-; CHECKIF-NEXT:    fadd.s fa0, ft0, ft1
+; CHECKIF-NEXT:    fmv.w.x fa5, a0
+; CHECKIF-NEXT:    fmv.w.x fa4, a1
+; CHECKIF-NEXT:    fadd.s fa0, fa5, fa4
 ; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fmv_w_x:
@@ -517,8 +517,8 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    lui a0, 913408
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fle.s s0, ft0, fa0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fle.s s0, fa5, fa0
 ; RV32IF-NEXT:    call __fixsfdi@plt
 ; RV32IF-NEXT:    lui a3, 524288
 ; RV32IF-NEXT:    bnez s0, .LBB12_2
@@ -526,8 +526,8 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:    lui a1, 524288
 ; RV32IF-NEXT:  .LBB12_2: # %start
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI12_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI12_0)(a2)
-; RV32IF-NEXT:    flt.s a2, ft0, fs0
+; RV32IF-NEXT:    flw fa5, %lo(.LCPI12_0)(a2)
+; RV32IF-NEXT:    flt.s a2, fa5, fs0
 ; RV32IF-NEXT:    beqz a2, .LBB12_4
 ; RV32IF-NEXT:  # %bb.3:
 ; RV32IF-NEXT:    addi a1, a3, -1
@@ -712,14 +712,14 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
-; RV32IF-NEXT:    fmv.w.x ft0, zero
-; RV32IF-NEXT:    fle.s a0, ft0, fa0
+; RV32IF-NEXT:    fmv.w.x fa5, zero
+; RV32IF-NEXT:    fle.s a0, fa5, fa0
 ; RV32IF-NEXT:    neg s0, a0
 ; RV32IF-NEXT:    call __fixunssfdi@plt
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI14_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI14_0)(a2)
+; RV32IF-NEXT:    flw fa5, %lo(.LCPI14_0)(a2)
 ; RV32IF-NEXT:    and a0, s0, a0
-; RV32IF-NEXT:    flt.s a2, ft0, fs0
+; RV32IF-NEXT:    flt.s a2, fa5, fs0
 ; RV32IF-NEXT:    neg a2, a2
 ; RV32IF-NEXT:    or a0, a2, a0
 ; RV32IF-NEXT:    and a1, s0, a1
@@ -1002,15 +1002,15 @@ define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32IF-LABEL: fcvt_s_w_demanded_bits:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi a0, a0, 1
-; RV32IF-NEXT:    fcvt.s.w ft0, a0
-; RV32IF-NEXT:    fsw ft0, 0(a1)
+; RV32IF-NEXT:    fcvt.s.w fa5, a0
+; RV32IF-NEXT:    fsw fa5, 0(a1)
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_w_demanded_bits:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addiw a0, a0, 1
-; RV64IF-NEXT:    fcvt.s.w ft0, a0
-; RV64IF-NEXT:    fsw ft0, 0(a1)
+; RV64IF-NEXT:    fcvt.s.w fa5, a0
+; RV64IF-NEXT:    fsw fa5, 0(a1)
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_w_demanded_bits:
@@ -1059,15 +1059,15 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32IF-LABEL: fcvt_s_wu_demanded_bits:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi a0, a0, 1
-; RV32IF-NEXT:    fcvt.s.wu ft0, a0
-; RV32IF-NEXT:    fsw ft0, 0(a1)
+; RV32IF-NEXT:    fcvt.s.wu fa5, a0
+; RV32IF-NEXT:    fsw fa5, 0(a1)
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_s_wu_demanded_bits:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addiw a0, a0, 1
-; RV64IF-NEXT:    fcvt.s.wu ft0, a0
-; RV64IF-NEXT:    fsw ft0, 0(a1)
+; RV64IF-NEXT:    fcvt.s.wu fa5, a0
+; RV64IF-NEXT:    fsw fa5, 0(a1)
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_s_wu_demanded_bits:
@@ -1147,12 +1147,12 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_w_s_sat_i16:
 ; RV32IF:       # %bb.0: # %start
 ; RV32IF-NEXT:    lui a0, %hi(.LCPI24_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI24_0)(a0)
+; RV32IF-NEXT:    flw fa5, %lo(.LCPI24_0)(a0)
 ; RV32IF-NEXT:    lui a0, 815104
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmax.s ft1, fa0, ft1
-; RV32IF-NEXT:    fmin.s ft0, ft1, ft0
-; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT:    fmv.w.x fa4, a0
+; RV32IF-NEXT:    fmax.s fa4, fa0, fa4
+; RV32IF-NEXT:    fmin.s fa5, fa4, fa5
+; RV32IF-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IF-NEXT:    feq.s a1, fa0, fa0
 ; RV32IF-NEXT:    seqz a1, a1
 ; RV32IF-NEXT:    addi a1, a1, -1
@@ -1162,12 +1162,12 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
 ; RV64IF-LABEL: fcvt_w_s_sat_i16:
 ; RV64IF:       # %bb.0: # %start
 ; RV64IF-NEXT:    lui a0, %hi(.LCPI24_0)
-; RV64IF-NEXT:    flw ft0, %lo(.LCPI24_0)(a0)
+; RV64IF-NEXT:    flw fa5, %lo(.LCPI24_0)(a0)
 ; RV64IF-NEXT:    lui a0, 815104
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmax.s ft1, fa0, ft1
-; RV64IF-NEXT:    fmin.s ft0, ft1, ft0
-; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT:    fmv.w.x fa4, a0
+; RV64IF-NEXT:    fmax.s fa4, fa0, fa4
+; RV64IF-NEXT:    fmin.s fa5, fa4, fa5
+; RV64IF-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IF-NEXT:    feq.s a1, fa0, fa0
 ; RV64IF-NEXT:    seqz a1, a1
 ; RV64IF-NEXT:    addi a1, a1, -1
@@ -1299,21 +1299,21 @@ define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_wu_s_sat_i16:
 ; RV32IF:       # %bb.0: # %start
 ; RV32IF-NEXT:    lui a0, %hi(.LCPI26_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI26_0)(a0)
-; RV32IF-NEXT:    fmv.w.x ft1, zero
-; RV32IF-NEXT:    fmax.s ft1, fa0, ft1
-; RV32IF-NEXT:    fmin.s ft0, ft1, ft0
-; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IF-NEXT:    flw fa5, %lo(.LCPI26_0)(a0)
+; RV32IF-NEXT:    fmv.w.x fa4, zero
+; RV32IF-NEXT:    fmax.s fa4, fa0, fa4
+; RV32IF-NEXT:    fmin.s fa5, fa4, fa5
+; RV32IF-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_wu_s_sat_i16:
 ; RV64IF:       # %bb.0: # %start
 ; RV64IF-NEXT:    lui a0, %hi(.LCPI26_0)
-; RV64IF-NEXT:    flw ft0, %lo(.LCPI26_0)(a0)
-; RV64IF-NEXT:    fmv.w.x ft1, zero
-; RV64IF-NEXT:    fmax.s ft1, fa0, ft1
-; RV64IF-NEXT:    fmin.s ft0, ft1, ft0
-; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IF-NEXT:    flw fa5, %lo(.LCPI26_0)(a0)
+; RV64IF-NEXT:    fmv.w.x fa4, zero
+; RV64IF-NEXT:    fmax.s fa4, fa0, fa4
+; RV64IF-NEXT:    fmin.s fa5, fa4, fa5
+; RV64IF-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s_sat_i16:
@@ -1431,12 +1431,12 @@ define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_w_s_sat_i8:
 ; RV32IF:       # %bb.0: # %start
 ; RV32IF-NEXT:    lui a0, 798720
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmax.s ft0, fa0, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fmax.s fa5, fa0, fa5
 ; RV32IF-NEXT:    lui a0, 274400
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmin.s ft0, ft0, ft1
-; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT:    fmv.w.x fa4, a0
+; RV32IF-NEXT:    fmin.s fa5, fa5, fa4
+; RV32IF-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IF-NEXT:    feq.s a1, fa0, fa0
 ; RV32IF-NEXT:    seqz a1, a1
 ; RV32IF-NEXT:    addi a1, a1, -1
@@ -1446,12 +1446,12 @@ define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
 ; RV64IF-LABEL: fcvt_w_s_sat_i8:
 ; RV64IF:       # %bb.0: # %start
 ; RV64IF-NEXT:    lui a0, 798720
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmax.s ft0, fa0, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fmax.s fa5, fa0, fa5
 ; RV64IF-NEXT:    lui a0, 274400
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmin.s ft0, ft0, ft1
-; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT:    fmv.w.x fa4, a0
+; RV64IF-NEXT:    fmin.s fa5, fa5, fa4
+; RV64IF-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IF-NEXT:    feq.s a1, fa0, fa0
 ; RV64IF-NEXT:    seqz a1, a1
 ; RV64IF-NEXT:    addi a1, a1, -1
@@ -1578,22 +1578,22 @@ define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
 define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_wu_s_sat_i8:
 ; RV32IF:       # %bb.0: # %start
-; RV32IF-NEXT:    fmv.w.x ft0, zero
-; RV32IF-NEXT:    fmax.s ft0, fa0, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, zero
+; RV32IF-NEXT:    fmax.s fa5, fa0, fa5
 ; RV32IF-NEXT:    lui a0, 276464
-; RV32IF-NEXT:    fmv.w.x ft1, a0
-; RV32IF-NEXT:    fmin.s ft0, ft0, ft1
-; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IF-NEXT:    fmv.w.x fa4, a0
+; RV32IF-NEXT:    fmin.s fa5, fa5, fa4
+; RV32IF-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_wu_s_sat_i8:
 ; RV64IF:       # %bb.0: # %start
-; RV64IF-NEXT:    fmv.w.x ft0, zero
-; RV64IF-NEXT:    fmax.s ft0, fa0, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, zero
+; RV64IF-NEXT:    fmax.s fa5, fa0, fa5
 ; RV64IF-NEXT:    lui a0, 276464
-; RV64IF-NEXT:    fmv.w.x ft1, a0
-; RV64IF-NEXT:    fmin.s ft0, ft0, ft1
-; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IF-NEXT:    fmv.w.x fa4, a0
+; RV64IF-NEXT:    fmin.s fa5, fa5, fa4
+; RV64IF-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s_sat_i8:

diff --git a/llvm/test/CodeGen/RISCV/float-imm.ll b/llvm/test/CodeGen/RISCV/float-imm.ll
index 909dddd2e45a..c5fbe6da87a6 100644
--- a/llvm/test/CodeGen/RISCV/float-imm.ll
+++ b/llvm/test/CodeGen/RISCV/float-imm.ll
@@ -18,8 +18,8 @@ define float @float_imm_op(float %a) nounwind {
 ; CHECK-LABEL: float_imm_op:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, 260096
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    fadd.s fa0, fa0, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    fadd.s fa0, fa0, fa5
 ; CHECK-NEXT:    ret
   %1 = fadd float %a, 1.0
   ret float %1

diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
index 7fd4ef83fa73..84f5c26acabd 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -641,28 +641,28 @@ define float @floor_f32(float %a) nounwind {
 ; RV32IF-LABEL: floor_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB17_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rdn
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB17_2:
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: floor_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lui a0, 307200
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fabs.s ft1, fa0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fabs.s fa4, fa0
+; RV64IF-NEXT:    flt.s a0, fa4, fa5
 ; RV64IF-NEXT:    beqz a0, .LBB17_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fcvt.w.s a0, fa0, rdn
-; RV64IF-NEXT:    fcvt.s.w ft0, a0, rdn
-; RV64IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV64IF-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV64IF-NEXT:  .LBB17_2:
 ; RV64IF-NEXT:    ret
 ;
@@ -693,28 +693,28 @@ define float @ceil_f32(float %a) nounwind {
 ; RV32IF-LABEL: ceil_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB18_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rup
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rup
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB18_2:
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: ceil_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lui a0, 307200
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fabs.s ft1, fa0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fabs.s fa4, fa0
+; RV64IF-NEXT:    flt.s a0, fa4, fa5
 ; RV64IF-NEXT:    beqz a0, .LBB18_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fcvt.w.s a0, fa0, rup
-; RV64IF-NEXT:    fcvt.s.w ft0, a0, rup
-; RV64IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV64IF-NEXT:    fcvt.s.w fa5, a0, rup
+; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV64IF-NEXT:  .LBB18_2:
 ; RV64IF-NEXT:    ret
 ;
@@ -745,28 +745,28 @@ define float @trunc_f32(float %a) nounwind {
 ; RV32IF-LABEL: trunc_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB19_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rtz
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB19_2:
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: trunc_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lui a0, 307200
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fabs.s ft1, fa0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fabs.s fa4, fa0
+; RV64IF-NEXT:    flt.s a0, fa4, fa5
 ; RV64IF-NEXT:    beqz a0, .LBB19_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
-; RV64IF-NEXT:    fcvt.s.w ft0, a0, rtz
-; RV64IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV64IF-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV64IF-NEXT:  .LBB19_2:
 ; RV64IF-NEXT:    ret
 ;
@@ -797,28 +797,28 @@ define float @rint_f32(float %a) nounwind {
 ; RV32IF-LABEL: rint_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB20_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0
-; RV32IF-NEXT:    fcvt.s.w ft0, a0
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB20_2:
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: rint_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lui a0, 307200
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fabs.s ft1, fa0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fabs.s fa4, fa0
+; RV64IF-NEXT:    flt.s a0, fa4, fa5
 ; RV64IF-NEXT:    beqz a0, .LBB20_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fcvt.w.s a0, fa0
-; RV64IF-NEXT:    fcvt.s.w ft0, a0
-; RV64IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV64IF-NEXT:    fcvt.s.w fa5, a0
+; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV64IF-NEXT:  .LBB20_2:
 ; RV64IF-NEXT:    ret
 ;
@@ -881,28 +881,28 @@ define float @round_f32(float %a) nounwind {
 ; RV32IF-LABEL: round_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB22_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rmm
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB22_2:
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: round_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lui a0, 307200
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fabs.s ft1, fa0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fabs.s fa4, fa0
+; RV64IF-NEXT:    flt.s a0, fa4, fa5
 ; RV64IF-NEXT:    beqz a0, .LBB22_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fcvt.w.s a0, fa0, rmm
-; RV64IF-NEXT:    fcvt.s.w ft0, a0, rmm
-; RV64IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV64IF-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV64IF-NEXT:  .LBB22_2:
 ; RV64IF-NEXT:    ret
 ;
@@ -933,28 +933,28 @@ define float @roundeven_f32(float %a) nounwind {
 ; RV32IF-LABEL: roundeven_f32:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB23_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rne
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rne
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB23_2:
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: roundeven_f32:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lui a0, 307200
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fabs.s ft1, fa0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fabs.s fa4, fa0
+; RV64IF-NEXT:    flt.s a0, fa4, fa5
 ; RV64IF-NEXT:    beqz a0, .LBB23_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fcvt.w.s a0, fa0, rne
-; RV64IF-NEXT:    fcvt.s.w ft0, a0, rne
-; RV64IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV64IF-NEXT:    fcvt.s.w fa5, a0, rne
+; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV64IF-NEXT:  .LBB23_2:
 ; RV64IF-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/float-mem.ll b/llvm/test/CodeGen/RISCV/float-mem.ll
index 7c1a9631b014..145d2e315cf8 100644
--- a/llvm/test/CodeGen/RISCV/float-mem.ll
+++ b/llvm/test/CodeGen/RISCV/float-mem.ll
@@ -7,9 +7,9 @@
 define dso_local float @flw(ptr %a) nounwind {
 ; CHECKIF-LABEL: flw:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    flw ft0, 0(a0)
-; CHECKIF-NEXT:    flw ft1, 12(a0)
-; CHECKIF-NEXT:    fadd.s fa0, ft0, ft1
+; CHECKIF-NEXT:    flw fa5, 0(a0)
+; CHECKIF-NEXT:    flw fa4, 12(a0)
+; CHECKIF-NEXT:    fadd.s fa0, fa5, fa4
 ; CHECKIF-NEXT:    ret
   %1 = load float, ptr %a
   %2 = getelementptr float, ptr %a, i32 3
@@ -25,9 +25,9 @@ define dso_local void @fsw(ptr %a, float %b, float %c) nounwind {
 ; for the soft float ABI
 ; CHECKIF-LABEL: fsw:
 ; CHECKIF:       # %bb.0:
-; CHECKIF-NEXT:    fadd.s ft0, fa0, fa1
-; CHECKIF-NEXT:    fsw ft0, 0(a0)
-; CHECKIF-NEXT:    fsw ft0, 32(a0)
+; CHECKIF-NEXT:    fadd.s fa5, fa0, fa1
+; CHECKIF-NEXT:    fsw fa5, 0(a0)
+; CHECKIF-NEXT:    fsw fa5, 32(a0)
 ; CHECKIF-NEXT:    ret
   %1 = fadd float %b, %c
   store float %1, ptr %a
@@ -46,10 +46,10 @@ define dso_local float @flw_fsw_global(float %a, float %b) nounwind {
 ; CHECKIF:       # %bb.0:
 ; CHECKIF-NEXT:    fadd.s fa0, fa0, fa1
 ; CHECKIF-NEXT:    lui a0, %hi(G)
-; CHECKIF-NEXT:    flw ft0, %lo(G)(a0)
+; CHECKIF-NEXT:    flw fa5, %lo(G)(a0)
 ; CHECKIF-NEXT:    addi a1, a0, %lo(G)
 ; CHECKIF-NEXT:    fsw fa0, %lo(G)(a0)
-; CHECKIF-NEXT:    flw ft0, 36(a1)
+; CHECKIF-NEXT:    flw fa5, 36(a1)
 ; CHECKIF-NEXT:    fsw fa0, 36(a1)
 ; CHECKIF-NEXT:    ret
   %1 = fadd float %a, %b
@@ -66,8 +66,8 @@ define dso_local float @flw_fsw_constant(float %a) nounwind {
 ; RV32IF-LABEL: flw_fsw_constant:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 912092
-; RV32IF-NEXT:    flw ft0, -273(a0)
-; RV32IF-NEXT:    fadd.s fa0, fa0, ft0
+; RV32IF-NEXT:    flw fa5, -273(a0)
+; RV32IF-NEXT:    fadd.s fa0, fa0, fa5
 ; RV32IF-NEXT:    fsw fa0, -273(a0)
 ; RV32IF-NEXT:    ret
 ;
@@ -75,8 +75,8 @@ define dso_local float @flw_fsw_constant(float %a) nounwind {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lui a0, 228023
 ; RV64IF-NEXT:    slli a0, a0, 2
-; RV64IF-NEXT:    flw ft0, -273(a0)
-; RV64IF-NEXT:    fadd.s fa0, fa0, ft0
+; RV64IF-NEXT:    flw fa5, -273(a0)
+; RV64IF-NEXT:    fadd.s fa0, fa0, fa5
 ; RV64IF-NEXT:    fsw fa0, -273(a0)
 ; RV64IF-NEXT:    ret
   %1 = inttoptr i32 3735928559 to ptr
@@ -97,8 +97,8 @@ define dso_local float @flw_stack(float %a) nounwind {
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    addi a0, sp, 4
 ; RV32IF-NEXT:    call notdead@plt
-; RV32IF-NEXT:    flw ft0, 4(sp)
-; RV32IF-NEXT:    fadd.s fa0, ft0, fs0
+; RV32IF-NEXT:    flw fa5, 4(sp)
+; RV32IF-NEXT:    fadd.s fa0, fa5, fs0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -112,8 +112,8 @@ define dso_local float @flw_stack(float %a) nounwind {
 ; RV64IF-NEXT:    fmv.s fs0, fa0
 ; RV64IF-NEXT:    mv a0, sp
 ; RV64IF-NEXT:    call notdead@plt
-; RV64IF-NEXT:    flw ft0, 0(sp)
-; RV64IF-NEXT:    fadd.s fa0, ft0, fs0
+; RV64IF-NEXT:    flw fa5, 0(sp)
+; RV64IF-NEXT:    fadd.s fa0, fa5, fs0
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -130,8 +130,8 @@ define dso_local void @fsw_stack(float %a, float %b) nounwind {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    fadd.s ft0, fa0, fa1
-; RV32IF-NEXT:    fsw ft0, 8(sp)
+; RV32IF-NEXT:    fadd.s fa5, fa0, fa1
+; RV32IF-NEXT:    fsw fa5, 8(sp)
 ; RV32IF-NEXT:    addi a0, sp, 8
 ; RV32IF-NEXT:    call notdead@plt
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -142,8 +142,8 @@ define dso_local void @fsw_stack(float %a, float %b) nounwind {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    fadd.s ft0, fa0, fa1
-; RV64IF-NEXT:    fsw ft0, 4(sp)
+; RV64IF-NEXT:    fadd.s fa5, fa0, fa1
+; RV64IF-NEXT:    fsw fa5, 4(sp)
 ; RV64IF-NEXT:    addi a0, sp, 4
 ; RV64IF-NEXT:    call notdead@plt
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
index 81f00b034810..a87fd92d5780 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
@@ -27,18 +27,18 @@ define i64 @test_floor_si64(float %x) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB1_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fs0, rdn
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rdn
-; RV32IF-NEXT:    fsgnj.s fs0, ft0, fs0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV32IF-NEXT:    fsgnj.s fs0, fa5, fs0
 ; RV32IF-NEXT:  .LBB1_2:
 ; RV32IF-NEXT:    lui a0, 913408
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fle.s s0, ft0, fs0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fle.s s0, fa5, fs0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
 ; RV32IF-NEXT:    call __fixsfdi@plt
 ; RV32IF-NEXT:    lui a3, 524288
@@ -47,8 +47,8 @@ define i64 @test_floor_si64(float %x) nounwind {
 ; RV32IF-NEXT:    lui a1, 524288
 ; RV32IF-NEXT:  .LBB1_4:
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI1_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI1_0)(a2)
-; RV32IF-NEXT:    flt.s a2, ft0, fs0
+; RV32IF-NEXT:    flw fa5, %lo(.LCPI1_0)(a2)
+; RV32IF-NEXT:    flt.s a2, fa5, fs0
 ; RV32IF-NEXT:    beqz a2, .LBB1_6
 ; RV32IF-NEXT:  # %bb.5:
 ; RV32IF-NEXT:    addi a1, a3, -1
@@ -104,24 +104,24 @@ define i64 @test_floor_ui64(float %x) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB3_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fs0, rdn
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rdn
-; RV32IF-NEXT:    fsgnj.s fs0, ft0, fs0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV32IF-NEXT:    fsgnj.s fs0, fa5, fs0
 ; RV32IF-NEXT:  .LBB3_2:
-; RV32IF-NEXT:    fmv.w.x ft0, zero
-; RV32IF-NEXT:    fle.s a0, ft0, fs0
+; RV32IF-NEXT:    fmv.w.x fa5, zero
+; RV32IF-NEXT:    fle.s a0, fa5, fs0
 ; RV32IF-NEXT:    neg s0, a0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
 ; RV32IF-NEXT:    call __fixunssfdi@plt
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI3_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI3_0)(a2)
+; RV32IF-NEXT:    flw fa5, %lo(.LCPI3_0)(a2)
 ; RV32IF-NEXT:    and a0, s0, a0
-; RV32IF-NEXT:    flt.s a2, ft0, fs0
+; RV32IF-NEXT:    flt.s a2, fa5, fs0
 ; RV32IF-NEXT:    neg a2, a2
 ; RV32IF-NEXT:    or a0, a2, a0
 ; RV32IF-NEXT:    and a1, s0, a1
@@ -168,18 +168,18 @@ define i64 @test_ceil_si64(float %x) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB5_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fs0, rup
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rup
-; RV32IF-NEXT:    fsgnj.s fs0, ft0, fs0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rup
+; RV32IF-NEXT:    fsgnj.s fs0, fa5, fs0
 ; RV32IF-NEXT:  .LBB5_2:
 ; RV32IF-NEXT:    lui a0, 913408
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fle.s s0, ft0, fs0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fle.s s0, fa5, fs0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
 ; RV32IF-NEXT:    call __fixsfdi@plt
 ; RV32IF-NEXT:    lui a3, 524288
@@ -188,8 +188,8 @@ define i64 @test_ceil_si64(float %x) nounwind {
 ; RV32IF-NEXT:    lui a1, 524288
 ; RV32IF-NEXT:  .LBB5_4:
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI5_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI5_0)(a2)
-; RV32IF-NEXT:    flt.s a2, ft0, fs0
+; RV32IF-NEXT:    flw fa5, %lo(.LCPI5_0)(a2)
+; RV32IF-NEXT:    flt.s a2, fa5, fs0
 ; RV32IF-NEXT:    beqz a2, .LBB5_6
 ; RV32IF-NEXT:  # %bb.5:
 ; RV32IF-NEXT:    addi a1, a3, -1
@@ -245,24 +245,24 @@ define i64 @test_ceil_ui64(float %x) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB7_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fs0, rup
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rup
-; RV32IF-NEXT:    fsgnj.s fs0, ft0, fs0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rup
+; RV32IF-NEXT:    fsgnj.s fs0, fa5, fs0
 ; RV32IF-NEXT:  .LBB7_2:
-; RV32IF-NEXT:    fmv.w.x ft0, zero
-; RV32IF-NEXT:    fle.s a0, ft0, fs0
+; RV32IF-NEXT:    fmv.w.x fa5, zero
+; RV32IF-NEXT:    fle.s a0, fa5, fs0
 ; RV32IF-NEXT:    neg s0, a0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
 ; RV32IF-NEXT:    call __fixunssfdi@plt
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI7_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI7_0)(a2)
+; RV32IF-NEXT:    flw fa5, %lo(.LCPI7_0)(a2)
 ; RV32IF-NEXT:    and a0, s0, a0
-; RV32IF-NEXT:    flt.s a2, ft0, fs0
+; RV32IF-NEXT:    flt.s a2, fa5, fs0
 ; RV32IF-NEXT:    neg a2, a2
 ; RV32IF-NEXT:    or a0, a2, a0
 ; RV32IF-NEXT:    and a1, s0, a1
@@ -309,18 +309,18 @@ define i64 @test_trunc_si64(float %x) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB9_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fs0, rtz
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rtz
-; RV32IF-NEXT:    fsgnj.s fs0, ft0, fs0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV32IF-NEXT:    fsgnj.s fs0, fa5, fs0
 ; RV32IF-NEXT:  .LBB9_2:
 ; RV32IF-NEXT:    lui a0, 913408
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fle.s s0, ft0, fs0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fle.s s0, fa5, fs0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
 ; RV32IF-NEXT:    call __fixsfdi@plt
 ; RV32IF-NEXT:    lui a3, 524288
@@ -329,8 +329,8 @@ define i64 @test_trunc_si64(float %x) nounwind {
 ; RV32IF-NEXT:    lui a1, 524288
 ; RV32IF-NEXT:  .LBB9_4:
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI9_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI9_0)(a2)
-; RV32IF-NEXT:    flt.s a2, ft0, fs0
+; RV32IF-NEXT:    flw fa5, %lo(.LCPI9_0)(a2)
+; RV32IF-NEXT:    flt.s a2, fa5, fs0
 ; RV32IF-NEXT:    beqz a2, .LBB9_6
 ; RV32IF-NEXT:  # %bb.5:
 ; RV32IF-NEXT:    addi a1, a3, -1
@@ -386,24 +386,24 @@ define i64 @test_trunc_ui64(float %x) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB11_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fs0, rtz
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rtz
-; RV32IF-NEXT:    fsgnj.s fs0, ft0, fs0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV32IF-NEXT:    fsgnj.s fs0, fa5, fs0
 ; RV32IF-NEXT:  .LBB11_2:
-; RV32IF-NEXT:    fmv.w.x ft0, zero
-; RV32IF-NEXT:    fle.s a0, ft0, fs0
+; RV32IF-NEXT:    fmv.w.x fa5, zero
+; RV32IF-NEXT:    fle.s a0, fa5, fs0
 ; RV32IF-NEXT:    neg s0, a0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
 ; RV32IF-NEXT:    call __fixunssfdi@plt
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI11_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI11_0)(a2)
+; RV32IF-NEXT:    flw fa5, %lo(.LCPI11_0)(a2)
 ; RV32IF-NEXT:    and a0, s0, a0
-; RV32IF-NEXT:    flt.s a2, ft0, fs0
+; RV32IF-NEXT:    flt.s a2, fa5, fs0
 ; RV32IF-NEXT:    neg a2, a2
 ; RV32IF-NEXT:    or a0, a2, a0
 ; RV32IF-NEXT:    and a1, s0, a1
@@ -450,18 +450,18 @@ define i64 @test_round_si64(float %x) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB13_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fs0, rmm
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rmm
-; RV32IF-NEXT:    fsgnj.s fs0, ft0, fs0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV32IF-NEXT:    fsgnj.s fs0, fa5, fs0
 ; RV32IF-NEXT:  .LBB13_2:
 ; RV32IF-NEXT:    lui a0, 913408
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fle.s s0, ft0, fs0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fle.s s0, fa5, fs0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
 ; RV32IF-NEXT:    call __fixsfdi@plt
 ; RV32IF-NEXT:    lui a3, 524288
@@ -470,8 +470,8 @@ define i64 @test_round_si64(float %x) nounwind {
 ; RV32IF-NEXT:    lui a1, 524288
 ; RV32IF-NEXT:  .LBB13_4:
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI13_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI13_0)(a2)
-; RV32IF-NEXT:    flt.s a2, ft0, fs0
+; RV32IF-NEXT:    flw fa5, %lo(.LCPI13_0)(a2)
+; RV32IF-NEXT:    flt.s a2, fa5, fs0
 ; RV32IF-NEXT:    beqz a2, .LBB13_6
 ; RV32IF-NEXT:  # %bb.5:
 ; RV32IF-NEXT:    addi a1, a3, -1
@@ -527,24 +527,24 @@ define i64 @test_round_ui64(float %x) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB15_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fs0, rmm
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rmm
-; RV32IF-NEXT:    fsgnj.s fs0, ft0, fs0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV32IF-NEXT:    fsgnj.s fs0, fa5, fs0
 ; RV32IF-NEXT:  .LBB15_2:
-; RV32IF-NEXT:    fmv.w.x ft0, zero
-; RV32IF-NEXT:    fle.s a0, ft0, fs0
+; RV32IF-NEXT:    fmv.w.x fa5, zero
+; RV32IF-NEXT:    fle.s a0, fa5, fs0
 ; RV32IF-NEXT:    neg s0, a0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
 ; RV32IF-NEXT:    call __fixunssfdi@plt
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI15_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI15_0)(a2)
+; RV32IF-NEXT:    flw fa5, %lo(.LCPI15_0)(a2)
 ; RV32IF-NEXT:    and a0, s0, a0
-; RV32IF-NEXT:    flt.s a2, ft0, fs0
+; RV32IF-NEXT:    flt.s a2, fa5, fs0
 ; RV32IF-NEXT:    neg a2, a2
 ; RV32IF-NEXT:    or a0, a2, a0
 ; RV32IF-NEXT:    and a1, s0, a1
@@ -591,18 +591,18 @@ define i64 @test_roundeven_si64(float %x) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB17_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fs0, rne
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rne
-; RV32IF-NEXT:    fsgnj.s fs0, ft0, fs0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rne
+; RV32IF-NEXT:    fsgnj.s fs0, fa5, fs0
 ; RV32IF-NEXT:  .LBB17_2:
 ; RV32IF-NEXT:    lui a0, 913408
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fle.s s0, ft0, fs0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fle.s s0, fa5, fs0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
 ; RV32IF-NEXT:    call __fixsfdi@plt
 ; RV32IF-NEXT:    lui a3, 524288
@@ -611,8 +611,8 @@ define i64 @test_roundeven_si64(float %x) nounwind {
 ; RV32IF-NEXT:    lui a1, 524288
 ; RV32IF-NEXT:  .LBB17_4:
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI17_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI17_0)(a2)
-; RV32IF-NEXT:    flt.s a2, ft0, fs0
+; RV32IF-NEXT:    flw fa5, %lo(.LCPI17_0)(a2)
+; RV32IF-NEXT:    flt.s a2, fa5, fs0
 ; RV32IF-NEXT:    beqz a2, .LBB17_6
 ; RV32IF-NEXT:  # %bb.5:
 ; RV32IF-NEXT:    addi a1, a3, -1
@@ -668,24 +668,24 @@ define i64 @test_roundeven_ui64(float %x) nounwind {
 ; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    fmv.s fs0, fa0
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB19_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fs0, rne
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rne
-; RV32IF-NEXT:    fsgnj.s fs0, ft0, fs0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rne
+; RV32IF-NEXT:    fsgnj.s fs0, fa5, fs0
 ; RV32IF-NEXT:  .LBB19_2:
-; RV32IF-NEXT:    fmv.w.x ft0, zero
-; RV32IF-NEXT:    fle.s a0, ft0, fs0
+; RV32IF-NEXT:    fmv.w.x fa5, zero
+; RV32IF-NEXT:    fle.s a0, fa5, fs0
 ; RV32IF-NEXT:    neg s0, a0
 ; RV32IF-NEXT:    fmv.s fa0, fs0
 ; RV32IF-NEXT:    call __fixunssfdi@plt
 ; RV32IF-NEXT:    lui a2, %hi(.LCPI19_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI19_0)(a2)
+; RV32IF-NEXT:    flw fa5, %lo(.LCPI19_0)(a2)
 ; RV32IF-NEXT:    and a0, s0, a0
-; RV32IF-NEXT:    flt.s a2, ft0, fs0
+; RV32IF-NEXT:    flt.s a2, fa5, fs0
 ; RV32IF-NEXT:    neg a2, a2
 ; RV32IF-NEXT:    or a0, a2, a0
 ; RV32IF-NEXT:    and a1, s0, a1

diff --git a/llvm/test/CodeGen/RISCV/float-round-conv.ll b/llvm/test/CodeGen/RISCV/float-round-conv.ll
index ec4b29ce6898..344950df43df 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv.ll
@@ -53,14 +53,14 @@ define i64 @test_floor_si64(float %x) {
 ; RV32IF-LABEL: test_floor_si64:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB3_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rdn
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB3_2:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
@@ -129,14 +129,14 @@ define i64 @test_floor_ui64(float %x) {
 ; RV32IF-LABEL: test_floor_ui64:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB7_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rdn
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB7_2:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
@@ -205,14 +205,14 @@ define i64 @test_ceil_si64(float %x) {
 ; RV32IF-LABEL: test_ceil_si64:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB11_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rup
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rup
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB11_2:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
@@ -281,14 +281,14 @@ define i64 @test_ceil_ui64(float %x) {
 ; RV32IF-LABEL: test_ceil_ui64:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB15_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rup
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rup
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB15_2:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
@@ -357,14 +357,14 @@ define i64 @test_trunc_si64(float %x) {
 ; RV32IF-LABEL: test_trunc_si64:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB19_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rtz
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB19_2:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
@@ -433,14 +433,14 @@ define i64 @test_trunc_ui64(float %x) {
 ; RV32IF-LABEL: test_trunc_ui64:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB23_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rtz
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB23_2:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
@@ -509,14 +509,14 @@ define i64 @test_round_si64(float %x) {
 ; RV32IF-LABEL: test_round_si64:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB27_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rmm
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB27_2:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
@@ -585,14 +585,14 @@ define i64 @test_round_ui64(float %x) {
 ; RV32IF-LABEL: test_round_ui64:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB31_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rmm
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB31_2:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
@@ -661,14 +661,14 @@ define i64 @test_roundeven_si64(float %x) {
 ; RV32IF-LABEL: test_roundeven_si64:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB35_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rne
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rne
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB35_2:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
@@ -737,14 +737,14 @@ define i64 @test_roundeven_ui64(float %x) {
 ; RV32IF-LABEL: test_roundeven_ui64:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB39_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rne
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rne
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB39_2:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    .cfi_def_cfa_offset 16
@@ -789,28 +789,28 @@ define float @test_floor_float(float %x) {
 ; RV32IF-LABEL: test_floor_float:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB40_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rdn
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB40_2:
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_floor_float:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lui a0, 307200
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fabs.s ft1, fa0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fabs.s fa4, fa0
+; RV64IF-NEXT:    flt.s a0, fa4, fa5
 ; RV64IF-NEXT:    beqz a0, .LBB40_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fcvt.w.s a0, fa0, rdn
-; RV64IF-NEXT:    fcvt.s.w ft0, a0, rdn
-; RV64IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV64IF-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV64IF-NEXT:  .LBB40_2:
 ; RV64IF-NEXT:    ret
   %a = call float @llvm.floor.f32(float %x)
@@ -842,28 +842,28 @@ define float @test_ceil_float(float %x) {
 ; RV32IF-LABEL: test_ceil_float:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB41_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rup
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rup
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB41_2:
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_ceil_float:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lui a0, 307200
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fabs.s ft1, fa0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fabs.s fa4, fa0
+; RV64IF-NEXT:    flt.s a0, fa4, fa5
 ; RV64IF-NEXT:    beqz a0, .LBB41_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fcvt.w.s a0, fa0, rup
-; RV64IF-NEXT:    fcvt.s.w ft0, a0, rup
-; RV64IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV64IF-NEXT:    fcvt.s.w fa5, a0, rup
+; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV64IF-NEXT:  .LBB41_2:
 ; RV64IF-NEXT:    ret
   %a = call float @llvm.ceil.f32(float %x)
@@ -895,28 +895,28 @@ define float @test_trunc_float(float %x) {
 ; RV32IF-LABEL: test_trunc_float:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB42_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rtz
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB42_2:
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_trunc_float:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lui a0, 307200
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fabs.s ft1, fa0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fabs.s fa4, fa0
+; RV64IF-NEXT:    flt.s a0, fa4, fa5
 ; RV64IF-NEXT:    beqz a0, .LBB42_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
-; RV64IF-NEXT:    fcvt.s.w ft0, a0, rtz
-; RV64IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV64IF-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV64IF-NEXT:  .LBB42_2:
 ; RV64IF-NEXT:    ret
   %a = call float @llvm.trunc.f32(float %x)
@@ -948,28 +948,28 @@ define float @test_round_float(float %x) {
 ; RV32IF-LABEL: test_round_float:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB43_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rmm
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB43_2:
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_round_float:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lui a0, 307200
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fabs.s ft1, fa0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fabs.s fa4, fa0
+; RV64IF-NEXT:    flt.s a0, fa4, fa5
 ; RV64IF-NEXT:    beqz a0, .LBB43_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fcvt.w.s a0, fa0, rmm
-; RV64IF-NEXT:    fcvt.s.w ft0, a0, rmm
-; RV64IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV64IF-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV64IF-NEXT:  .LBB43_2:
 ; RV64IF-NEXT:    ret
   %a = call float @llvm.round.f32(float %x)
@@ -1001,28 +1001,28 @@ define float @test_roundeven_float(float %x) {
 ; RV32IF-LABEL: test_roundeven_float:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    lui a0, 307200
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fabs.s ft1, fa0
-; RV32IF-NEXT:    flt.s a0, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fabs.s fa4, fa0
+; RV32IF-NEXT:    flt.s a0, fa4, fa5
 ; RV32IF-NEXT:    beqz a0, .LBB44_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
-; RV32IF-NEXT:    fcvt.s.w ft0, a0, rne
-; RV32IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV32IF-NEXT:    fcvt.s.w fa5, a0, rne
+; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV32IF-NEXT:  .LBB44_2:
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: test_roundeven_float:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    lui a0, 307200
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fabs.s ft1, fa0
-; RV64IF-NEXT:    flt.s a0, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fabs.s fa4, fa0
+; RV64IF-NEXT:    flt.s a0, fa4, fa5
 ; RV64IF-NEXT:    beqz a0, .LBB44_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fcvt.w.s a0, fa0, rne
-; RV64IF-NEXT:    fcvt.s.w ft0, a0, rne
-; RV64IF-NEXT:    fsgnj.s fa0, ft0, fa0
+; RV64IF-NEXT:    fcvt.s.w fa5, a0, rne
+; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
 ; RV64IF-NEXT:  .LBB44_2:
 ; RV64IF-NEXT:    ret
   %a = call float @llvm.roundeven.f32(float %x)

diff --git a/llvm/test/CodeGen/RISCV/fp-imm.ll b/llvm/test/CodeGen/RISCV/fp-imm.ll
index d0a52a1274bf..e7b2e62b2f6c 100644
--- a/llvm/test/CodeGen/RISCV/fp-imm.ll
+++ b/llvm/test/CodeGen/RISCV/fp-imm.ll
@@ -91,8 +91,8 @@ define double @f64_negative_zero(ptr %pd) nounwind {
 ;
 ; RV32D-LABEL: f64_negative_zero:
 ; RV32D:       # %bb.0:
-; RV32D-NEXT:    fcvt.d.w ft0, zero
-; RV32D-NEXT:    fneg.d fa0, ft0
+; RV32D-NEXT:    fcvt.d.w fa5, zero
+; RV32D-NEXT:    fneg.d fa0, fa5
 ; RV32D-NEXT:    ret
 ;
 ; RV64F-LABEL: f64_negative_zero:
@@ -103,8 +103,8 @@ define double @f64_negative_zero(ptr %pd) nounwind {
 ;
 ; RV64D-LABEL: f64_negative_zero:
 ; RV64D:       # %bb.0:
-; RV64D-NEXT:    fmv.d.x ft0, zero
-; RV64D-NEXT:    fneg.d fa0, ft0
+; RV64D-NEXT:    fmv.d.x fa5, zero
+; RV64D-NEXT:    fneg.d fa0, fa5
 ; RV64D-NEXT:    ret
   ret double -0.0
 }

diff --git a/llvm/test/CodeGen/RISCV/half-arith-strict.ll b/llvm/test/CodeGen/RISCV/half-arith-strict.ll
index 78c80706e779..54e314679d14 100644
--- a/llvm/test/CodeGen/RISCV/half-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/half-arith-strict.ll
@@ -21,10 +21,10 @@ define half @fadd_h(half %a, half %b) nounwind strictfp {
 ;
 ; CHECK-ZFHMIN-LABEL: fadd_h:
 ; CHECK-ZFHMIN:       # %bb.0:
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECK-ZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECK-ZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK-ZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.fadd.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -39,10 +39,10 @@ define half @fsub_h(half %a, half %b) nounwind strictfp {
 ;
 ; CHECK-ZFHMIN-LABEL: fsub_h:
 ; CHECK-ZFHMIN:       # %bb.0:
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECK-ZFHMIN-NEXT:    fsub.s ft0, ft1, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECK-ZFHMIN-NEXT:    fsub.s fa5, fa4, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK-ZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.fsub.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -57,10 +57,10 @@ define half @fmul_h(half %a, half %b) nounwind strictfp {
 ;
 ; CHECK-ZFHMIN-LABEL: fmul_h:
 ; CHECK-ZFHMIN:       # %bb.0:
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECK-ZFHMIN-NEXT:    fmul.s ft0, ft1, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECK-ZFHMIN-NEXT:    fmul.s fa5, fa4, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK-ZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.fmul.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -75,10 +75,10 @@ define half @fdiv_h(half %a, half %b) nounwind strictfp {
 ;
 ; CHECK-ZFHMIN-LABEL: fdiv_h:
 ; CHECK-ZFHMIN:       # %bb.0:
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECK-ZFHMIN-NEXT:    fdiv.s ft0, ft1, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECK-ZFHMIN-NEXT:    fdiv.s fa5, fa4, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK-ZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.fdiv.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -93,9 +93,9 @@ define half @fsqrt_h(half %a) nounwind strictfp {
 ;
 ; CHECK-ZFHMIN-LABEL: fsqrt_h:
 ; CHECK-ZFHMIN:       # %bb.0:
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK-ZFHMIN-NEXT:    fsqrt.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK-ZFHMIN-NEXT:    fsqrt.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK-ZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.sqrt.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -124,11 +124,11 @@ define half @fmadd_h(half %a, half %b, half %c) nounwind strictfp {
 ;
 ; CHECK-ZFHMIN-LABEL: fmadd_h:
 ; CHECK-ZFHMIN:       # %bb.0:
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, fa2
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft2, fa0
-; CHECK-ZFHMIN-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa2
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa3, fa0
+; CHECK-ZFHMIN-NEXT:    fmadd.s fa5, fa3, fa4, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK-ZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.fma.f16(half %a, half %b, half %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -138,25 +138,25 @@ declare half @llvm.experimental.constrained.fma.f16(half, half, half, metadata,
 define half @fmsub_h(half %a, half %b, half %c) nounwind strictfp {
 ; CHECK-LABEL: fmsub_h:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    fadd.h ft0, fa2, ft0
-; CHECK-NEXT:    fmsub.h fa0, fa0, fa1, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    fadd.h fa5, fa2, fa5
+; CHECK-NEXT:    fmsub.h fa0, fa0, fa1, fa5
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-ZFHMIN-LABEL: fmsub_h:
 ; CHECK-ZFHMIN:       # %bb.0:
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, fa2
-; CHECK-ZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECK-ZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fneg.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft2, fa0
-; CHECK-ZFHMIN-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa2
+; CHECK-ZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECK-ZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fneg.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa3, fa0
+; CHECK-ZFHMIN-NEXT:    fmadd.s fa5, fa3, fa4, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK-ZFHMIN-NEXT:    ret
   %c_ = fadd half 0.0, %c ; avoid negation using xor
   %negc = fneg half %c_
@@ -167,32 +167,32 @@ define half @fmsub_h(half %a, half %b, half %c) nounwind strictfp {
 define half @fnmadd_h(half %a, half %b, half %c) nounwind strictfp {
 ; CHECK-LABEL: fnmadd_h:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    fadd.h ft1, fa0, ft0
-; CHECK-NEXT:    fadd.h ft0, fa2, ft0
-; CHECK-NEXT:    fnmadd.h fa0, ft1, fa1, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    fadd.h fa4, fa0, fa5
+; CHECK-NEXT:    fadd.h fa5, fa2, fa5
+; CHECK-NEXT:    fnmadd.h fa0, fa4, fa1, fa5
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-ZFHMIN-LABEL: fnmadd_h:
 ; CHECK-ZFHMIN:       # %bb.0:
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK-ZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECK-ZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft2, fa2
-; CHECK-ZFHMIN-NEXT:    fadd.s ft1, ft2, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fneg.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECK-ZFHMIN-NEXT:    fneg.s ft1, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft2, fa1
-; CHECK-ZFHMIN-NEXT:    fmadd.s ft0, ft0, ft2, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK-ZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECK-ZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa3, fa2
+; CHECK-ZFHMIN-NEXT:    fadd.s fa4, fa3, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fneg.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECK-ZFHMIN-NEXT:    fneg.s fa4, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa3, fa1
+; CHECK-ZFHMIN-NEXT:    fmadd.s fa5, fa5, fa3, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK-ZFHMIN-NEXT:    ret
   %a_ = fadd half 0.0, %a
   %c_ = fadd half 0.0, %c
@@ -205,32 +205,32 @@ define half @fnmadd_h(half %a, half %b, half %c) nounwind strictfp {
 define half @fnmadd_h_2(half %a, half %b, half %c) nounwind strictfp {
 ; CHECK-LABEL: fnmadd_h_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    fadd.h ft1, fa1, ft0
-; CHECK-NEXT:    fadd.h ft0, fa2, ft0
-; CHECK-NEXT:    fnmadd.h fa0, ft1, fa0, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    fadd.h fa4, fa1, fa5
+; CHECK-NEXT:    fadd.h fa5, fa2, fa5
+; CHECK-NEXT:    fnmadd.h fa0, fa4, fa0, fa5
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-ZFHMIN-LABEL: fnmadd_h_2:
 ; CHECK-ZFHMIN:       # %bb.0:
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECK-ZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECK-ZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft2, fa2
-; CHECK-ZFHMIN-NEXT:    fadd.s ft1, ft2, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fneg.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECK-ZFHMIN-NEXT:    fneg.s ft1, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft2, fa0
-; CHECK-ZFHMIN-NEXT:    fmadd.s ft0, ft2, ft0, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECK-ZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECK-ZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa3, fa2
+; CHECK-ZFHMIN-NEXT:    fadd.s fa4, fa3, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fneg.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECK-ZFHMIN-NEXT:    fneg.s fa4, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa3, fa0
+; CHECK-ZFHMIN-NEXT:    fmadd.s fa5, fa3, fa5, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK-ZFHMIN-NEXT:    ret
   %b_ = fadd half 0.0, %b
   %c_ = fadd half 0.0, %c
@@ -243,25 +243,25 @@ define half @fnmadd_h_2(half %a, half %b, half %c) nounwind strictfp {
 define half @fnmsub_h(half %a, half %b, half %c) nounwind strictfp {
 ; CHECK-LABEL: fnmsub_h:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    fadd.h ft0, fa0, ft0
-; CHECK-NEXT:    fnmsub.h fa0, ft0, fa1, fa2
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    fadd.h fa5, fa0, fa5
+; CHECK-NEXT:    fnmsub.h fa0, fa5, fa1, fa2
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-ZFHMIN-LABEL: fnmsub_h:
 ; CHECK-ZFHMIN:       # %bb.0:
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK-ZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECK-ZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fneg.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft1, fa2
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft2, fa1
-; CHECK-ZFHMIN-NEXT:    fmadd.s ft0, ft0, ft2, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK-ZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECK-ZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fneg.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa4, fa2
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa3, fa1
+; CHECK-ZFHMIN-NEXT:    fmadd.s fa5, fa5, fa3, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK-ZFHMIN-NEXT:    ret
   %a_ = fadd half 0.0, %a
   %nega = fneg half %a_
@@ -272,25 +272,25 @@ define half @fnmsub_h(half %a, half %b, half %c) nounwind strictfp {
 define half @fnmsub_h_2(half %a, half %b, half %c) nounwind strictfp {
 ; CHECK-LABEL: fnmsub_h_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    fadd.h ft0, fa1, ft0
-; CHECK-NEXT:    fnmsub.h fa0, ft0, fa0, fa2
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    fadd.h fa5, fa1, fa5
+; CHECK-NEXT:    fnmsub.h fa0, fa5, fa0, fa2
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-ZFHMIN-LABEL: fnmsub_h_2:
 ; CHECK-ZFHMIN:       # %bb.0:
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECK-ZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECK-ZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fneg.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft1, fa2
-; CHECK-ZFHMIN-NEXT:    fcvt.s.h ft2, fa0
-; CHECK-ZFHMIN-NEXT:    fmadd.s ft0, ft2, ft0, ft1
-; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECK-ZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECK-ZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fneg.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa4, fa2
+; CHECK-ZFHMIN-NEXT:    fcvt.s.h fa3, fa0
+; CHECK-ZFHMIN-NEXT:    fmadd.s fa5, fa3, fa5, fa4
+; CHECK-ZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK-ZFHMIN-NEXT:    ret
   %b_ = fadd half 0.0, %b
   %negb = fneg half %b_

diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll
index 2124de02dc28..c0a6903c3c57 100644
--- a/llvm/test/CodeGen/RISCV/half-arith.ll
+++ b/llvm/test/CodeGen/RISCV/half-arith.ll
@@ -77,10 +77,10 @@ define half @fadd_s(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fadd_s:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fadd half %a, %b
   ret half %1
@@ -146,10 +146,10 @@ define half @fsub_s(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fsub_s:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fsub.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fsub.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fsub half %a, %b
   ret half %1
@@ -215,10 +215,10 @@ define half @fmul_s(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fmul_s:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fmul.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fmul.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fmul half %a, %b
   ret half %1
@@ -284,10 +284,10 @@ define half @fdiv_s(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fdiv_s:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fdiv.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fdiv.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fdiv half %a, %b
   ret half %1
@@ -329,9 +329,9 @@ define half @fsqrt_s(half %a) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fsqrt_s:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fsqrt.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fsqrt.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.sqrt.f16(half %a)
   ret half %1
@@ -429,9 +429,9 @@ define half @fsgnj_s(half %a, half %b) nounwind {
 define i32 @fneg_s(half %a, half %b) nounwind {
 ; CHECKIZFH-LABEL: fneg_s:
 ; CHECKIZFH:       # %bb.0:
-; CHECKIZFH-NEXT:    fadd.h ft0, fa0, fa0
-; CHECKIZFH-NEXT:    fneg.h ft1, ft0
-; CHECKIZFH-NEXT:    feq.h a0, ft0, ft1
+; CHECKIZFH-NEXT:    fadd.h fa5, fa0, fa0
+; CHECKIZFH-NEXT:    fneg.h fa4, fa5
+; CHECKIZFH-NEXT:    feq.h a0, fa5, fa4
 ; CHECKIZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fneg_s:
@@ -498,14 +498,14 @@ define i32 @fneg_s(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fneg_s:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fneg.s ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    feq.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fneg.s fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    feq.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fadd half %a, %a
   %2 = fneg half %1
@@ -519,8 +519,8 @@ define i32 @fneg_s(half %a, half %b) nounwind {
 define half @fsgnjn_s(half %a, half %b) nounwind {
 ; CHECKIZFH-LABEL: fsgnjn_s:
 ; CHECKIZFH:       # %bb.0:
-; CHECKIZFH-NEXT:    fadd.h ft0, fa0, fa1
-; CHECKIZFH-NEXT:    fsgnjn.h fa0, fa0, ft0
+; CHECKIZFH-NEXT:    fadd.h fa5, fa0, fa1
+; CHECKIZFH-NEXT:    fsgnjn.h fa0, fa0, fa5
 ; CHECKIZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fsgnjn_s:
@@ -604,15 +604,15 @@ define half @fsgnjn_s(half %a, half %b) nounwind {
 ; CHECK-RV32-FSGNJ-LABEL: fsgnjn_s:
 ; CHECK-RV32-FSGNJ:       # %bb.0:
 ; CHECK-RV32-FSGNJ-NEXT:    addi sp, sp, -16
-; CHECK-RV32-FSGNJ-NEXT:    fcvt.s.h ft0, fa1
-; CHECK-RV32-FSGNJ-NEXT:    fcvt.s.h ft1, fa0
-; CHECK-RV32-FSGNJ-NEXT:    fadd.s ft0, ft1, ft0
-; CHECK-RV32-FSGNJ-NEXT:    fcvt.h.s ft0, ft0
-; CHECK-RV32-FSGNJ-NEXT:    fcvt.s.h ft0, ft0
-; CHECK-RV32-FSGNJ-NEXT:    fneg.s ft0, ft0
-; CHECK-RV32-FSGNJ-NEXT:    fcvt.h.s ft0, ft0
+; CHECK-RV32-FSGNJ-NEXT:    fcvt.s.h fa5, fa1
+; CHECK-RV32-FSGNJ-NEXT:    fcvt.s.h fa4, fa0
+; CHECK-RV32-FSGNJ-NEXT:    fadd.s fa5, fa4, fa5
+; CHECK-RV32-FSGNJ-NEXT:    fcvt.h.s fa5, fa5
+; CHECK-RV32-FSGNJ-NEXT:    fcvt.s.h fa5, fa5
+; CHECK-RV32-FSGNJ-NEXT:    fneg.s fa5, fa5
+; CHECK-RV32-FSGNJ-NEXT:    fcvt.h.s fa5, fa5
 ; CHECK-RV32-FSGNJ-NEXT:    fsh fa0, 8(sp)
-; CHECK-RV32-FSGNJ-NEXT:    fsh ft0, 12(sp)
+; CHECK-RV32-FSGNJ-NEXT:    fsh fa5, 12(sp)
 ; CHECK-RV32-FSGNJ-NEXT:    lbu a0, 9(sp)
 ; CHECK-RV32-FSGNJ-NEXT:    lbu a1, 13(sp)
 ; CHECK-RV32-FSGNJ-NEXT:    andi a0, a0, 127
@@ -626,15 +626,15 @@ define half @fsgnjn_s(half %a, half %b) nounwind {
 ; CHECK-RV64-FSGNJ-LABEL: fsgnjn_s:
 ; CHECK-RV64-FSGNJ:       # %bb.0:
 ; CHECK-RV64-FSGNJ-NEXT:    addi sp, sp, -16
-; CHECK-RV64-FSGNJ-NEXT:    fcvt.s.h ft0, fa1
-; CHECK-RV64-FSGNJ-NEXT:    fcvt.s.h ft1, fa0
-; CHECK-RV64-FSGNJ-NEXT:    fadd.s ft0, ft1, ft0
-; CHECK-RV64-FSGNJ-NEXT:    fcvt.h.s ft0, ft0
-; CHECK-RV64-FSGNJ-NEXT:    fcvt.s.h ft0, ft0
-; CHECK-RV64-FSGNJ-NEXT:    fneg.s ft0, ft0
-; CHECK-RV64-FSGNJ-NEXT:    fcvt.h.s ft0, ft0
+; CHECK-RV64-FSGNJ-NEXT:    fcvt.s.h fa5, fa1
+; CHECK-RV64-FSGNJ-NEXT:    fcvt.s.h fa4, fa0
+; CHECK-RV64-FSGNJ-NEXT:    fadd.s fa5, fa4, fa5
+; CHECK-RV64-FSGNJ-NEXT:    fcvt.h.s fa5, fa5
+; CHECK-RV64-FSGNJ-NEXT:    fcvt.s.h fa5, fa5
+; CHECK-RV64-FSGNJ-NEXT:    fneg.s fa5, fa5
+; CHECK-RV64-FSGNJ-NEXT:    fcvt.h.s fa5, fa5
 ; CHECK-RV64-FSGNJ-NEXT:    fsh fa0, 0(sp)
-; CHECK-RV64-FSGNJ-NEXT:    fsh ft0, 8(sp)
+; CHECK-RV64-FSGNJ-NEXT:    fsh fa5, 8(sp)
 ; CHECK-RV64-FSGNJ-NEXT:    lbu a0, 1(sp)
 ; CHECK-RV64-FSGNJ-NEXT:    lbu a1, 9(sp)
 ; CHECK-RV64-FSGNJ-NEXT:    andi a0, a0, 127
@@ -699,9 +699,9 @@ declare half @llvm.fabs.f16(half)
 define half @fabs_s(half %a, half %b) nounwind {
 ; CHECKIZFH-LABEL: fabs_s:
 ; CHECKIZFH:       # %bb.0:
-; CHECKIZFH-NEXT:    fadd.h ft0, fa0, fa1
-; CHECKIZFH-NEXT:    fabs.h ft1, ft0
-; CHECKIZFH-NEXT:    fadd.h fa0, ft1, ft0
+; CHECKIZFH-NEXT:    fadd.h fa5, fa0, fa1
+; CHECKIZFH-NEXT:    fabs.h fa4, fa5
+; CHECKIZFH-NEXT:    fadd.h fa0, fa4, fa5
 ; CHECKIZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fabs_s:
@@ -780,16 +780,16 @@ define half @fabs_s(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fabs_s:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fabs.s ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fabs.s fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fadd half %a, %b
   %2 = call half @llvm.fabs.f16(half %1)
@@ -859,10 +859,10 @@ define half @fmin_s(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fmin_s:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fmin.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fmin.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.minnum.f16(half %a, half %b)
   ret half %1
@@ -930,10 +930,10 @@ define half @fmax_s(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fmax_s:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fmax.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fmax.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.maxnum.f16(half %a, half %b)
   ret half %1
@@ -1015,11 +1015,11 @@ define half @fmadd_s(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fmadd_s:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa2
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, fa0
-; CHECKIZFHMIN-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa2
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa0
+; CHECKIZFHMIN-NEXT:    fmadd.s fa5, fa3, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.fma.f16(half %a, half %b, half %c)
   ret half %1
@@ -1028,9 +1028,9 @@ define half @fmadd_s(half %a, half %b, half %c) nounwind {
 define half @fmsub_s(half %a, half %b, half %c) nounwind {
 ; CHECKIZFH-LABEL: fmsub_s:
 ; CHECKIZFH:       # %bb.0:
-; CHECKIZFH-NEXT:    fmv.h.x ft0, zero
-; CHECKIZFH-NEXT:    fadd.h ft0, fa2, ft0
-; CHECKIZFH-NEXT:    fmsub.h fa0, fa0, fa1, ft0
+; CHECKIZFH-NEXT:    fmv.h.x fa5, zero
+; CHECKIZFH-NEXT:    fadd.h fa5, fa2, fa5
+; CHECKIZFH-NEXT:    fmsub.h fa0, fa0, fa1, fa5
 ; CHECKIZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fmsub_s:
@@ -1123,18 +1123,18 @@ define half @fmsub_s(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fmsub_s:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa2
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fneg.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, fa0
-; CHECKIZFHMIN-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa2
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fneg.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa0
+; CHECKIZFHMIN-NEXT:    fmadd.s fa5, fa3, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %c_ = fadd half 0.0, %c ; avoid negation using xor
   %negc = fsub half -0.0, %c_
@@ -1145,10 +1145,10 @@ define half @fmsub_s(half %a, half %b, half %c) nounwind {
 define half @fnmadd_s(half %a, half %b, half %c) nounwind {
 ; CHECKIZFH-LABEL: fnmadd_s:
 ; CHECKIZFH:       # %bb.0:
-; CHECKIZFH-NEXT:    fmv.h.x ft0, zero
-; CHECKIZFH-NEXT:    fadd.h ft1, fa0, ft0
-; CHECKIZFH-NEXT:    fadd.h ft0, fa2, ft0
-; CHECKIZFH-NEXT:    fnmadd.h fa0, ft1, fa1, ft0
+; CHECKIZFH-NEXT:    fmv.h.x fa5, zero
+; CHECKIZFH-NEXT:    fadd.h fa4, fa0, fa5
+; CHECKIZFH-NEXT:    fadd.h fa5, fa2, fa5
+; CHECKIZFH-NEXT:    fnmadd.h fa0, fa4, fa1, fa5
 ; CHECKIZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s:
@@ -1269,24 +1269,24 @@ define half @fnmadd_s(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fnmadd_s:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, fa2
-; CHECKIZFHMIN-NEXT:    fadd.s ft1, ft2, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fneg.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fneg.s ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, fa1
-; CHECKIZFHMIN-NEXT:    fmadd.s ft0, ft0, ft2, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa2
+; CHECKIZFHMIN-NEXT:    fadd.s fa4, fa3, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fneg.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fneg.s fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa1
+; CHECKIZFHMIN-NEXT:    fmadd.s fa5, fa5, fa3, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %a_ = fadd half 0.0, %a
   %c_ = fadd half 0.0, %c
@@ -1299,10 +1299,10 @@ define half @fnmadd_s(half %a, half %b, half %c) nounwind {
 define half @fnmadd_s_2(half %a, half %b, half %c) nounwind {
 ; CHECKIZFH-LABEL: fnmadd_s_2:
 ; CHECKIZFH:       # %bb.0:
-; CHECKIZFH-NEXT:    fmv.h.x ft0, zero
-; CHECKIZFH-NEXT:    fadd.h ft1, fa1, ft0
-; CHECKIZFH-NEXT:    fadd.h ft0, fa2, ft0
-; CHECKIZFH-NEXT:    fnmadd.h fa0, ft1, fa0, ft0
+; CHECKIZFH-NEXT:    fmv.h.x fa5, zero
+; CHECKIZFH-NEXT:    fadd.h fa4, fa1, fa5
+; CHECKIZFH-NEXT:    fadd.h fa5, fa2, fa5
+; CHECKIZFH-NEXT:    fnmadd.h fa0, fa4, fa0, fa5
 ; CHECKIZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s_2:
@@ -1423,24 +1423,24 @@ define half @fnmadd_s_2(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fnmadd_s_2:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, fa2
-; CHECKIZFHMIN-NEXT:    fadd.s ft1, ft2, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fneg.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fneg.s ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, fa0
-; CHECKIZFHMIN-NEXT:    fmadd.s ft0, ft2, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa2
+; CHECKIZFHMIN-NEXT:    fadd.s fa4, fa3, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fneg.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fneg.s fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa0
+; CHECKIZFHMIN-NEXT:    fmadd.s fa5, fa3, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %b_ = fadd half 0.0, %b
   %c_ = fadd half 0.0, %c
@@ -1465,8 +1465,8 @@ define half @fnmadd_s_3(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFH-LABEL: fnmadd_s_3:
 ; CHECKIZFH:       # %bb.0:
-; CHECKIZFH-NEXT:    fmadd.h ft0, fa0, fa1, fa2
-; CHECKIZFH-NEXT:    fneg.h fa0, ft0
+; CHECKIZFH-NEXT:    fmadd.h fa5, fa0, fa1, fa2
+; CHECKIZFH-NEXT:    fneg.h fa0, fa5
 ; CHECKIZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s_3:
@@ -1541,14 +1541,14 @@ define half @fnmadd_s_3(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fnmadd_s_3:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa2
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, fa0
-; CHECKIZFHMIN-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fneg.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa2
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa0
+; CHECKIZFHMIN-NEXT:    fmadd.s fa5, fa3, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fneg.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.fma.f16(half %a, half %b, half %c)
   %neg = fneg half %1
@@ -1644,14 +1644,14 @@ define half @fnmadd_nsz(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fnmadd_nsz:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa2
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, fa0
-; CHECKIZFHMIN-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fneg.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa2
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa0
+; CHECKIZFHMIN-NEXT:    fmadd.s fa5, fa3, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fneg.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call nsz half @llvm.fma.f16(half %a, half %b, half %c)
   %neg = fneg nsz half %1
@@ -1661,9 +1661,9 @@ define half @fnmadd_nsz(half %a, half %b, half %c) nounwind {
 define half @fnmsub_s(half %a, half %b, half %c) nounwind {
 ; CHECKIZFH-LABEL: fnmsub_s:
 ; CHECKIZFH:       # %bb.0:
-; CHECKIZFH-NEXT:    fmv.h.x ft0, zero
-; CHECKIZFH-NEXT:    fadd.h ft0, fa0, ft0
-; CHECKIZFH-NEXT:    fnmsub.h fa0, ft0, fa1, fa2
+; CHECKIZFH-NEXT:    fmv.h.x fa5, zero
+; CHECKIZFH-NEXT:    fadd.h fa5, fa0, fa5
+; CHECKIZFH-NEXT:    fnmsub.h fa0, fa5, fa1, fa2
 ; CHECKIZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_s:
@@ -1754,18 +1754,18 @@ define half @fnmsub_s(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fnmsub_s:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fneg.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa2
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, fa1
-; CHECKIZFHMIN-NEXT:    fmadd.s ft0, ft0, ft2, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fneg.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa2
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa1
+; CHECKIZFHMIN-NEXT:    fmadd.s fa5, fa5, fa3, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %a_ = fadd half 0.0, %a
   %nega = fsub half -0.0, %a_
@@ -1776,9 +1776,9 @@ define half @fnmsub_s(half %a, half %b, half %c) nounwind {
 define half @fnmsub_s_2(half %a, half %b, half %c) nounwind {
 ; CHECKIZFH-LABEL: fnmsub_s_2:
 ; CHECKIZFH:       # %bb.0:
-; CHECKIZFH-NEXT:    fmv.h.x ft0, zero
-; CHECKIZFH-NEXT:    fadd.h ft0, fa1, ft0
-; CHECKIZFH-NEXT:    fnmsub.h fa0, ft0, fa0, fa2
+; CHECKIZFH-NEXT:    fmv.h.x fa5, zero
+; CHECKIZFH-NEXT:    fadd.h fa5, fa1, fa5
+; CHECKIZFH-NEXT:    fnmsub.h fa0, fa5, fa0, fa2
 ; CHECKIZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_s_2:
@@ -1871,18 +1871,18 @@ define half @fnmsub_s_2(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fnmsub_s_2:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fneg.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa2
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, fa0
-; CHECKIZFHMIN-NEXT:    fmadd.s ft0, ft2, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fneg.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa2
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa0
+; CHECKIZFHMIN-NEXT:    fmadd.s fa5, fa3, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %b_ = fadd half 0.0, %b
   %negb = fsub half -0.0, %b_
@@ -1974,14 +1974,14 @@ define half @fmadd_s_contract(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fmadd_s_contract:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fmul.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa2
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fmul.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa2
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fmul contract half %a, %b
   %2 = fadd contract half %1, %c
@@ -1991,9 +1991,9 @@ define half @fmadd_s_contract(half %a, half %b, half %c) nounwind {
 define half @fmsub_s_contract(half %a, half %b, half %c) nounwind {
 ; CHECKIZFH-LABEL: fmsub_s_contract:
 ; CHECKIZFH:       # %bb.0:
-; CHECKIZFH-NEXT:    fmv.h.x ft0, zero
-; CHECKIZFH-NEXT:    fadd.h ft0, fa2, ft0
-; CHECKIZFH-NEXT:    fmsub.h fa0, fa0, fa1, ft0
+; CHECKIZFH-NEXT:    fmv.h.x fa5, zero
+; CHECKIZFH-NEXT:    fadd.h fa5, fa2, fa5
+; CHECKIZFH-NEXT:    fmsub.h fa0, fa0, fa1, fa5
 ; CHECKIZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fmsub_s_contract:
@@ -2086,18 +2086,18 @@ define half @fmsub_s_contract(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fmsub_s_contract:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa2
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, fa0
-; CHECKIZFHMIN-NEXT:    fmul.s ft1, ft2, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fsub.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa2
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa0
+; CHECKIZFHMIN-NEXT:    fmul.s fa4, fa3, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fsub.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %c_ = fadd half 0.0, %c ; avoid negation using xor
   %1 = fmul contract half %a, %b
@@ -2108,11 +2108,11 @@ define half @fmsub_s_contract(half %a, half %b, half %c) nounwind {
 define half @fnmadd_s_contract(half %a, half %b, half %c) nounwind {
 ; CHECKIZFH-LABEL: fnmadd_s_contract:
 ; CHECKIZFH:       # %bb.0:
-; CHECKIZFH-NEXT:    fmv.h.x ft0, zero
-; CHECKIZFH-NEXT:    fadd.h ft1, fa0, ft0
-; CHECKIZFH-NEXT:    fadd.h ft2, fa1, ft0
-; CHECKIZFH-NEXT:    fadd.h ft0, fa2, ft0
-; CHECKIZFH-NEXT:    fnmadd.h fa0, ft1, ft2, ft0
+; CHECKIZFH-NEXT:    fmv.h.x fa5, zero
+; CHECKIZFH-NEXT:    fadd.h fa4, fa0, fa5
+; CHECKIZFH-NEXT:    fadd.h fa3, fa1, fa5
+; CHECKIZFH-NEXT:    fadd.h fa5, fa2, fa5
+; CHECKIZFH-NEXT:    fnmadd.h fa0, fa4, fa3, fa5
 ; CHECKIZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmadd_s_contract:
@@ -2239,27 +2239,27 @@ define half @fnmadd_s_contract(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fnmadd_s_contract:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, fa1
-; CHECKIZFHMIN-NEXT:    fadd.s ft2, ft2, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft2, ft2
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft3, fa2
-; CHECKIZFHMIN-NEXT:    fadd.s ft1, ft3, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, ft2
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fmul.s ft0, ft0, ft2
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fneg.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fsub.s ft0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa1
+; CHECKIZFHMIN-NEXT:    fadd.s fa3, fa3, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa3, fa3
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa2, fa2
+; CHECKIZFHMIN-NEXT:    fadd.s fa4, fa2, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa3
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fmul.s fa5, fa5, fa3
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fneg.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fsub.s fa5, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %a_ = fadd half 0.0, %a ; avoid negation using xor
   %b_ = fadd half 0.0, %b ; avoid negation using xor
@@ -2273,10 +2273,10 @@ define half @fnmadd_s_contract(half %a, half %b, half %c) nounwind {
 define half @fnmsub_s_contract(half %a, half %b, half %c) nounwind {
 ; CHECKIZFH-LABEL: fnmsub_s_contract:
 ; CHECKIZFH:       # %bb.0:
-; CHECKIZFH-NEXT:    fmv.h.x ft0, zero
-; CHECKIZFH-NEXT:    fadd.h ft1, fa0, ft0
-; CHECKIZFH-NEXT:    fadd.h ft0, fa1, ft0
-; CHECKIZFH-NEXT:    fnmsub.h fa0, ft1, ft0, fa2
+; CHECKIZFH-NEXT:    fmv.h.x fa5, zero
+; CHECKIZFH-NEXT:    fadd.h fa4, fa0, fa5
+; CHECKIZFH-NEXT:    fadd.h fa5, fa1, fa5
+; CHECKIZFH-NEXT:    fnmsub.h fa0, fa4, fa5, fa2
 ; CHECKIZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fnmsub_s_contract:
@@ -2383,21 +2383,21 @@ define half @fnmsub_s_contract(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fnmsub_s_contract:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, fa1
-; CHECKIZFHMIN-NEXT:    fadd.s ft1, ft2, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fmul.s ft0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa2
-; CHECKIZFHMIN-NEXT:    fsub.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa1
+; CHECKIZFHMIN-NEXT:    fadd.s fa4, fa3, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fmul.s fa5, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa2
+; CHECKIZFHMIN-NEXT:    fsub.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %a_ = fadd half 0.0, %a ; avoid negation using xor
   %b_ = fadd half 0.0, %b ; avoid negation using xor

diff --git a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
index e6e95c1dbaf2..11f66d7290ab 100644
--- a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
@@ -121,10 +121,10 @@ define half @fcopysign_fneg(half %a, half %b) nounwind {
 ;
 ; RV32IZFH-LABEL: fcopysign_fneg:
 ; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    fmv.h.x ft0, a1
-; RV32IZFH-NEXT:    fmv.h.x ft1, a0
-; RV32IZFH-NEXT:    fsgnjn.h ft0, ft1, ft0
-; RV32IZFH-NEXT:    fmv.x.h a0, ft0
+; RV32IZFH-NEXT:    fmv.h.x fa5, a1
+; RV32IZFH-NEXT:    fmv.h.x fa4, a0
+; RV32IZFH-NEXT:    fsgnjn.h fa5, fa4, fa5
+; RV32IZFH-NEXT:    fmv.x.h a0, fa5
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64I-LABEL: fcopysign_fneg:
@@ -139,51 +139,51 @@ define half @fcopysign_fneg(half %a, half %b) nounwind {
 ;
 ; RV64IZFH-LABEL: fcopysign_fneg:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    fmv.h.x ft0, a1
-; RV64IZFH-NEXT:    fmv.h.x ft1, a0
-; RV64IZFH-NEXT:    fsgnjn.h ft0, ft1, ft0
-; RV64IZFH-NEXT:    fmv.x.h a0, ft0
+; RV64IZFH-NEXT:    fmv.h.x fa5, a1
+; RV64IZFH-NEXT:    fmv.h.x fa4, a0
+; RV64IZFH-NEXT:    fsgnjn.h fa5, fa4, fa5
+; RV64IZFH-NEXT:    fmv.x.h a0, fa5
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV32IZFHMIN-LABEL: fcopysign_fneg:
 ; RV32IZFHMIN:       # %bb.0:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
-; RV32IZFHMIN-NEXT:    fmv.h.x ft0, a0
-; RV32IZFHMIN-NEXT:    fmv.h.x ft1, a1
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; RV32IZFHMIN-NEXT:    fneg.s ft1, ft1
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; RV32IZFHMIN-NEXT:    fsh ft0, 8(sp)
-; RV32IZFHMIN-NEXT:    fsh ft1, 12(sp)
+; RV32IZFHMIN-NEXT:    fmv.h.x fa5, a0
+; RV32IZFHMIN-NEXT:    fmv.h.x fa4, a1
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; RV32IZFHMIN-NEXT:    fneg.s fa4, fa4
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; RV32IZFHMIN-NEXT:    fsh fa5, 8(sp)
+; RV32IZFHMIN-NEXT:    fsh fa4, 12(sp)
 ; RV32IZFHMIN-NEXT:    lbu a0, 9(sp)
 ; RV32IZFHMIN-NEXT:    lbu a1, 13(sp)
 ; RV32IZFHMIN-NEXT:    andi a0, a0, 127
 ; RV32IZFHMIN-NEXT:    andi a1, a1, 128
 ; RV32IZFHMIN-NEXT:    or a0, a0, a1
 ; RV32IZFHMIN-NEXT:    sb a0, 9(sp)
-; RV32IZFHMIN-NEXT:    flh ft0, 8(sp)
-; RV32IZFHMIN-NEXT:    fmv.x.h a0, ft0
+; RV32IZFHMIN-NEXT:    flh fa5, 8(sp)
+; RV32IZFHMIN-NEXT:    fmv.x.h a0, fa5
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: fcopysign_fneg:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
-; RV64IZFHMIN-NEXT:    fmv.h.x ft0, a0
-; RV64IZFHMIN-NEXT:    fmv.h.x ft1, a1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; RV64IZFHMIN-NEXT:    fneg.s ft1, ft1
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft1, ft1
-; RV64IZFHMIN-NEXT:    fsh ft0, 0(sp)
-; RV64IZFHMIN-NEXT:    fsh ft1, 8(sp)
+; RV64IZFHMIN-NEXT:    fmv.h.x fa5, a0
+; RV64IZFHMIN-NEXT:    fmv.h.x fa4, a1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; RV64IZFHMIN-NEXT:    fneg.s fa4, fa4
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa4, fa4
+; RV64IZFHMIN-NEXT:    fsh fa5, 0(sp)
+; RV64IZFHMIN-NEXT:    fsh fa4, 8(sp)
 ; RV64IZFHMIN-NEXT:    lbu a0, 1(sp)
 ; RV64IZFHMIN-NEXT:    lbu a1, 9(sp)
 ; RV64IZFHMIN-NEXT:    andi a0, a0, 127
 ; RV64IZFHMIN-NEXT:    andi a1, a1, 128
 ; RV64IZFHMIN-NEXT:    or a0, a0, a1
 ; RV64IZFHMIN-NEXT:    sb a0, 1(sp)
-; RV64IZFHMIN-NEXT:    flh ft0, 0(sp)
-; RV64IZFHMIN-NEXT:    fmv.x.h a0, ft0
+; RV64IZFHMIN-NEXT:    flh fa5, 0(sp)
+; RV64IZFHMIN-NEXT:    fmv.x.h a0, fa5
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
 ; RV64IZFHMIN-NEXT:    ret
   %1 = fneg half %b

diff --git a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
index de03fdcfe863..901f7f59a62c 100644
--- a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
@@ -90,9 +90,9 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_oeq:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV32IZFHMIN-NEXT:    feq.s a0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV32IZFHMIN-NEXT:    feq.s a0, fa4, fa5
 ; RV32IZFHMIN-NEXT:    bnez a0, .LBB1_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV32IZFHMIN-NEXT:    ret
@@ -103,9 +103,9 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_oeq:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFHMIN-NEXT:    feq.s a0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFHMIN-NEXT:    feq.s a0, fa4, fa5
 ; RV64IZFHMIN-NEXT:    bnez a0, .LBB1_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV64IZFHMIN-NEXT:    ret
@@ -150,9 +150,9 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_oeq_alt:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV32IZFHMIN-NEXT:    feq.s a0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV32IZFHMIN-NEXT:    feq.s a0, fa4, fa5
 ; RV32IZFHMIN-NEXT:    bnez a0, .LBB2_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV32IZFHMIN-NEXT:    ret
@@ -163,9 +163,9 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_oeq_alt:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFHMIN-NEXT:    feq.s a0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFHMIN-NEXT:    feq.s a0, fa4, fa5
 ; RV64IZFHMIN-NEXT:    bnez a0, .LBB2_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV64IZFHMIN-NEXT:    ret
@@ -207,9 +207,9 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_ogt:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; RV32IZFHMIN-NEXT:    flt.s a0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; RV32IZFHMIN-NEXT:    flt.s a0, fa4, fa5
 ; RV32IZFHMIN-NEXT:    bnez a0, .LBB3_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV32IZFHMIN-NEXT:    ret
@@ -220,9 +220,9 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_ogt:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; RV64IZFHMIN-NEXT:    flt.s a0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; RV64IZFHMIN-NEXT:    flt.s a0, fa4, fa5
 ; RV64IZFHMIN-NEXT:    bnez a0, .LBB3_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV64IZFHMIN-NEXT:    ret
@@ -264,9 +264,9 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_oge:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; RV32IZFHMIN-NEXT:    fle.s a0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; RV32IZFHMIN-NEXT:    fle.s a0, fa4, fa5
 ; RV32IZFHMIN-NEXT:    bnez a0, .LBB4_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV32IZFHMIN-NEXT:    ret
@@ -277,9 +277,9 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_oge:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; RV64IZFHMIN-NEXT:    fle.s a0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; RV64IZFHMIN-NEXT:    fle.s a0, fa4, fa5
 ; RV64IZFHMIN-NEXT:    bnez a0, .LBB4_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV64IZFHMIN-NEXT:    ret
@@ -321,9 +321,9 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_olt:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV32IZFHMIN-NEXT:    flt.s a0, fa4, fa5
 ; RV32IZFHMIN-NEXT:    bnez a0, .LBB5_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV32IZFHMIN-NEXT:    ret
@@ -334,9 +334,9 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_olt:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFHMIN-NEXT:    flt.s a0, fa4, fa5
 ; RV64IZFHMIN-NEXT:    bnez a0, .LBB5_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV64IZFHMIN-NEXT:    ret
@@ -378,9 +378,9 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_ole:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV32IZFHMIN-NEXT:    fle.s a0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV32IZFHMIN-NEXT:    fle.s a0, fa4, fa5
 ; RV32IZFHMIN-NEXT:    bnez a0, .LBB6_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV32IZFHMIN-NEXT:    ret
@@ -391,9 +391,9 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_ole:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFHMIN-NEXT:    fle.s a0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFHMIN-NEXT:    fle.s a0, fa4, fa5
 ; RV64IZFHMIN-NEXT:    bnez a0, .LBB6_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV64IZFHMIN-NEXT:    ret
@@ -439,10 +439,10 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_one:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft1, ft0
-; RV32IZFHMIN-NEXT:    flt.s a1, ft0, ft1
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV32IZFHMIN-NEXT:    flt.s a0, fa4, fa5
+; RV32IZFHMIN-NEXT:    flt.s a1, fa5, fa4
 ; RV32IZFHMIN-NEXT:    or a0, a1, a0
 ; RV32IZFHMIN-NEXT:    bnez a0, .LBB7_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
@@ -454,10 +454,10 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_one:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft1, ft0
-; RV64IZFHMIN-NEXT:    flt.s a1, ft0, ft1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFHMIN-NEXT:    flt.s a0, fa4, fa5
+; RV64IZFHMIN-NEXT:    flt.s a1, fa5, fa4
 ; RV64IZFHMIN-NEXT:    or a0, a1, a0
 ; RV64IZFHMIN-NEXT:    bnez a0, .LBB7_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else
@@ -504,10 +504,10 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_ord:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV32IZFHMIN-NEXT:    feq.s a0, ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV32IZFHMIN-NEXT:    feq.s a0, fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV32IZFHMIN-NEXT:    and a0, a1, a0
 ; RV32IZFHMIN-NEXT:    bnez a0, .LBB8_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
@@ -519,10 +519,10 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_ord:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    feq.s a0, ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    feq.s a0, fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    and a0, a1, a0
 ; RV64IZFHMIN-NEXT:    bnez a0, .LBB8_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else
@@ -569,10 +569,10 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_ueq:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft1, ft0
-; RV32IZFHMIN-NEXT:    flt.s a1, ft0, ft1
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV32IZFHMIN-NEXT:    flt.s a0, fa4, fa5
+; RV32IZFHMIN-NEXT:    flt.s a1, fa5, fa4
 ; RV32IZFHMIN-NEXT:    or a0, a1, a0
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB9_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
@@ -584,10 +584,10 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_ueq:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft1, ft0
-; RV64IZFHMIN-NEXT:    flt.s a1, ft0, ft1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFHMIN-NEXT:    flt.s a0, fa4, fa5
+; RV64IZFHMIN-NEXT:    flt.s a1, fa5, fa4
 ; RV64IZFHMIN-NEXT:    or a0, a1, a0
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB9_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else
@@ -630,9 +630,9 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_ugt:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV32IZFHMIN-NEXT:    fle.s a0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV32IZFHMIN-NEXT:    fle.s a0, fa4, fa5
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB10_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV32IZFHMIN-NEXT:    ret
@@ -643,9 +643,9 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_ugt:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFHMIN-NEXT:    fle.s a0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFHMIN-NEXT:    fle.s a0, fa4, fa5
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB10_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV64IZFHMIN-NEXT:    ret
@@ -687,9 +687,9 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_uge:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV32IZFHMIN-NEXT:    flt.s a0, fa4, fa5
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB11_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV32IZFHMIN-NEXT:    ret
@@ -700,9 +700,9 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_uge:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFHMIN-NEXT:    flt.s a0, fa4, fa5
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB11_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV64IZFHMIN-NEXT:    ret
@@ -744,9 +744,9 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_ult:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; RV32IZFHMIN-NEXT:    fle.s a0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; RV32IZFHMIN-NEXT:    fle.s a0, fa4, fa5
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB12_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV32IZFHMIN-NEXT:    ret
@@ -757,9 +757,9 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_ult:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; RV64IZFHMIN-NEXT:    fle.s a0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; RV64IZFHMIN-NEXT:    fle.s a0, fa4, fa5
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB12_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV64IZFHMIN-NEXT:    ret
@@ -801,9 +801,9 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_ule:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; RV32IZFHMIN-NEXT:    flt.s a0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; RV32IZFHMIN-NEXT:    flt.s a0, fa4, fa5
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB13_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV32IZFHMIN-NEXT:    ret
@@ -814,9 +814,9 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_ule:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; RV64IZFHMIN-NEXT:    flt.s a0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; RV64IZFHMIN-NEXT:    flt.s a0, fa4, fa5
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB13_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV64IZFHMIN-NEXT:    ret
@@ -858,9 +858,9 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_une:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV32IZFHMIN-NEXT:    feq.s a0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV32IZFHMIN-NEXT:    feq.s a0, fa4, fa5
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB14_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV32IZFHMIN-NEXT:    ret
@@ -871,9 +871,9 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_une:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFHMIN-NEXT:    feq.s a0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFHMIN-NEXT:    feq.s a0, fa4, fa5
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB14_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else
 ; RV64IZFHMIN-NEXT:    ret
@@ -919,10 +919,10 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: br_fcmp_uno:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV32IZFHMIN-NEXT:    feq.s a0, ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV32IZFHMIN-NEXT:    feq.s a0, fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV32IZFHMIN-NEXT:    and a0, a1, a0
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB15_2
 ; RV32IZFHMIN-NEXT:  # %bb.1: # %if.else
@@ -934,10 +934,10 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: br_fcmp_uno:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    feq.s a0, ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    feq.s a0, fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    and a0, a1, a0
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB15_2
 ; RV64IZFHMIN-NEXT:  # %bb.1: # %if.else

diff --git a/llvm/test/CodeGen/RISCV/half-convert-strict.ll b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
index 296db8d1beef..638ba4a018ef 100644
--- a/llvm/test/CodeGen/RISCV/half-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
@@ -51,14 +51,14 @@ define i16 @fcvt_si_h(half %a) nounwind strictfp {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_si_h:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_si_h:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = call i16 @llvm.experimental.constrained.fptosi.i16.f16(half %a, metadata !"fpexcept.strict") strictfp
   ret i16 %1
@@ -88,14 +88,14 @@ define i16 @fcvt_ui_h(half %a) nounwind strictfp {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_ui_h:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_ui_h:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = call i16 @llvm.experimental.constrained.fptoui.i16.f16(half %a, metadata !"fpexcept.strict") strictfp
   ret i16 %1
@@ -120,14 +120,14 @@ define i32 @fcvt_w_h(half %a) nounwind strictfp {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_w_h:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_w_h:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
   ret i32 %1
@@ -152,14 +152,14 @@ define i32 @fcvt_wu_h(half %a) nounwind strictfp {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_wu_h:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_wu_h:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
   ret i32 %1
@@ -193,16 +193,16 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_wu_h_multiple_use:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    seqz a1, a0
 ; CHECK32-IZFHMIN-NEXT:    add a0, a0, a1
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_wu_h_multiple_use:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    seqz a1, a0
 ; CHECK64-IZFHMIN-NEXT:    add a0, a0, a1
 ; CHECK64-IZFHMIN-NEXT:    ret
@@ -252,8 +252,8 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_l_h:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f16(half %a, metadata !"fpexcept.strict") strictfp
   ret i64 %1
@@ -300,8 +300,8 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_lu_h:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f16(half %a, metadata !"fpexcept.strict") strictfp
   ret i64 %1
@@ -341,16 +341,16 @@ define half @fcvt_h_si(i16 %a) nounwind strictfp {
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    slli a0, a0, 16
 ; CHECK32-IZFHMIN-NEXT:    srai a0, a0, 16
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.w ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.w fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_si:
 ; CHECK64-IZFHMIN:       # %bb.0:
 ; CHECK64-IZFHMIN-NEXT:    slli a0, a0, 48
 ; CHECK64-IZFHMIN-NEXT:    srai a0, a0, 48
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -375,14 +375,14 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind strictfp {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_si_signext:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.w ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.w fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_si_signext:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -421,16 +421,16 @@ define half @fcvt_h_ui(i16 %a) nounwind strictfp {
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    slli a0, a0, 16
 ; CHECK32-IZFHMIN-NEXT:    srli a0, a0, 16
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_ui:
 ; CHECK64-IZFHMIN:       # %bb.0:
 ; CHECK64-IZFHMIN-NEXT:    slli a0, a0, 48
 ; CHECK64-IZFHMIN-NEXT:    srli a0, a0, 48
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -455,14 +455,14 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind strictfp {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_ui_zeroext:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_ui_zeroext:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -486,15 +486,15 @@ define half @fcvt_h_w(i32 %a) nounwind strictfp {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_w:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.w ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.w fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_w:
 ; CHECK64-IZFHMIN:       # %bb.0:
 ; CHECK64-IZFHMIN-NEXT:    sext.w a0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -523,15 +523,15 @@ define half @fcvt_h_w_load(ptr %p) nounwind strictfp {
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_w_load:
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    lw a0, 0(a0)
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.w ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.w fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_w_load:
 ; CHECK64-IZFHMIN:       # %bb.0:
 ; CHECK64-IZFHMIN-NEXT:    lw a0, 0(a0)
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %a = load i32, ptr %p
   %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
@@ -556,16 +556,16 @@ define half @fcvt_h_wu(i32 %a) nounwind strictfp {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_wu:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_wu:
 ; CHECK64-IZFHMIN:       # %bb.0:
 ; CHECK64-IZFHMIN-NEXT:    slli a0, a0, 32
 ; CHECK64-IZFHMIN-NEXT:    srli a0, a0, 32
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -600,15 +600,15 @@ define half @fcvt_h_wu_load(ptr %p) nounwind strictfp {
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_wu_load:
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    lw a0, 0(a0)
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_wu_load:
 ; CHECK64-IZFHMIN:       # %bb.0:
 ; CHECK64-IZFHMIN-NEXT:    lwu a0, 0(a0)
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %a = load i32, ptr %p
   %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
@@ -655,8 +655,8 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_l:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.sitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -703,8 +703,8 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_lu:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.uitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -902,45 +902,45 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) {
 ; RV32IZFH-LABEL: fcvt_h_w_demanded_bits:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi a0, a0, 1
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0
-; RV32IZFH-NEXT:    fsh ft0, 0(a1)
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0
+; RV32IZFH-NEXT:    fsh fa5, 0(a1)
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: fcvt_h_w_demanded_bits:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addiw a0, a0, 1
-; RV64IZFH-NEXT:    fcvt.h.w ft0, a0
-; RV64IZFH-NEXT:    fsh ft0, 0(a1)
+; RV64IZFH-NEXT:    fcvt.h.w fa5, a0
+; RV64IZFH-NEXT:    fsh fa5, 0(a1)
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: fcvt_h_w_demanded_bits:
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi a0, a0, 1
-; RV32IDZFH-NEXT:    fcvt.h.w ft0, a0
-; RV32IDZFH-NEXT:    fsh ft0, 0(a1)
+; RV32IDZFH-NEXT:    fcvt.h.w fa5, a0
+; RV32IDZFH-NEXT:    fsh fa5, 0(a1)
 ; RV32IDZFH-NEXT:    ret
 ;
 ; RV64IDZFH-LABEL: fcvt_h_w_demanded_bits:
 ; RV64IDZFH:       # %bb.0:
 ; RV64IDZFH-NEXT:    addiw a0, a0, 1
-; RV64IDZFH-NEXT:    fcvt.h.w ft0, a0
-; RV64IDZFH-NEXT:    fsh ft0, 0(a1)
+; RV64IDZFH-NEXT:    fcvt.h.w fa5, a0
+; RV64IDZFH-NEXT:    fsh fa5, 0(a1)
 ; RV64IDZFH-NEXT:    ret
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_w_demanded_bits:
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    addi a0, a0, 1
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.w ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK32-IZFHMIN-NEXT:    fsh ft0, 0(a1)
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.w fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK32-IZFHMIN-NEXT:    fsh fa5, 0(a1)
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_w_demanded_bits:
 ; CHECK64-IZFHMIN:       # %bb.0:
 ; CHECK64-IZFHMIN-NEXT:    addiw a0, a0, 1
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK64-IZFHMIN-NEXT:    fsh ft0, 0(a1)
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK64-IZFHMIN-NEXT:    fsh fa5, 0(a1)
 ; CHECK64-IZFHMIN-NEXT:    ret
   %3 = add i32 %0, 1
   %4 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
@@ -953,37 +953,37 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) {
 ; RV32IZFH-LABEL: fcvt_h_wu_demanded_bits:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi a0, a0, 1
-; RV32IZFH-NEXT:    fcvt.h.wu ft0, a0
-; RV32IZFH-NEXT:    fsh ft0, 0(a1)
+; RV32IZFH-NEXT:    fcvt.h.wu fa5, a0
+; RV32IZFH-NEXT:    fsh fa5, 0(a1)
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: fcvt_h_wu_demanded_bits:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addiw a0, a0, 1
-; RV64IZFH-NEXT:    fcvt.h.wu ft0, a0
-; RV64IZFH-NEXT:    fsh ft0, 0(a1)
+; RV64IZFH-NEXT:    fcvt.h.wu fa5, a0
+; RV64IZFH-NEXT:    fsh fa5, 0(a1)
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: fcvt_h_wu_demanded_bits:
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi a0, a0, 1
-; RV32IDZFH-NEXT:    fcvt.h.wu ft0, a0
-; RV32IDZFH-NEXT:    fsh ft0, 0(a1)
+; RV32IDZFH-NEXT:    fcvt.h.wu fa5, a0
+; RV32IDZFH-NEXT:    fsh fa5, 0(a1)
 ; RV32IDZFH-NEXT:    ret
 ;
 ; RV64IDZFH-LABEL: fcvt_h_wu_demanded_bits:
 ; RV64IDZFH:       # %bb.0:
 ; RV64IDZFH-NEXT:    addiw a0, a0, 1
-; RV64IDZFH-NEXT:    fcvt.h.wu ft0, a0
-; RV64IDZFH-NEXT:    fsh ft0, 0(a1)
+; RV64IDZFH-NEXT:    fcvt.h.wu fa5, a0
+; RV64IDZFH-NEXT:    fsh fa5, 0(a1)
 ; RV64IDZFH-NEXT:    ret
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_wu_demanded_bits:
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    addi a0, a0, 1
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK32-IZFHMIN-NEXT:    fsh ft0, 0(a1)
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK32-IZFHMIN-NEXT:    fsh fa5, 0(a1)
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_wu_demanded_bits:
@@ -991,9 +991,9 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) {
 ; CHECK64-IZFHMIN-NEXT:    addiw a0, a0, 1
 ; CHECK64-IZFHMIN-NEXT:    slli a2, a0, 32
 ; CHECK64-IZFHMIN-NEXT:    srli a2, a2, 32
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu ft0, a2
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK64-IZFHMIN-NEXT:    fsh ft0, 0(a1)
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu fa5, a2
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK64-IZFHMIN-NEXT:    fsh fa5, 0(a1)
 ; CHECK64-IZFHMIN-NEXT:    ret
   %3 = add i32 %0, 1
   %4 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp

diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index e28f0cc45483..5b2ffe503a66 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -67,14 +67,14 @@ define i16 @fcvt_si_h(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_si_h:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_si_h:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = fptosi half %a to i16
   ret i16 %1
@@ -83,15 +83,15 @@ define i16 @fcvt_si_h(half %a) nounwind {
 define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV32IZFH-LABEL: fcvt_si_h_sat:
 ; RV32IZFH:       # %bb.0: # %start
-; RV32IZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFH-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV32IZFH-NEXT:    flw ft1, %lo(.LCPI1_0)(a0)
+; RV32IZFH-NEXT:    flw fa4, %lo(.LCPI1_0)(a0)
 ; RV32IZFH-NEXT:    lui a0, 815104
-; RV32IZFH-NEXT:    fmv.w.x ft2, a0
-; RV32IZFH-NEXT:    fmax.s ft2, ft0, ft2
-; RV32IZFH-NEXT:    fmin.s ft1, ft2, ft1
-; RV32IZFH-NEXT:    fcvt.w.s a0, ft1, rtz
-; RV32IZFH-NEXT:    feq.s a1, ft0, ft0
+; RV32IZFH-NEXT:    fmv.w.x fa3, a0
+; RV32IZFH-NEXT:    fmax.s fa3, fa5, fa3
+; RV32IZFH-NEXT:    fmin.s fa4, fa3, fa4
+; RV32IZFH-NEXT:    fcvt.w.s a0, fa4, rtz
+; RV32IZFH-NEXT:    feq.s a1, fa5, fa5
 ; RV32IZFH-NEXT:    seqz a1, a1
 ; RV32IZFH-NEXT:    addi a1, a1, -1
 ; RV32IZFH-NEXT:    and a0, a1, a0
@@ -99,15 +99,15 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ;
 ; RV64IZFH-LABEL: fcvt_si_h_sat:
 ; RV64IZFH:       # %bb.0: # %start
-; RV64IZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFH-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFH-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV64IZFH-NEXT:    flw ft1, %lo(.LCPI1_0)(a0)
+; RV64IZFH-NEXT:    flw fa4, %lo(.LCPI1_0)(a0)
 ; RV64IZFH-NEXT:    lui a0, 815104
-; RV64IZFH-NEXT:    fmv.w.x ft2, a0
-; RV64IZFH-NEXT:    fmax.s ft2, ft0, ft2
-; RV64IZFH-NEXT:    fmin.s ft1, ft2, ft1
-; RV64IZFH-NEXT:    fcvt.l.s a0, ft1, rtz
-; RV64IZFH-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFH-NEXT:    fmv.w.x fa3, a0
+; RV64IZFH-NEXT:    fmax.s fa3, fa5, fa3
+; RV64IZFH-NEXT:    fmin.s fa4, fa3, fa4
+; RV64IZFH-NEXT:    fcvt.l.s a0, fa4, rtz
+; RV64IZFH-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFH-NEXT:    seqz a1, a1
 ; RV64IZFH-NEXT:    addi a1, a1, -1
 ; RV64IZFH-NEXT:    and a0, a1, a0
@@ -115,15 +115,15 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ;
 ; RV32IDZFH-LABEL: fcvt_si_h_sat:
 ; RV32IDZFH:       # %bb.0: # %start
-; RV32IDZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV32IDZFH-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV32IDZFH-NEXT:    flw ft1, %lo(.LCPI1_0)(a0)
+; RV32IDZFH-NEXT:    flw fa4, %lo(.LCPI1_0)(a0)
 ; RV32IDZFH-NEXT:    lui a0, 815104
-; RV32IDZFH-NEXT:    fmv.w.x ft2, a0
-; RV32IDZFH-NEXT:    fmax.s ft2, ft0, ft2
-; RV32IDZFH-NEXT:    fmin.s ft1, ft2, ft1
-; RV32IDZFH-NEXT:    fcvt.w.s a0, ft1, rtz
-; RV32IDZFH-NEXT:    feq.s a1, ft0, ft0
+; RV32IDZFH-NEXT:    fmv.w.x fa3, a0
+; RV32IDZFH-NEXT:    fmax.s fa3, fa5, fa3
+; RV32IDZFH-NEXT:    fmin.s fa4, fa3, fa4
+; RV32IDZFH-NEXT:    fcvt.w.s a0, fa4, rtz
+; RV32IDZFH-NEXT:    feq.s a1, fa5, fa5
 ; RV32IDZFH-NEXT:    seqz a1, a1
 ; RV32IDZFH-NEXT:    addi a1, a1, -1
 ; RV32IDZFH-NEXT:    and a0, a1, a0
@@ -131,15 +131,15 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ;
 ; RV64IDZFH-LABEL: fcvt_si_h_sat:
 ; RV64IDZFH:       # %bb.0: # %start
-; RV64IDZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV64IDZFH-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IDZFH-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV64IDZFH-NEXT:    flw ft1, %lo(.LCPI1_0)(a0)
+; RV64IDZFH-NEXT:    flw fa4, %lo(.LCPI1_0)(a0)
 ; RV64IDZFH-NEXT:    lui a0, 815104
-; RV64IDZFH-NEXT:    fmv.w.x ft2, a0
-; RV64IDZFH-NEXT:    fmax.s ft2, ft0, ft2
-; RV64IDZFH-NEXT:    fmin.s ft1, ft2, ft1
-; RV64IDZFH-NEXT:    fcvt.l.s a0, ft1, rtz
-; RV64IDZFH-NEXT:    feq.s a1, ft0, ft0
+; RV64IDZFH-NEXT:    fmv.w.x fa3, a0
+; RV64IDZFH-NEXT:    fmax.s fa3, fa5, fa3
+; RV64IDZFH-NEXT:    fmin.s fa4, fa3, fa4
+; RV64IDZFH-NEXT:    fcvt.l.s a0, fa4, rtz
+; RV64IDZFH-NEXT:    feq.s a1, fa5, fa5
 ; RV64IDZFH-NEXT:    seqz a1, a1
 ; RV64IDZFH-NEXT:    addi a1, a1, -1
 ; RV64IDZFH-NEXT:    and a0, a1, a0
@@ -233,15 +233,15 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_si_h_sat:
 ; CHECK32-IZFHMIN:       # %bb.0: # %start
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECK32-IZFHMIN-NEXT:    lui a0, %hi(.LCPI1_0)
-; CHECK32-IZFHMIN-NEXT:    flw ft1, %lo(.LCPI1_0)(a0)
+; CHECK32-IZFHMIN-NEXT:    flw fa4, %lo(.LCPI1_0)(a0)
 ; CHECK32-IZFHMIN-NEXT:    lui a0, 815104
-; CHECK32-IZFHMIN-NEXT:    fmv.w.x ft2, a0
-; CHECK32-IZFHMIN-NEXT:    fmax.s ft2, ft0, ft2
-; CHECK32-IZFHMIN-NEXT:    fmin.s ft1, ft2, ft1
-; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, ft1, rtz
-; CHECK32-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK32-IZFHMIN-NEXT:    fmv.w.x fa3, a0
+; CHECK32-IZFHMIN-NEXT:    fmax.s fa3, fa5, fa3
+; CHECK32-IZFHMIN-NEXT:    fmin.s fa4, fa3, fa4
+; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, fa4, rtz
+; CHECK32-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK32-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK32-IZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECK32-IZFHMIN-NEXT:    and a0, a1, a0
@@ -249,15 +249,15 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_si_h_sat:
 ; CHECK64-IZFHMIN:       # %bb.0: # %start
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECK64-IZFHMIN-NEXT:    lui a0, %hi(.LCPI1_0)
-; CHECK64-IZFHMIN-NEXT:    flw ft1, %lo(.LCPI1_0)(a0)
+; CHECK64-IZFHMIN-NEXT:    flw fa4, %lo(.LCPI1_0)(a0)
 ; CHECK64-IZFHMIN-NEXT:    lui a0, 815104
-; CHECK64-IZFHMIN-NEXT:    fmv.w.x ft2, a0
-; CHECK64-IZFHMIN-NEXT:    fmax.s ft2, ft0, ft2
-; CHECK64-IZFHMIN-NEXT:    fmin.s ft1, ft2, ft1
-; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, ft1, rtz
-; CHECK64-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK64-IZFHMIN-NEXT:    fmv.w.x fa3, a0
+; CHECK64-IZFHMIN-NEXT:    fmax.s fa3, fa5, fa3
+; CHECK64-IZFHMIN-NEXT:    fmin.s fa4, fa3, fa4
+; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, fa4, rtz
+; CHECK64-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK64-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK64-IZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECK64-IZFHMIN-NEXT:    and a0, a1, a0
@@ -315,14 +315,14 @@ define i16 @fcvt_ui_h(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_ui_h:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_ui_h:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = fptoui half %a to i16
   ret i16 %1
@@ -332,45 +332,45 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
 ; RV32IZFH-LABEL: fcvt_ui_h_sat:
 ; RV32IZFH:       # %bb.0: # %start
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI3_0)(a0)
-; RV32IZFH-NEXT:    fcvt.s.h ft1, fa0
-; RV32IZFH-NEXT:    fmv.w.x ft2, zero
-; RV32IZFH-NEXT:    fmax.s ft1, ft1, ft2
-; RV32IZFH-NEXT:    fmin.s ft0, ft1, ft0
-; RV32IZFH-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI3_0)(a0)
+; RV32IZFH-NEXT:    fcvt.s.h fa4, fa0
+; RV32IZFH-NEXT:    fmv.w.x fa3, zero
+; RV32IZFH-NEXT:    fmax.s fa4, fa4, fa3
+; RV32IZFH-NEXT:    fmin.s fa5, fa4, fa5
+; RV32IZFH-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: fcvt_ui_h_sat:
 ; RV64IZFH:       # %bb.0: # %start
 ; RV64IZFH-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV64IZFH-NEXT:    flw ft0, %lo(.LCPI3_0)(a0)
-; RV64IZFH-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFH-NEXT:    fmv.w.x ft2, zero
-; RV64IZFH-NEXT:    fmax.s ft1, ft1, ft2
-; RV64IZFH-NEXT:    fmin.s ft0, ft1, ft0
-; RV64IZFH-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFH-NEXT:    flw fa5, %lo(.LCPI3_0)(a0)
+; RV64IZFH-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFH-NEXT:    fmv.w.x fa3, zero
+; RV64IZFH-NEXT:    fmax.s fa4, fa4, fa3
+; RV64IZFH-NEXT:    fmin.s fa5, fa4, fa5
+; RV64IZFH-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: fcvt_ui_h_sat:
 ; RV32IDZFH:       # %bb.0: # %start
 ; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV32IDZFH-NEXT:    flw ft0, %lo(.LCPI3_0)(a0)
-; RV32IDZFH-NEXT:    fcvt.s.h ft1, fa0
-; RV32IDZFH-NEXT:    fmv.w.x ft2, zero
-; RV32IDZFH-NEXT:    fmax.s ft1, ft1, ft2
-; RV32IDZFH-NEXT:    fmin.s ft0, ft1, ft0
-; RV32IDZFH-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IDZFH-NEXT:    flw fa5, %lo(.LCPI3_0)(a0)
+; RV32IDZFH-NEXT:    fcvt.s.h fa4, fa0
+; RV32IDZFH-NEXT:    fmv.w.x fa3, zero
+; RV32IDZFH-NEXT:    fmax.s fa4, fa4, fa3
+; RV32IDZFH-NEXT:    fmin.s fa5, fa4, fa5
+; RV32IDZFH-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IDZFH-NEXT:    ret
 ;
 ; RV64IDZFH-LABEL: fcvt_ui_h_sat:
 ; RV64IDZFH:       # %bb.0: # %start
 ; RV64IDZFH-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV64IDZFH-NEXT:    flw ft0, %lo(.LCPI3_0)(a0)
-; RV64IDZFH-NEXT:    fcvt.s.h ft1, fa0
-; RV64IDZFH-NEXT:    fmv.w.x ft2, zero
-; RV64IDZFH-NEXT:    fmax.s ft1, ft1, ft2
-; RV64IDZFH-NEXT:    fmin.s ft0, ft1, ft0
-; RV64IDZFH-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IDZFH-NEXT:    flw fa5, %lo(.LCPI3_0)(a0)
+; RV64IDZFH-NEXT:    fcvt.s.h fa4, fa0
+; RV64IDZFH-NEXT:    fmv.w.x fa3, zero
+; RV64IDZFH-NEXT:    fmax.s fa4, fa4, fa3
+; RV64IDZFH-NEXT:    fmin.s fa5, fa4, fa5
+; RV64IDZFH-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IDZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_ui_h_sat:
@@ -452,23 +452,23 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
 ; CHECK32-IZFHMIN-LABEL: fcvt_ui_h_sat:
 ; CHECK32-IZFHMIN:       # %bb.0: # %start
 ; CHECK32-IZFHMIN-NEXT:    lui a0, %hi(.LCPI3_0)
-; CHECK32-IZFHMIN-NEXT:    flw ft0, %lo(.LCPI3_0)(a0)
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECK32-IZFHMIN-NEXT:    fmv.w.x ft2, zero
-; CHECK32-IZFHMIN-NEXT:    fmax.s ft1, ft1, ft2
-; CHECK32-IZFHMIN-NEXT:    fmin.s ft0, ft1, ft0
-; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    flw fa5, %lo(.LCPI3_0)(a0)
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECK32-IZFHMIN-NEXT:    fmv.w.x fa3, zero
+; CHECK32-IZFHMIN-NEXT:    fmax.s fa4, fa4, fa3
+; CHECK32-IZFHMIN-NEXT:    fmin.s fa5, fa4, fa5
+; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_ui_h_sat:
 ; CHECK64-IZFHMIN:       # %bb.0: # %start
 ; CHECK64-IZFHMIN-NEXT:    lui a0, %hi(.LCPI3_0)
-; CHECK64-IZFHMIN-NEXT:    flw ft0, %lo(.LCPI3_0)(a0)
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECK64-IZFHMIN-NEXT:    fmv.w.x ft2, zero
-; CHECK64-IZFHMIN-NEXT:    fmax.s ft1, ft1, ft2
-; CHECK64-IZFHMIN-NEXT:    fmin.s ft0, ft1, ft0
-; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    flw fa5, %lo(.LCPI3_0)(a0)
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECK64-IZFHMIN-NEXT:    fmv.w.x fa3, zero
+; CHECK64-IZFHMIN-NEXT:    fmax.s fa4, fa4, fa3
+; CHECK64-IZFHMIN-NEXT:    fmin.s fa5, fa4, fa5
+; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
 start:
   %0 = tail call i16 @llvm.fptoui.sat.i16.f16(half %a)
@@ -518,14 +518,14 @@ define i32 @fcvt_w_h(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_w_h:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_w_h:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = fptosi half %a to i32
   ret i32 %1
@@ -651,9 +651,9 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_w_h_sat:
 ; CHECK32-IZFHMIN:       # %bb.0: # %start
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; CHECK32-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; CHECK32-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK32-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK32-IZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECK32-IZFHMIN-NEXT:    and a0, a1, a0
@@ -661,9 +661,9 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_w_h_sat:
 ; CHECK64-IZFHMIN:       # %bb.0: # %start
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; CHECK64-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; CHECK64-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK64-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK64-IZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECK64-IZFHMIN-NEXT:    and a0, a1, a0
@@ -716,14 +716,14 @@ define i32 @fcvt_wu_h(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_wu_h:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_wu_h:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = fptoui half %a to i32
   ret i32 %1
@@ -783,16 +783,16 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_wu_h_multiple_use:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    seqz a1, a0
 ; CHECK32-IZFHMIN-NEXT:    add a0, a0, a1
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_wu_h_multiple_use:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    seqz a1, a0
 ; CHECK64-IZFHMIN-NEXT:    add a0, a0, a1
 ; CHECK64-IZFHMIN-NEXT:    ret
@@ -915,9 +915,9 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_wu_h_sat:
 ; CHECK32-IZFHMIN:       # %bb.0: # %start
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
-; CHECK32-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
+; CHECK32-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK32-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK32-IZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECK32-IZFHMIN-NEXT:    and a0, a1, a0
@@ -925,9 +925,9 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_wu_h_sat:
 ; CHECK64-IZFHMIN:       # %bb.0: # %start
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
-; CHECK64-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
+; CHECK64-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK64-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK64-IZFHMIN-NEXT:    addiw a1, a1, -1
 ; CHECK64-IZFHMIN-NEXT:    and a0, a0, a1
@@ -1004,8 +1004,8 @@ define i64 @fcvt_l_h(half %a) nounwind {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_l_h:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = fptosi half %a to i64
   ret i64 %1
@@ -1020,8 +1020,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IZFH-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fcvt.s.h fs0, fa0
 ; RV32IZFH-NEXT:    lui a0, 913408
-; RV32IZFH-NEXT:    fmv.w.x ft0, a0
-; RV32IZFH-NEXT:    fle.s s0, ft0, fs0
+; RV32IZFH-NEXT:    fmv.w.x fa5, a0
+; RV32IZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
 ; RV32IZFH-NEXT:    call __fixsfdi@plt
 ; RV32IZFH-NEXT:    lui a3, 524288
@@ -1030,8 +1030,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IZFH-NEXT:    lui a1, 524288
 ; RV32IZFH-NEXT:  .LBB10_2: # %start
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI10_0)
-; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI10_0)(a2)
-; RV32IZFH-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI10_0)(a2)
+; RV32IZFH-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFH-NEXT:    beqz a2, .LBB10_4
 ; RV32IZFH-NEXT:  # %bb.3:
 ; RV32IZFH-NEXT:    addi a1, a3, -1
@@ -1068,8 +1068,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IDZFH-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV32IDZFH-NEXT:    fcvt.s.h fs0, fa0
 ; RV32IDZFH-NEXT:    lui a0, 913408
-; RV32IDZFH-NEXT:    fmv.w.x ft0, a0
-; RV32IDZFH-NEXT:    fle.s s0, ft0, fs0
+; RV32IDZFH-NEXT:    fmv.w.x fa5, a0
+; RV32IDZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IDZFH-NEXT:    fmv.s fa0, fs0
 ; RV32IDZFH-NEXT:    call __fixsfdi@plt
 ; RV32IDZFH-NEXT:    lui a3, 524288
@@ -1078,8 +1078,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IDZFH-NEXT:    lui a1, 524288
 ; RV32IDZFH-NEXT:  .LBB10_2: # %start
 ; RV32IDZFH-NEXT:    lui a2, %hi(.LCPI10_0)
-; RV32IDZFH-NEXT:    flw ft0, %lo(.LCPI10_0)(a2)
-; RV32IDZFH-NEXT:    flt.s a2, ft0, fs0
+; RV32IDZFH-NEXT:    flw fa5, %lo(.LCPI10_0)(a2)
+; RV32IDZFH-NEXT:    flt.s a2, fa5, fs0
 ; RV32IDZFH-NEXT:    beqz a2, .LBB10_4
 ; RV32IDZFH-NEXT:  # %bb.3:
 ; RV32IDZFH-NEXT:    addi a1, a3, -1
@@ -1229,8 +1229,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IFZFHMIN-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IFZFHMIN-NEXT:    fcvt.s.h fs0, fa0
 ; RV32IFZFHMIN-NEXT:    lui a0, 913408
-; RV32IFZFHMIN-NEXT:    fmv.w.x ft0, a0
-; RV32IFZFHMIN-NEXT:    fle.s s0, ft0, fs0
+; RV32IFZFHMIN-NEXT:    fmv.w.x fa5, a0
+; RV32IFZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IFZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV32IFZFHMIN-NEXT:    call __fixsfdi@plt
 ; RV32IFZFHMIN-NEXT:    lui a3, 524288
@@ -1239,8 +1239,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IFZFHMIN-NEXT:    lui a1, 524288
 ; RV32IFZFHMIN-NEXT:  .LBB10_2: # %start
 ; RV32IFZFHMIN-NEXT:    lui a2, %hi(.LCPI10_0)
-; RV32IFZFHMIN-NEXT:    flw ft0, %lo(.LCPI10_0)(a2)
-; RV32IFZFHMIN-NEXT:    flt.s a2, ft0, fs0
+; RV32IFZFHMIN-NEXT:    flw fa5, %lo(.LCPI10_0)(a2)
+; RV32IFZFHMIN-NEXT:    flt.s a2, fa5, fs0
 ; RV32IFZFHMIN-NEXT:    beqz a2, .LBB10_4
 ; RV32IFZFHMIN-NEXT:  # %bb.3:
 ; RV32IFZFHMIN-NEXT:    addi a1, a3, -1
@@ -1262,9 +1262,9 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_l_h_sat:
 ; CHECK64-IZFHMIN:       # %bb.0: # %start
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
-; CHECK64-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
+; CHECK64-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK64-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK64-IZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECK64-IZFHMIN-NEXT:    and a0, a1, a0
@@ -1278,8 +1278,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IDZFHMIN-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
 ; RV32IDZFHMIN-NEXT:    fcvt.s.h fs0, fa0
 ; RV32IDZFHMIN-NEXT:    lui a0, 913408
-; RV32IDZFHMIN-NEXT:    fmv.w.x ft0, a0
-; RV32IDZFHMIN-NEXT:    fle.s s0, ft0, fs0
+; RV32IDZFHMIN-NEXT:    fmv.w.x fa5, a0
+; RV32IDZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IDZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV32IDZFHMIN-NEXT:    call __fixsfdi@plt
 ; RV32IDZFHMIN-NEXT:    lui a3, 524288
@@ -1288,8 +1288,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IDZFHMIN-NEXT:    lui a1, 524288
 ; RV32IDZFHMIN-NEXT:  .LBB10_2: # %start
 ; RV32IDZFHMIN-NEXT:    lui a2, %hi(.LCPI10_0)
-; RV32IDZFHMIN-NEXT:    flw ft0, %lo(.LCPI10_0)(a2)
-; RV32IDZFHMIN-NEXT:    flt.s a2, ft0, fs0
+; RV32IDZFHMIN-NEXT:    flw fa5, %lo(.LCPI10_0)(a2)
+; RV32IDZFHMIN-NEXT:    flt.s a2, fa5, fs0
 ; RV32IDZFHMIN-NEXT:    beqz a2, .LBB10_4
 ; RV32IDZFHMIN-NEXT:  # %bb.3:
 ; RV32IDZFHMIN-NEXT:    addi a1, a3, -1
@@ -1378,8 +1378,8 @@ define i64 @fcvt_lu_h(half %a) nounwind {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_lu_h:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = fptoui half %a to i64
   ret i64 %1
@@ -1393,12 +1393,12 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32IZFH-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI12_0)
-; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI12_0)(a0)
+; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI12_0)(a0)
 ; RV32IZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT:    flt.s a0, ft0, fa0
+; RV32IZFH-NEXT:    flt.s a0, fa5, fa0
 ; RV32IZFH-NEXT:    neg s0, a0
-; RV32IZFH-NEXT:    fmv.w.x ft0, zero
-; RV32IZFH-NEXT:    fle.s a0, ft0, fa0
+; RV32IZFH-NEXT:    fmv.w.x fa5, zero
+; RV32IZFH-NEXT:    fle.s a0, fa5, fa0
 ; RV32IZFH-NEXT:    neg s1, a0
 ; RV32IZFH-NEXT:    call __fixunssfdi@plt
 ; RV32IZFH-NEXT:    and a0, s1, a0
@@ -1427,12 +1427,12 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32IDZFH-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IDZFH-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI12_0)
-; RV32IDZFH-NEXT:    flw ft0, %lo(.LCPI12_0)(a0)
+; RV32IDZFH-NEXT:    flw fa5, %lo(.LCPI12_0)(a0)
 ; RV32IDZFH-NEXT:    fcvt.s.h fa0, fa0
-; RV32IDZFH-NEXT:    flt.s a0, ft0, fa0
+; RV32IDZFH-NEXT:    flt.s a0, fa5, fa0
 ; RV32IDZFH-NEXT:    neg s0, a0
-; RV32IDZFH-NEXT:    fmv.w.x ft0, zero
-; RV32IDZFH-NEXT:    fle.s a0, ft0, fa0
+; RV32IDZFH-NEXT:    fmv.w.x fa5, zero
+; RV32IDZFH-NEXT:    fle.s a0, fa5, fa0
 ; RV32IDZFH-NEXT:    neg s1, a0
 ; RV32IDZFH-NEXT:    call __fixunssfdi@plt
 ; RV32IDZFH-NEXT:    and a0, s1, a0
@@ -1544,12 +1544,12 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; CHECK32-IZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; CHECK32-IZFHMIN-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; CHECK32-IZFHMIN-NEXT:    lui a0, %hi(.LCPI12_0)
-; CHECK32-IZFHMIN-NEXT:    flw ft0, %lo(.LCPI12_0)(a0)
+; CHECK32-IZFHMIN-NEXT:    flw fa5, %lo(.LCPI12_0)(a0)
 ; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa0, fa0
-; CHECK32-IZFHMIN-NEXT:    flt.s a0, ft0, fa0
+; CHECK32-IZFHMIN-NEXT:    flt.s a0, fa5, fa0
 ; CHECK32-IZFHMIN-NEXT:    neg s0, a0
-; CHECK32-IZFHMIN-NEXT:    fmv.w.x ft0, zero
-; CHECK32-IZFHMIN-NEXT:    fle.s a0, ft0, fa0
+; CHECK32-IZFHMIN-NEXT:    fmv.w.x fa5, zero
+; CHECK32-IZFHMIN-NEXT:    fle.s a0, fa5, fa0
 ; CHECK32-IZFHMIN-NEXT:    neg s1, a0
 ; CHECK32-IZFHMIN-NEXT:    call __fixunssfdi@plt
 ; CHECK32-IZFHMIN-NEXT:    and a0, s1, a0
@@ -1564,9 +1564,9 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_lu_h_sat:
 ; CHECK64-IZFHMIN:       # %bb.0: # %start
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
-; CHECK64-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
+; CHECK64-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK64-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK64-IZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECK64-IZFHMIN-NEXT:    and a0, a1, a0
@@ -1634,16 +1634,16 @@ define half @fcvt_h_si(i16 %a) nounwind {
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    slli a0, a0, 16
 ; CHECK32-IZFHMIN-NEXT:    srai a0, a0, 16
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.w ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.w fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_si:
 ; CHECK64-IZFHMIN:       # %bb.0:
 ; CHECK64-IZFHMIN-NEXT:    slli a0, a0, 48
 ; CHECK64-IZFHMIN-NEXT:    srai a0, a0, 48
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = sitofp i16 %a to half
   ret half %1
@@ -1687,14 +1687,14 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_si_signext:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.w ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.w fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_si_signext:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = sitofp i16 %a to half
   ret half %1
@@ -1757,16 +1757,16 @@ define half @fcvt_h_ui(i16 %a) nounwind {
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    slli a0, a0, 16
 ; CHECK32-IZFHMIN-NEXT:    srli a0, a0, 16
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_ui:
 ; CHECK64-IZFHMIN:       # %bb.0:
 ; CHECK64-IZFHMIN-NEXT:    slli a0, a0, 48
 ; CHECK64-IZFHMIN-NEXT:    srli a0, a0, 48
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = uitofp i16 %a to half
   ret half %1
@@ -1810,14 +1810,14 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_ui_zeroext:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_ui_zeroext:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = uitofp i16 %a to half
   ret half %1
@@ -1862,15 +1862,15 @@ define half @fcvt_h_w(i32 %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_w:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.w ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.w fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_w:
 ; CHECK64-IZFHMIN:       # %bb.0:
 ; CHECK64-IZFHMIN-NEXT:    sext.w a0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = sitofp i32 %a to half
   ret half %1
@@ -1920,15 +1920,15 @@ define half @fcvt_h_w_load(ptr %p) nounwind {
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_w_load:
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    lw a0, 0(a0)
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.w ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.w fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_w_load:
 ; CHECK64-IZFHMIN:       # %bb.0:
 ; CHECK64-IZFHMIN-NEXT:    lw a0, 0(a0)
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %a = load i32, ptr %p
   %1 = sitofp i32 %a to half
@@ -1974,16 +1974,16 @@ define half @fcvt_h_wu(i32 %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_wu:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_wu:
 ; CHECK64-IZFHMIN:       # %bb.0:
 ; CHECK64-IZFHMIN-NEXT:    slli a0, a0, 32
 ; CHECK64-IZFHMIN-NEXT:    srli a0, a0, 32
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = uitofp i32 %a to half
   ret half %1
@@ -2039,15 +2039,15 @@ define half @fcvt_h_wu_load(ptr %p) nounwind {
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_wu_load:
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    lw a0, 0(a0)
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_wu_load:
 ; CHECK64-IZFHMIN:       # %bb.0:
 ; CHECK64-IZFHMIN-NEXT:    lwu a0, 0(a0)
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %a = load i32, ptr %p
   %1 = uitofp i32 %a to half
@@ -2114,8 +2114,8 @@ define half @fcvt_h_l(i64 %a) nounwind {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_l:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = sitofp i64 %a to half
   ret half %1
@@ -2181,8 +2181,8 @@ define half @fcvt_h_lu(i64 %a) nounwind {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_lu:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = uitofp i64 %a to half
   ret half %1
@@ -2533,29 +2533,29 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32IZFH-LABEL: fcvt_h_w_demanded_bits:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi a0, a0, 1
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0
-; RV32IZFH-NEXT:    fsh ft0, 0(a1)
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0
+; RV32IZFH-NEXT:    fsh fa5, 0(a1)
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: fcvt_h_w_demanded_bits:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addiw a0, a0, 1
-; RV64IZFH-NEXT:    fcvt.h.w ft0, a0
-; RV64IZFH-NEXT:    fsh ft0, 0(a1)
+; RV64IZFH-NEXT:    fcvt.h.w fa5, a0
+; RV64IZFH-NEXT:    fsh fa5, 0(a1)
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: fcvt_h_w_demanded_bits:
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi a0, a0, 1
-; RV32IDZFH-NEXT:    fcvt.h.w ft0, a0
-; RV32IDZFH-NEXT:    fsh ft0, 0(a1)
+; RV32IDZFH-NEXT:    fcvt.h.w fa5, a0
+; RV32IDZFH-NEXT:    fsh fa5, 0(a1)
 ; RV32IDZFH-NEXT:    ret
 ;
 ; RV64IDZFH-LABEL: fcvt_h_w_demanded_bits:
 ; RV64IDZFH:       # %bb.0:
 ; RV64IDZFH-NEXT:    addiw a0, a0, 1
-; RV64IDZFH-NEXT:    fcvt.h.w ft0, a0
-; RV64IDZFH-NEXT:    fsh ft0, 0(a1)
+; RV64IDZFH-NEXT:    fcvt.h.w fa5, a0
+; RV64IDZFH-NEXT:    fsh fa5, 0(a1)
 ; RV64IDZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_h_w_demanded_bits:
@@ -2599,17 +2599,17 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_w_demanded_bits:
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    addi a0, a0, 1
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.w ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK32-IZFHMIN-NEXT:    fsh ft0, 0(a1)
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.w fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK32-IZFHMIN-NEXT:    fsh fa5, 0(a1)
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_w_demanded_bits:
 ; CHECK64-IZFHMIN:       # %bb.0:
 ; CHECK64-IZFHMIN-NEXT:    addiw a0, a0, 1
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK64-IZFHMIN-NEXT:    fsh ft0, 0(a1)
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK64-IZFHMIN-NEXT:    fsh fa5, 0(a1)
 ; CHECK64-IZFHMIN-NEXT:    ret
   %3 = add i32 %0, 1
   %4 = sitofp i32 %3 to half
@@ -2622,29 +2622,29 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV32IZFH-LABEL: fcvt_h_wu_demanded_bits:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi a0, a0, 1
-; RV32IZFH-NEXT:    fcvt.h.wu ft0, a0
-; RV32IZFH-NEXT:    fsh ft0, 0(a1)
+; RV32IZFH-NEXT:    fcvt.h.wu fa5, a0
+; RV32IZFH-NEXT:    fsh fa5, 0(a1)
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: fcvt_h_wu_demanded_bits:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addiw a0, a0, 1
-; RV64IZFH-NEXT:    fcvt.h.wu ft0, a0
-; RV64IZFH-NEXT:    fsh ft0, 0(a1)
+; RV64IZFH-NEXT:    fcvt.h.wu fa5, a0
+; RV64IZFH-NEXT:    fsh fa5, 0(a1)
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: fcvt_h_wu_demanded_bits:
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    addi a0, a0, 1
-; RV32IDZFH-NEXT:    fcvt.h.wu ft0, a0
-; RV32IDZFH-NEXT:    fsh ft0, 0(a1)
+; RV32IDZFH-NEXT:    fcvt.h.wu fa5, a0
+; RV32IDZFH-NEXT:    fsh fa5, 0(a1)
 ; RV32IDZFH-NEXT:    ret
 ;
 ; RV64IDZFH-LABEL: fcvt_h_wu_demanded_bits:
 ; RV64IDZFH:       # %bb.0:
 ; RV64IDZFH-NEXT:    addiw a0, a0, 1
-; RV64IDZFH-NEXT:    fcvt.h.wu ft0, a0
-; RV64IDZFH-NEXT:    fsh ft0, 0(a1)
+; RV64IDZFH-NEXT:    fcvt.h.wu fa5, a0
+; RV64IDZFH-NEXT:    fsh fa5, 0(a1)
 ; RV64IDZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_h_wu_demanded_bits:
@@ -2688,9 +2688,9 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; CHECK32-IZFHMIN-LABEL: fcvt_h_wu_demanded_bits:
 ; CHECK32-IZFHMIN:       # %bb.0:
 ; CHECK32-IZFHMIN-NEXT:    addi a0, a0, 1
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu ft0, a0
-; CHECK32-IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK32-IZFHMIN-NEXT:    fsh ft0, 0(a1)
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.wu fa5, a0
+; CHECK32-IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK32-IZFHMIN-NEXT:    fsh fa5, 0(a1)
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_h_wu_demanded_bits:
@@ -2698,9 +2698,9 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; CHECK64-IZFHMIN-NEXT:    addiw a0, a0, 1
 ; CHECK64-IZFHMIN-NEXT:    slli a2, a0, 32
 ; CHECK64-IZFHMIN-NEXT:    srli a2, a2, 32
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu ft0, a2
-; CHECK64-IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECK64-IZFHMIN-NEXT:    fsh ft0, 0(a1)
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.lu fa5, a2
+; CHECK64-IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECK64-IZFHMIN-NEXT:    fsh fa5, 0(a1)
 ; CHECK64-IZFHMIN-NEXT:    ret
   %3 = add i32 %0, 1
   %4 = uitofp i32 %3 to half
@@ -2755,14 +2755,14 @@ define signext i16 @fcvt_w_s_i16(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_w_s_i16:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_w_s_i16:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = fptosi half %a to i16
   ret i16 %1
@@ -2771,15 +2771,15 @@ define signext i16 @fcvt_w_s_i16(half %a) nounwind {
 define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ; RV32IZFH-LABEL: fcvt_w_s_sat_i16:
 ; RV32IZFH:       # %bb.0: # %start
-; RV32IZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFH-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI32_0)
-; RV32IZFH-NEXT:    flw ft1, %lo(.LCPI32_0)(a0)
+; RV32IZFH-NEXT:    flw fa4, %lo(.LCPI32_0)(a0)
 ; RV32IZFH-NEXT:    lui a0, 815104
-; RV32IZFH-NEXT:    fmv.w.x ft2, a0
-; RV32IZFH-NEXT:    fmax.s ft2, ft0, ft2
-; RV32IZFH-NEXT:    fmin.s ft1, ft2, ft1
-; RV32IZFH-NEXT:    fcvt.w.s a0, ft1, rtz
-; RV32IZFH-NEXT:    feq.s a1, ft0, ft0
+; RV32IZFH-NEXT:    fmv.w.x fa3, a0
+; RV32IZFH-NEXT:    fmax.s fa3, fa5, fa3
+; RV32IZFH-NEXT:    fmin.s fa4, fa3, fa4
+; RV32IZFH-NEXT:    fcvt.w.s a0, fa4, rtz
+; RV32IZFH-NEXT:    feq.s a1, fa5, fa5
 ; RV32IZFH-NEXT:    seqz a1, a1
 ; RV32IZFH-NEXT:    addi a1, a1, -1
 ; RV32IZFH-NEXT:    and a0, a1, a0
@@ -2787,15 +2787,15 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ;
 ; RV64IZFH-LABEL: fcvt_w_s_sat_i16:
 ; RV64IZFH:       # %bb.0: # %start
-; RV64IZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFH-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFH-NEXT:    lui a0, %hi(.LCPI32_0)
-; RV64IZFH-NEXT:    flw ft1, %lo(.LCPI32_0)(a0)
+; RV64IZFH-NEXT:    flw fa4, %lo(.LCPI32_0)(a0)
 ; RV64IZFH-NEXT:    lui a0, 815104
-; RV64IZFH-NEXT:    fmv.w.x ft2, a0
-; RV64IZFH-NEXT:    fmax.s ft2, ft0, ft2
-; RV64IZFH-NEXT:    fmin.s ft1, ft2, ft1
-; RV64IZFH-NEXT:    fcvt.l.s a0, ft1, rtz
-; RV64IZFH-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFH-NEXT:    fmv.w.x fa3, a0
+; RV64IZFH-NEXT:    fmax.s fa3, fa5, fa3
+; RV64IZFH-NEXT:    fmin.s fa4, fa3, fa4
+; RV64IZFH-NEXT:    fcvt.l.s a0, fa4, rtz
+; RV64IZFH-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFH-NEXT:    seqz a1, a1
 ; RV64IZFH-NEXT:    addi a1, a1, -1
 ; RV64IZFH-NEXT:    and a0, a1, a0
@@ -2803,15 +2803,15 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ;
 ; RV32IDZFH-LABEL: fcvt_w_s_sat_i16:
 ; RV32IDZFH:       # %bb.0: # %start
-; RV32IDZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV32IDZFH-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI32_0)
-; RV32IDZFH-NEXT:    flw ft1, %lo(.LCPI32_0)(a0)
+; RV32IDZFH-NEXT:    flw fa4, %lo(.LCPI32_0)(a0)
 ; RV32IDZFH-NEXT:    lui a0, 815104
-; RV32IDZFH-NEXT:    fmv.w.x ft2, a0
-; RV32IDZFH-NEXT:    fmax.s ft2, ft0, ft2
-; RV32IDZFH-NEXT:    fmin.s ft1, ft2, ft1
-; RV32IDZFH-NEXT:    fcvt.w.s a0, ft1, rtz
-; RV32IDZFH-NEXT:    feq.s a1, ft0, ft0
+; RV32IDZFH-NEXT:    fmv.w.x fa3, a0
+; RV32IDZFH-NEXT:    fmax.s fa3, fa5, fa3
+; RV32IDZFH-NEXT:    fmin.s fa4, fa3, fa4
+; RV32IDZFH-NEXT:    fcvt.w.s a0, fa4, rtz
+; RV32IDZFH-NEXT:    feq.s a1, fa5, fa5
 ; RV32IDZFH-NEXT:    seqz a1, a1
 ; RV32IDZFH-NEXT:    addi a1, a1, -1
 ; RV32IDZFH-NEXT:    and a0, a1, a0
@@ -2819,15 +2819,15 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ;
 ; RV64IDZFH-LABEL: fcvt_w_s_sat_i16:
 ; RV64IDZFH:       # %bb.0: # %start
-; RV64IDZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV64IDZFH-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IDZFH-NEXT:    lui a0, %hi(.LCPI32_0)
-; RV64IDZFH-NEXT:    flw ft1, %lo(.LCPI32_0)(a0)
+; RV64IDZFH-NEXT:    flw fa4, %lo(.LCPI32_0)(a0)
 ; RV64IDZFH-NEXT:    lui a0, 815104
-; RV64IDZFH-NEXT:    fmv.w.x ft2, a0
-; RV64IDZFH-NEXT:    fmax.s ft2, ft0, ft2
-; RV64IDZFH-NEXT:    fmin.s ft1, ft2, ft1
-; RV64IDZFH-NEXT:    fcvt.l.s a0, ft1, rtz
-; RV64IDZFH-NEXT:    feq.s a1, ft0, ft0
+; RV64IDZFH-NEXT:    fmv.w.x fa3, a0
+; RV64IDZFH-NEXT:    fmax.s fa3, fa5, fa3
+; RV64IDZFH-NEXT:    fmin.s fa4, fa3, fa4
+; RV64IDZFH-NEXT:    fcvt.l.s a0, fa4, rtz
+; RV64IDZFH-NEXT:    feq.s a1, fa5, fa5
 ; RV64IDZFH-NEXT:    seqz a1, a1
 ; RV64IDZFH-NEXT:    addi a1, a1, -1
 ; RV64IDZFH-NEXT:    and a0, a1, a0
@@ -2925,15 +2925,15 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_w_s_sat_i16:
 ; CHECK32-IZFHMIN:       # %bb.0: # %start
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECK32-IZFHMIN-NEXT:    lui a0, %hi(.LCPI32_0)
-; CHECK32-IZFHMIN-NEXT:    flw ft1, %lo(.LCPI32_0)(a0)
+; CHECK32-IZFHMIN-NEXT:    flw fa4, %lo(.LCPI32_0)(a0)
 ; CHECK32-IZFHMIN-NEXT:    lui a0, 815104
-; CHECK32-IZFHMIN-NEXT:    fmv.w.x ft2, a0
-; CHECK32-IZFHMIN-NEXT:    fmax.s ft2, ft0, ft2
-; CHECK32-IZFHMIN-NEXT:    fmin.s ft1, ft2, ft1
-; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, ft1, rtz
-; CHECK32-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK32-IZFHMIN-NEXT:    fmv.w.x fa3, a0
+; CHECK32-IZFHMIN-NEXT:    fmax.s fa3, fa5, fa3
+; CHECK32-IZFHMIN-NEXT:    fmin.s fa4, fa3, fa4
+; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, fa4, rtz
+; CHECK32-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK32-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK32-IZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECK32-IZFHMIN-NEXT:    and a0, a1, a0
@@ -2941,15 +2941,15 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_w_s_sat_i16:
 ; CHECK64-IZFHMIN:       # %bb.0: # %start
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECK64-IZFHMIN-NEXT:    lui a0, %hi(.LCPI32_0)
-; CHECK64-IZFHMIN-NEXT:    flw ft1, %lo(.LCPI32_0)(a0)
+; CHECK64-IZFHMIN-NEXT:    flw fa4, %lo(.LCPI32_0)(a0)
 ; CHECK64-IZFHMIN-NEXT:    lui a0, 815104
-; CHECK64-IZFHMIN-NEXT:    fmv.w.x ft2, a0
-; CHECK64-IZFHMIN-NEXT:    fmax.s ft2, ft0, ft2
-; CHECK64-IZFHMIN-NEXT:    fmin.s ft1, ft2, ft1
-; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, ft1, rtz
-; CHECK64-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK64-IZFHMIN-NEXT:    fmv.w.x fa3, a0
+; CHECK64-IZFHMIN-NEXT:    fmax.s fa3, fa5, fa3
+; CHECK64-IZFHMIN-NEXT:    fmin.s fa4, fa3, fa4
+; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, fa4, rtz
+; CHECK64-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK64-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK64-IZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECK64-IZFHMIN-NEXT:    and a0, a1, a0
@@ -3006,14 +3006,14 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_wu_s_i16:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_wu_s_i16:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = fptoui half %a to i16
   ret i16 %1
@@ -3023,45 +3023,45 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
 ; RV32IZFH-LABEL: fcvt_wu_s_sat_i16:
 ; RV32IZFH:       # %bb.0: # %start
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI34_0)
-; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI34_0)(a0)
-; RV32IZFH-NEXT:    fcvt.s.h ft1, fa0
-; RV32IZFH-NEXT:    fmv.w.x ft2, zero
-; RV32IZFH-NEXT:    fmax.s ft1, ft1, ft2
-; RV32IZFH-NEXT:    fmin.s ft0, ft1, ft0
-; RV32IZFH-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI34_0)(a0)
+; RV32IZFH-NEXT:    fcvt.s.h fa4, fa0
+; RV32IZFH-NEXT:    fmv.w.x fa3, zero
+; RV32IZFH-NEXT:    fmax.s fa4, fa4, fa3
+; RV32IZFH-NEXT:    fmin.s fa5, fa4, fa5
+; RV32IZFH-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: fcvt_wu_s_sat_i16:
 ; RV64IZFH:       # %bb.0: # %start
 ; RV64IZFH-NEXT:    lui a0, %hi(.LCPI34_0)
-; RV64IZFH-NEXT:    flw ft0, %lo(.LCPI34_0)(a0)
-; RV64IZFH-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFH-NEXT:    fmv.w.x ft2, zero
-; RV64IZFH-NEXT:    fmax.s ft1, ft1, ft2
-; RV64IZFH-NEXT:    fmin.s ft0, ft1, ft0
-; RV64IZFH-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFH-NEXT:    flw fa5, %lo(.LCPI34_0)(a0)
+; RV64IZFH-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFH-NEXT:    fmv.w.x fa3, zero
+; RV64IZFH-NEXT:    fmax.s fa4, fa4, fa3
+; RV64IZFH-NEXT:    fmin.s fa5, fa4, fa5
+; RV64IZFH-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: fcvt_wu_s_sat_i16:
 ; RV32IDZFH:       # %bb.0: # %start
 ; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI34_0)
-; RV32IDZFH-NEXT:    flw ft0, %lo(.LCPI34_0)(a0)
-; RV32IDZFH-NEXT:    fcvt.s.h ft1, fa0
-; RV32IDZFH-NEXT:    fmv.w.x ft2, zero
-; RV32IDZFH-NEXT:    fmax.s ft1, ft1, ft2
-; RV32IDZFH-NEXT:    fmin.s ft0, ft1, ft0
-; RV32IDZFH-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IDZFH-NEXT:    flw fa5, %lo(.LCPI34_0)(a0)
+; RV32IDZFH-NEXT:    fcvt.s.h fa4, fa0
+; RV32IDZFH-NEXT:    fmv.w.x fa3, zero
+; RV32IDZFH-NEXT:    fmax.s fa4, fa4, fa3
+; RV32IDZFH-NEXT:    fmin.s fa5, fa4, fa5
+; RV32IDZFH-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IDZFH-NEXT:    ret
 ;
 ; RV64IDZFH-LABEL: fcvt_wu_s_sat_i16:
 ; RV64IDZFH:       # %bb.0: # %start
 ; RV64IDZFH-NEXT:    lui a0, %hi(.LCPI34_0)
-; RV64IDZFH-NEXT:    flw ft0, %lo(.LCPI34_0)(a0)
-; RV64IDZFH-NEXT:    fcvt.s.h ft1, fa0
-; RV64IDZFH-NEXT:    fmv.w.x ft2, zero
-; RV64IDZFH-NEXT:    fmax.s ft1, ft1, ft2
-; RV64IDZFH-NEXT:    fmin.s ft0, ft1, ft0
-; RV64IDZFH-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IDZFH-NEXT:    flw fa5, %lo(.LCPI34_0)(a0)
+; RV64IDZFH-NEXT:    fcvt.s.h fa4, fa0
+; RV64IDZFH-NEXT:    fmv.w.x fa3, zero
+; RV64IDZFH-NEXT:    fmax.s fa4, fa4, fa3
+; RV64IDZFH-NEXT:    fmin.s fa5, fa4, fa5
+; RV64IDZFH-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IDZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s_sat_i16:
@@ -3149,23 +3149,23 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
 ; CHECK32-IZFHMIN-LABEL: fcvt_wu_s_sat_i16:
 ; CHECK32-IZFHMIN:       # %bb.0: # %start
 ; CHECK32-IZFHMIN-NEXT:    lui a0, %hi(.LCPI34_0)
-; CHECK32-IZFHMIN-NEXT:    flw ft0, %lo(.LCPI34_0)(a0)
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECK32-IZFHMIN-NEXT:    fmv.w.x ft2, zero
-; CHECK32-IZFHMIN-NEXT:    fmax.s ft1, ft1, ft2
-; CHECK32-IZFHMIN-NEXT:    fmin.s ft0, ft1, ft0
-; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    flw fa5, %lo(.LCPI34_0)(a0)
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECK32-IZFHMIN-NEXT:    fmv.w.x fa3, zero
+; CHECK32-IZFHMIN-NEXT:    fmax.s fa4, fa4, fa3
+; CHECK32-IZFHMIN-NEXT:    fmin.s fa5, fa4, fa5
+; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_wu_s_sat_i16:
 ; CHECK64-IZFHMIN:       # %bb.0: # %start
 ; CHECK64-IZFHMIN-NEXT:    lui a0, %hi(.LCPI34_0)
-; CHECK64-IZFHMIN-NEXT:    flw ft0, %lo(.LCPI34_0)(a0)
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECK64-IZFHMIN-NEXT:    fmv.w.x ft2, zero
-; CHECK64-IZFHMIN-NEXT:    fmax.s ft1, ft1, ft2
-; CHECK64-IZFHMIN-NEXT:    fmin.s ft0, ft1, ft0
-; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    flw fa5, %lo(.LCPI34_0)(a0)
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECK64-IZFHMIN-NEXT:    fmv.w.x fa3, zero
+; CHECK64-IZFHMIN-NEXT:    fmax.s fa4, fa4, fa3
+; CHECK64-IZFHMIN-NEXT:    fmin.s fa5, fa4, fa5
+; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
 start:
   %0 = tail call i16 @llvm.fptoui.sat.i16.f16(half %a)
@@ -3219,14 +3219,14 @@ define signext i8 @fcvt_w_s_i8(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_w_s_i8:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_w_s_i8:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = fptosi half %a to i8
   ret i8 %1
@@ -3235,15 +3235,15 @@ define signext i8 @fcvt_w_s_i8(half %a) nounwind {
 define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ; RV32IZFH-LABEL: fcvt_w_s_sat_i8:
 ; RV32IZFH:       # %bb.0: # %start
-; RV32IZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFH-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFH-NEXT:    lui a0, 798720
-; RV32IZFH-NEXT:    fmv.w.x ft1, a0
-; RV32IZFH-NEXT:    fmax.s ft1, ft0, ft1
+; RV32IZFH-NEXT:    fmv.w.x fa4, a0
+; RV32IZFH-NEXT:    fmax.s fa4, fa5, fa4
 ; RV32IZFH-NEXT:    lui a0, 274400
-; RV32IZFH-NEXT:    fmv.w.x ft2, a0
-; RV32IZFH-NEXT:    fmin.s ft1, ft1, ft2
-; RV32IZFH-NEXT:    fcvt.w.s a0, ft1, rtz
-; RV32IZFH-NEXT:    feq.s a1, ft0, ft0
+; RV32IZFH-NEXT:    fmv.w.x fa3, a0
+; RV32IZFH-NEXT:    fmin.s fa4, fa4, fa3
+; RV32IZFH-NEXT:    fcvt.w.s a0, fa4, rtz
+; RV32IZFH-NEXT:    feq.s a1, fa5, fa5
 ; RV32IZFH-NEXT:    seqz a1, a1
 ; RV32IZFH-NEXT:    addi a1, a1, -1
 ; RV32IZFH-NEXT:    and a0, a1, a0
@@ -3251,15 +3251,15 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ;
 ; RV64IZFH-LABEL: fcvt_w_s_sat_i8:
 ; RV64IZFH:       # %bb.0: # %start
-; RV64IZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFH-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFH-NEXT:    lui a0, 798720
-; RV64IZFH-NEXT:    fmv.w.x ft1, a0
-; RV64IZFH-NEXT:    fmax.s ft1, ft0, ft1
+; RV64IZFH-NEXT:    fmv.w.x fa4, a0
+; RV64IZFH-NEXT:    fmax.s fa4, fa5, fa4
 ; RV64IZFH-NEXT:    lui a0, 274400
-; RV64IZFH-NEXT:    fmv.w.x ft2, a0
-; RV64IZFH-NEXT:    fmin.s ft1, ft1, ft2
-; RV64IZFH-NEXT:    fcvt.l.s a0, ft1, rtz
-; RV64IZFH-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFH-NEXT:    fmv.w.x fa3, a0
+; RV64IZFH-NEXT:    fmin.s fa4, fa4, fa3
+; RV64IZFH-NEXT:    fcvt.l.s a0, fa4, rtz
+; RV64IZFH-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFH-NEXT:    seqz a1, a1
 ; RV64IZFH-NEXT:    addi a1, a1, -1
 ; RV64IZFH-NEXT:    and a0, a1, a0
@@ -3267,15 +3267,15 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ;
 ; RV32IDZFH-LABEL: fcvt_w_s_sat_i8:
 ; RV32IDZFH:       # %bb.0: # %start
-; RV32IDZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV32IDZFH-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IDZFH-NEXT:    lui a0, 798720
-; RV32IDZFH-NEXT:    fmv.w.x ft1, a0
-; RV32IDZFH-NEXT:    fmax.s ft1, ft0, ft1
+; RV32IDZFH-NEXT:    fmv.w.x fa4, a0
+; RV32IDZFH-NEXT:    fmax.s fa4, fa5, fa4
 ; RV32IDZFH-NEXT:    lui a0, 274400
-; RV32IDZFH-NEXT:    fmv.w.x ft2, a0
-; RV32IDZFH-NEXT:    fmin.s ft1, ft1, ft2
-; RV32IDZFH-NEXT:    fcvt.w.s a0, ft1, rtz
-; RV32IDZFH-NEXT:    feq.s a1, ft0, ft0
+; RV32IDZFH-NEXT:    fmv.w.x fa3, a0
+; RV32IDZFH-NEXT:    fmin.s fa4, fa4, fa3
+; RV32IDZFH-NEXT:    fcvt.w.s a0, fa4, rtz
+; RV32IDZFH-NEXT:    feq.s a1, fa5, fa5
 ; RV32IDZFH-NEXT:    seqz a1, a1
 ; RV32IDZFH-NEXT:    addi a1, a1, -1
 ; RV32IDZFH-NEXT:    and a0, a1, a0
@@ -3283,15 +3283,15 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ;
 ; RV64IDZFH-LABEL: fcvt_w_s_sat_i8:
 ; RV64IDZFH:       # %bb.0: # %start
-; RV64IDZFH-NEXT:    fcvt.s.h ft0, fa0
+; RV64IDZFH-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IDZFH-NEXT:    lui a0, 798720
-; RV64IDZFH-NEXT:    fmv.w.x ft1, a0
-; RV64IDZFH-NEXT:    fmax.s ft1, ft0, ft1
+; RV64IDZFH-NEXT:    fmv.w.x fa4, a0
+; RV64IDZFH-NEXT:    fmax.s fa4, fa5, fa4
 ; RV64IDZFH-NEXT:    lui a0, 274400
-; RV64IDZFH-NEXT:    fmv.w.x ft2, a0
-; RV64IDZFH-NEXT:    fmin.s ft1, ft1, ft2
-; RV64IDZFH-NEXT:    fcvt.l.s a0, ft1, rtz
-; RV64IDZFH-NEXT:    feq.s a1, ft0, ft0
+; RV64IDZFH-NEXT:    fmv.w.x fa3, a0
+; RV64IDZFH-NEXT:    fmin.s fa4, fa4, fa3
+; RV64IDZFH-NEXT:    fcvt.l.s a0, fa4, rtz
+; RV64IDZFH-NEXT:    feq.s a1, fa5, fa5
 ; RV64IDZFH-NEXT:    seqz a1, a1
 ; RV64IDZFH-NEXT:    addi a1, a1, -1
 ; RV64IDZFH-NEXT:    and a0, a1, a0
@@ -3385,15 +3385,15 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_w_s_sat_i8:
 ; CHECK32-IZFHMIN:       # %bb.0: # %start
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECK32-IZFHMIN-NEXT:    lui a0, 798720
-; CHECK32-IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECK32-IZFHMIN-NEXT:    fmax.s ft1, ft0, ft1
+; CHECK32-IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECK32-IZFHMIN-NEXT:    fmax.s fa4, fa5, fa4
 ; CHECK32-IZFHMIN-NEXT:    lui a0, 274400
-; CHECK32-IZFHMIN-NEXT:    fmv.w.x ft2, a0
-; CHECK32-IZFHMIN-NEXT:    fmin.s ft1, ft1, ft2
-; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, ft1, rtz
-; CHECK32-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK32-IZFHMIN-NEXT:    fmv.w.x fa3, a0
+; CHECK32-IZFHMIN-NEXT:    fmin.s fa4, fa4, fa3
+; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, fa4, rtz
+; CHECK32-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK32-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK32-IZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECK32-IZFHMIN-NEXT:    and a0, a1, a0
@@ -3401,15 +3401,15 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_w_s_sat_i8:
 ; CHECK64-IZFHMIN:       # %bb.0: # %start
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECK64-IZFHMIN-NEXT:    lui a0, 798720
-; CHECK64-IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECK64-IZFHMIN-NEXT:    fmax.s ft1, ft0, ft1
+; CHECK64-IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECK64-IZFHMIN-NEXT:    fmax.s fa4, fa5, fa4
 ; CHECK64-IZFHMIN-NEXT:    lui a0, 274400
-; CHECK64-IZFHMIN-NEXT:    fmv.w.x ft2, a0
-; CHECK64-IZFHMIN-NEXT:    fmin.s ft1, ft1, ft2
-; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, ft1, rtz
-; CHECK64-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK64-IZFHMIN-NEXT:    fmv.w.x fa3, a0
+; CHECK64-IZFHMIN-NEXT:    fmin.s fa4, fa4, fa3
+; CHECK64-IZFHMIN-NEXT:    fcvt.l.s a0, fa4, rtz
+; CHECK64-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK64-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK64-IZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECK64-IZFHMIN-NEXT:    and a0, a1, a0
@@ -3467,14 +3467,14 @@ define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_wu_s_i8:
 ; CHECK32-IZFHMIN:       # %bb.0:
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_wu_s_i8:
 ; CHECK64-IZFHMIN:       # %bb.0:
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
   %1 = fptoui half %a to i8
   ret i8 %1
@@ -3483,46 +3483,46 @@ define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
 define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
 ; RV32IZFH-LABEL: fcvt_wu_s_sat_i8:
 ; RV32IZFH:       # %bb.0: # %start
-; RV32IZFH-NEXT:    fcvt.s.h ft0, fa0
-; RV32IZFH-NEXT:    fmv.w.x ft1, zero
-; RV32IZFH-NEXT:    fmax.s ft0, ft0, ft1
+; RV32IZFH-NEXT:    fcvt.s.h fa5, fa0
+; RV32IZFH-NEXT:    fmv.w.x fa4, zero
+; RV32IZFH-NEXT:    fmax.s fa5, fa5, fa4
 ; RV32IZFH-NEXT:    lui a0, 276464
-; RV32IZFH-NEXT:    fmv.w.x ft1, a0
-; RV32IZFH-NEXT:    fmin.s ft0, ft0, ft1
-; RV32IZFH-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFH-NEXT:    fmv.w.x fa4, a0
+; RV32IZFH-NEXT:    fmin.s fa5, fa5, fa4
+; RV32IZFH-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: fcvt_wu_s_sat_i8:
 ; RV64IZFH:       # %bb.0: # %start
-; RV64IZFH-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFH-NEXT:    fmv.w.x ft1, zero
-; RV64IZFH-NEXT:    fmax.s ft0, ft0, ft1
+; RV64IZFH-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFH-NEXT:    fmv.w.x fa4, zero
+; RV64IZFH-NEXT:    fmax.s fa5, fa5, fa4
 ; RV64IZFH-NEXT:    lui a0, 276464
-; RV64IZFH-NEXT:    fmv.w.x ft1, a0
-; RV64IZFH-NEXT:    fmin.s ft0, ft0, ft1
-; RV64IZFH-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFH-NEXT:    fmv.w.x fa4, a0
+; RV64IZFH-NEXT:    fmin.s fa5, fa5, fa4
+; RV64IZFH-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: fcvt_wu_s_sat_i8:
 ; RV32IDZFH:       # %bb.0: # %start
-; RV32IDZFH-NEXT:    fcvt.s.h ft0, fa0
-; RV32IDZFH-NEXT:    fmv.w.x ft1, zero
-; RV32IDZFH-NEXT:    fmax.s ft0, ft0, ft1
+; RV32IDZFH-NEXT:    fcvt.s.h fa5, fa0
+; RV32IDZFH-NEXT:    fmv.w.x fa4, zero
+; RV32IDZFH-NEXT:    fmax.s fa5, fa5, fa4
 ; RV32IDZFH-NEXT:    lui a0, 276464
-; RV32IDZFH-NEXT:    fmv.w.x ft1, a0
-; RV32IDZFH-NEXT:    fmin.s ft0, ft0, ft1
-; RV32IDZFH-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IDZFH-NEXT:    fmv.w.x fa4, a0
+; RV32IDZFH-NEXT:    fmin.s fa5, fa5, fa4
+; RV32IDZFH-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IDZFH-NEXT:    ret
 ;
 ; RV64IDZFH-LABEL: fcvt_wu_s_sat_i8:
 ; RV64IDZFH:       # %bb.0: # %start
-; RV64IDZFH-NEXT:    fcvt.s.h ft0, fa0
-; RV64IDZFH-NEXT:    fmv.w.x ft1, zero
-; RV64IDZFH-NEXT:    fmax.s ft0, ft0, ft1
+; RV64IDZFH-NEXT:    fcvt.s.h fa5, fa0
+; RV64IDZFH-NEXT:    fmv.w.x fa4, zero
+; RV64IDZFH-NEXT:    fmax.s fa5, fa5, fa4
 ; RV64IDZFH-NEXT:    lui a0, 276464
-; RV64IDZFH-NEXT:    fmv.w.x ft1, a0
-; RV64IDZFH-NEXT:    fmin.s ft0, ft0, ft1
-; RV64IDZFH-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IDZFH-NEXT:    fmv.w.x fa4, a0
+; RV64IDZFH-NEXT:    fmin.s fa5, fa5, fa4
+; RV64IDZFH-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IDZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s_sat_i8:
@@ -3601,24 +3601,24 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_wu_s_sat_i8:
 ; CHECK32-IZFHMIN:       # %bb.0: # %start
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECK32-IZFHMIN-NEXT:    fmax.s ft0, ft0, ft1
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECK32-IZFHMIN-NEXT:    fmax.s fa5, fa5, fa4
 ; CHECK32-IZFHMIN-NEXT:    lui a0, 276464
-; CHECK32-IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECK32-IZFHMIN-NEXT:    fmin.s ft0, ft0, ft1
-; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECK32-IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECK32-IZFHMIN-NEXT:    fmin.s fa5, fa5, fa4
+; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECK32-IZFHMIN-NEXT:    ret
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_wu_s_sat_i8:
 ; CHECK64-IZFHMIN:       # %bb.0: # %start
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fmv.w.x ft1, zero
-; CHECK64-IZFHMIN-NEXT:    fmax.s ft0, ft0, ft1
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fmv.w.x fa4, zero
+; CHECK64-IZFHMIN-NEXT:    fmax.s fa5, fa5, fa4
 ; CHECK64-IZFHMIN-NEXT:    lui a0, 276464
-; CHECK64-IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECK64-IZFHMIN-NEXT:    fmin.s ft0, ft0, ft1
-; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; CHECK64-IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECK64-IZFHMIN-NEXT:    fmin.s fa5, fa5, fa4
+; CHECK64-IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; CHECK64-IZFHMIN-NEXT:    ret
 start:
   %0 = tail call i8 @llvm.fptoui.sat.i8.f16(half %a)
@@ -3741,9 +3741,9 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_wu_h_sat_zext:
 ; CHECK32-IZFHMIN:       # %bb.0: # %start
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
-; CHECK32-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
+; CHECK32-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK32-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK32-IZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECK32-IZFHMIN-NEXT:    and a0, a1, a0
@@ -3751,9 +3751,9 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_wu_h_sat_zext:
 ; CHECK64-IZFHMIN:       # %bb.0: # %start
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
-; CHECK64-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
+; CHECK64-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK64-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK64-IZFHMIN-NEXT:    addiw a1, a1, -1
 ; CHECK64-IZFHMIN-NEXT:    and a0, a0, a1
@@ -3886,9 +3886,9 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
 ;
 ; CHECK32-IZFHMIN-LABEL: fcvt_w_h_sat_sext:
 ; CHECK32-IZFHMIN:       # %bb.0: # %start
-; CHECK32-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; CHECK32-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK32-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; CHECK32-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK32-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK32-IZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECK32-IZFHMIN-NEXT:    and a0, a1, a0
@@ -3896,9 +3896,9 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
 ;
 ; CHECK64-IZFHMIN-LABEL: fcvt_w_h_sat_sext:
 ; CHECK64-IZFHMIN:       # %bb.0: # %start
-; CHECK64-IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECK64-IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; CHECK64-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECK64-IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; CHECK64-IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECK64-IZFHMIN-NEXT:    seqz a1, a1
 ; CHECK64-IZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECK64-IZFHMIN-NEXT:    and a0, a1, a0

diff  --git a/llvm/test/CodeGen/RISCV/half-fcmp.ll b/llvm/test/CodeGen/RISCV/half-fcmp.ll
index e7a13123cab8..f2402cf8a3d1 100644
--- a/llvm/test/CodeGen/RISCV/half-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-fcmp.ll
@@ -54,32 +54,32 @@ define i32 @fcmp_oeq(half %a, half %b) nounwind {
 ;
 ; RV32I-LABEL: fcmp_oeq:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    fmv.h.x ft0, a1
-; RV32I-NEXT:    fmv.h.x ft1, a0
-; RV32I-NEXT:    feq.h a0, ft1, ft0
+; RV32I-NEXT:    fmv.h.x fa5, a1
+; RV32I-NEXT:    fmv.h.x fa4, a0
+; RV32I-NEXT:    feq.h a0, fa4, fa5
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_oeq:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    fmv.h.x ft0, a1
-; RV64I-NEXT:    fmv.h.x ft1, a0
-; RV64I-NEXT:    feq.h a0, ft1, ft0
+; RV64I-NEXT:    fmv.h.x fa5, a1
+; RV64I-NEXT:    fmv.h.x fa4, a0
+; RV64I-NEXT:    feq.h a0, fa4, fa5
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-ILP32F-LP64F-LABEL: fcmp_oeq:
 ; CHECKIZFHMIN-ILP32F-LP64F:       # %bb.0:
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    feq.s a0, ft1, ft0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    feq.s a0, fa4, fa5
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fcmp_oeq:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft0, a0
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft1, a1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    feq.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa5, a0
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa4, a1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    feq.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp oeq half %a, %b
   %2 = zext i1 %1 to i32
@@ -94,32 +94,32 @@ define i32 @fcmp_ogt(half %a, half %b) nounwind {
 ;
 ; RV32I-LABEL: fcmp_ogt:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    fmv.h.x ft0, a0
-; RV32I-NEXT:    fmv.h.x ft1, a1
-; RV32I-NEXT:    flt.h a0, ft1, ft0
+; RV32I-NEXT:    fmv.h.x fa5, a0
+; RV32I-NEXT:    fmv.h.x fa4, a1
+; RV32I-NEXT:    flt.h a0, fa4, fa5
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_ogt:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    fmv.h.x ft0, a0
-; RV64I-NEXT:    fmv.h.x ft1, a1
-; RV64I-NEXT:    flt.h a0, ft1, ft0
+; RV64I-NEXT:    fmv.h.x fa5, a0
+; RV64I-NEXT:    fmv.h.x fa4, a1
+; RV64I-NEXT:    flt.h a0, fa4, fa5
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-ILP32F-LP64F-LABEL: fcmp_ogt:
 ; CHECKIZFHMIN-ILP32F-LP64F:       # %bb.0:
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a0, ft1, ft0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a0, fa4, fa5
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fcmp_ogt:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft0, a1
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa5, a1
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ogt half %a, %b
   %2 = zext i1 %1 to i32
@@ -134,32 +134,32 @@ define i32 @fcmp_oge(half %a, half %b) nounwind {
 ;
 ; RV32I-LABEL: fcmp_oge:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    fmv.h.x ft0, a0
-; RV32I-NEXT:    fmv.h.x ft1, a1
-; RV32I-NEXT:    fle.h a0, ft1, ft0
+; RV32I-NEXT:    fmv.h.x fa5, a0
+; RV32I-NEXT:    fmv.h.x fa4, a1
+; RV32I-NEXT:    fle.h a0, fa4, fa5
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_oge:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    fmv.h.x ft0, a0
-; RV64I-NEXT:    fmv.h.x ft1, a1
-; RV64I-NEXT:    fle.h a0, ft1, ft0
+; RV64I-NEXT:    fmv.h.x fa5, a0
+; RV64I-NEXT:    fmv.h.x fa4, a1
+; RV64I-NEXT:    fle.h a0, fa4, fa5
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-ILP32F-LP64F-LABEL: fcmp_oge:
 ; CHECKIZFHMIN-ILP32F-LP64F:       # %bb.0:
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fle.s a0, ft1, ft0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fle.s a0, fa4, fa5
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fcmp_oge:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft0, a1
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fle.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa5, a1
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fle.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp oge half %a, %b
   %2 = zext i1 %1 to i32
@@ -174,32 +174,32 @@ define i32 @fcmp_olt(half %a, half %b) nounwind {
 ;
 ; RV32I-LABEL: fcmp_olt:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    fmv.h.x ft0, a1
-; RV32I-NEXT:    fmv.h.x ft1, a0
-; RV32I-NEXT:    flt.h a0, ft1, ft0
+; RV32I-NEXT:    fmv.h.x fa5, a1
+; RV32I-NEXT:    fmv.h.x fa4, a0
+; RV32I-NEXT:    flt.h a0, fa4, fa5
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_olt:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    fmv.h.x ft0, a1
-; RV64I-NEXT:    fmv.h.x ft1, a0
-; RV64I-NEXT:    flt.h a0, ft1, ft0
+; RV64I-NEXT:    fmv.h.x fa5, a1
+; RV64I-NEXT:    fmv.h.x fa4, a0
+; RV64I-NEXT:    flt.h a0, fa4, fa5
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-ILP32F-LP64F-LABEL: fcmp_olt:
 ; CHECKIZFHMIN-ILP32F-LP64F:       # %bb.0:
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a0, ft1, ft0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a0, fa4, fa5
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fcmp_olt:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft0, a0
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft1, a1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa5, a0
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa4, a1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp olt half %a, %b
   %2 = zext i1 %1 to i32
@@ -214,32 +214,32 @@ define i32 @fcmp_ole(half %a, half %b) nounwind {
 ;
 ; RV32I-LABEL: fcmp_ole:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    fmv.h.x ft0, a1
-; RV32I-NEXT:    fmv.h.x ft1, a0
-; RV32I-NEXT:    fle.h a0, ft1, ft0
+; RV32I-NEXT:    fmv.h.x fa5, a1
+; RV32I-NEXT:    fmv.h.x fa4, a0
+; RV32I-NEXT:    fle.h a0, fa4, fa5
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_ole:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    fmv.h.x ft0, a1
-; RV64I-NEXT:    fmv.h.x ft1, a0
-; RV64I-NEXT:    fle.h a0, ft1, ft0
+; RV64I-NEXT:    fmv.h.x fa5, a1
+; RV64I-NEXT:    fmv.h.x fa4, a0
+; RV64I-NEXT:    fle.h a0, fa4, fa5
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-ILP32F-LP64F-LABEL: fcmp_ole:
 ; CHECKIZFHMIN-ILP32F-LP64F:       # %bb.0:
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fle.s a0, ft1, ft0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fle.s a0, fa4, fa5
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fcmp_ole:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft0, a0
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft1, a1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fle.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa5, a0
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa4, a1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fle.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ole half %a, %b
   %2 = zext i1 %1 to i32
@@ -256,39 +256,39 @@ define i32 @fcmp_one(half %a, half %b) nounwind {
 ;
 ; RV32I-LABEL: fcmp_one:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    fmv.h.x ft0, a1
-; RV32I-NEXT:    fmv.h.x ft1, a0
-; RV32I-NEXT:    flt.h a0, ft1, ft0
-; RV32I-NEXT:    flt.h a1, ft0, ft1
+; RV32I-NEXT:    fmv.h.x fa5, a1
+; RV32I-NEXT:    fmv.h.x fa4, a0
+; RV32I-NEXT:    flt.h a0, fa4, fa5
+; RV32I-NEXT:    flt.h a1, fa5, fa4
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_one:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    fmv.h.x ft0, a1
-; RV64I-NEXT:    fmv.h.x ft1, a0
-; RV64I-NEXT:    flt.h a0, ft1, ft0
-; RV64I-NEXT:    flt.h a1, ft0, ft1
+; RV64I-NEXT:    fmv.h.x fa5, a1
+; RV64I-NEXT:    fmv.h.x fa4, a0
+; RV64I-NEXT:    flt.h a0, fa4, fa5
+; RV64I-NEXT:    flt.h a1, fa5, fa4
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-ILP32F-LP64F-LABEL: fcmp_one:
 ; CHECKIZFHMIN-ILP32F-LP64F:       # %bb.0:
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a0, ft1, ft0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a1, ft0, ft1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a0, fa4, fa5
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a1, fa5, fa4
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    or a0, a1, a0
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fcmp_one:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft0, a0
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft1, a1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    flt.s a1, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa5, a0
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa4, a1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa5, fa4
+; CHECKIZFHMIN-NEXT:    flt.s a1, fa4, fa5
 ; CHECKIZFHMIN-NEXT:    or a0, a1, a0
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp one half %a, %b
@@ -306,39 +306,39 @@ define i32 @fcmp_ord(half %a, half %b) nounwind {
 ;
 ; RV32I-LABEL: fcmp_ord:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    fmv.h.x ft0, a0
-; RV32I-NEXT:    fmv.h.x ft1, a1
-; RV32I-NEXT:    feq.h a0, ft1, ft1
-; RV32I-NEXT:    feq.h a1, ft0, ft0
+; RV32I-NEXT:    fmv.h.x fa5, a0
+; RV32I-NEXT:    fmv.h.x fa4, a1
+; RV32I-NEXT:    feq.h a0, fa4, fa4
+; RV32I-NEXT:    feq.h a1, fa5, fa5
 ; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_ord:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    fmv.h.x ft0, a0
-; RV64I-NEXT:    fmv.h.x ft1, a1
-; RV64I-NEXT:    feq.h a0, ft1, ft1
-; RV64I-NEXT:    feq.h a1, ft0, ft0
+; RV64I-NEXT:    fmv.h.x fa5, a0
+; RV64I-NEXT:    fmv.h.x fa4, a1
+; RV64I-NEXT:    feq.h a0, fa4, fa4
+; RV64I-NEXT:    feq.h a1, fa5, fa5
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-ILP32F-LP64F-LABEL: fcmp_ord:
 ; CHECKIZFHMIN-ILP32F-LP64F:       # %bb.0:
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    feq.s a0, ft0, ft0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    feq.s a1, ft0, ft0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    feq.s a0, fa5, fa5
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    feq.s a1, fa5, fa5
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    and a0, a1, a0
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fcmp_ord:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft0, a0
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft1, a1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    feq.s a0, ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa5, a0
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa4, a1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    feq.s a0, fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECKIZFHMIN-NEXT:    and a0, a1, a0
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ord half %a, %b
@@ -357,42 +357,42 @@ define i32 @fcmp_ueq(half %a, half %b) nounwind {
 ;
 ; RV32I-LABEL: fcmp_ueq:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    fmv.h.x ft0, a1
-; RV32I-NEXT:    fmv.h.x ft1, a0
-; RV32I-NEXT:    flt.h a0, ft1, ft0
-; RV32I-NEXT:    flt.h a1, ft0, ft1
+; RV32I-NEXT:    fmv.h.x fa5, a1
+; RV32I-NEXT:    fmv.h.x fa4, a0
+; RV32I-NEXT:    flt.h a0, fa4, fa5
+; RV32I-NEXT:    flt.h a1, fa5, fa4
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_ueq:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    fmv.h.x ft0, a1
-; RV64I-NEXT:    fmv.h.x ft1, a0
-; RV64I-NEXT:    flt.h a0, ft1, ft0
-; RV64I-NEXT:    flt.h a1, ft0, ft1
+; RV64I-NEXT:    fmv.h.x fa5, a1
+; RV64I-NEXT:    fmv.h.x fa4, a0
+; RV64I-NEXT:    flt.h a0, fa4, fa5
+; RV64I-NEXT:    flt.h a1, fa5, fa4
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-ILP32F-LP64F-LABEL: fcmp_ueq:
 ; CHECKIZFHMIN-ILP32F-LP64F:       # %bb.0:
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a0, ft1, ft0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a1, ft0, ft1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a0, fa4, fa5
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a1, fa5, fa4
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    or a0, a1, a0
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fcmp_ueq:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft0, a0
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft1, a1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    flt.s a1, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa5, a0
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa4, a1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa5, fa4
+; CHECKIZFHMIN-NEXT:    flt.s a1, fa4, fa5
 ; CHECKIZFHMIN-NEXT:    or a0, a1, a0
 ; CHECKIZFHMIN-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-NEXT:    ret
@@ -410,35 +410,35 @@ define i32 @fcmp_ugt(half %a, half %b) nounwind {
 ;
 ; RV32I-LABEL: fcmp_ugt:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    fmv.h.x ft0, a1
-; RV32I-NEXT:    fmv.h.x ft1, a0
-; RV32I-NEXT:    fle.h a0, ft1, ft0
+; RV32I-NEXT:    fmv.h.x fa5, a1
+; RV32I-NEXT:    fmv.h.x fa4, a0
+; RV32I-NEXT:    fle.h a0, fa4, fa5
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_ugt:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    fmv.h.x ft0, a1
-; RV64I-NEXT:    fmv.h.x ft1, a0
-; RV64I-NEXT:    fle.h a0, ft1, ft0
+; RV64I-NEXT:    fmv.h.x fa5, a1
+; RV64I-NEXT:    fmv.h.x fa4, a0
+; RV64I-NEXT:    fle.h a0, fa4, fa5
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-ILP32F-LP64F-LABEL: fcmp_ugt:
 ; CHECKIZFHMIN-ILP32F-LP64F:       # %bb.0:
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fle.s a0, ft1, ft0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fle.s a0, fa4, fa5
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fcmp_ugt:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft0, a0
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft1, a1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fle.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa5, a0
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa4, a1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fle.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ugt half %a, %b
@@ -455,35 +455,35 @@ define i32 @fcmp_uge(half %a, half %b) nounwind {
 ;
 ; RV32I-LABEL: fcmp_uge:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    fmv.h.x ft0, a1
-; RV32I-NEXT:    fmv.h.x ft1, a0
-; RV32I-NEXT:    flt.h a0, ft1, ft0
+; RV32I-NEXT:    fmv.h.x fa5, a1
+; RV32I-NEXT:    fmv.h.x fa4, a0
+; RV32I-NEXT:    flt.h a0, fa4, fa5
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_uge:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    fmv.h.x ft0, a1
-; RV64I-NEXT:    fmv.h.x ft1, a0
-; RV64I-NEXT:    flt.h a0, ft1, ft0
+; RV64I-NEXT:    fmv.h.x fa5, a1
+; RV64I-NEXT:    fmv.h.x fa4, a0
+; RV64I-NEXT:    flt.h a0, fa4, fa5
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-ILP32F-LP64F-LABEL: fcmp_uge:
 ; CHECKIZFHMIN-ILP32F-LP64F:       # %bb.0:
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a0, ft1, ft0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a0, fa4, fa5
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fcmp_uge:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft0, a0
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft1, a1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa5, a0
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa4, a1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp uge half %a, %b
@@ -500,35 +500,35 @@ define i32 @fcmp_ult(half %a, half %b) nounwind {
 ;
 ; RV32I-LABEL: fcmp_ult:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    fmv.h.x ft0, a0
-; RV32I-NEXT:    fmv.h.x ft1, a1
-; RV32I-NEXT:    fle.h a0, ft1, ft0
+; RV32I-NEXT:    fmv.h.x fa5, a0
+; RV32I-NEXT:    fmv.h.x fa4, a1
+; RV32I-NEXT:    fle.h a0, fa4, fa5
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_ult:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    fmv.h.x ft0, a0
-; RV64I-NEXT:    fmv.h.x ft1, a1
-; RV64I-NEXT:    fle.h a0, ft1, ft0
+; RV64I-NEXT:    fmv.h.x fa5, a0
+; RV64I-NEXT:    fmv.h.x fa4, a1
+; RV64I-NEXT:    fle.h a0, fa4, fa5
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-ILP32F-LP64F-LABEL: fcmp_ult:
 ; CHECKIZFHMIN-ILP32F-LP64F:       # %bb.0:
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fle.s a0, ft1, ft0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fle.s a0, fa4, fa5
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fcmp_ult:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft0, a1
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fle.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa5, a1
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fle.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ult half %a, %b
@@ -545,35 +545,35 @@ define i32 @fcmp_ule(half %a, half %b) nounwind {
 ;
 ; RV32I-LABEL: fcmp_ule:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    fmv.h.x ft0, a0
-; RV32I-NEXT:    fmv.h.x ft1, a1
-; RV32I-NEXT:    flt.h a0, ft1, ft0
+; RV32I-NEXT:    fmv.h.x fa5, a0
+; RV32I-NEXT:    fmv.h.x fa4, a1
+; RV32I-NEXT:    flt.h a0, fa4, fa5
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_ule:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    fmv.h.x ft0, a0
-; RV64I-NEXT:    fmv.h.x ft1, a1
-; RV64I-NEXT:    flt.h a0, ft1, ft0
+; RV64I-NEXT:    fmv.h.x fa5, a0
+; RV64I-NEXT:    fmv.h.x fa4, a1
+; RV64I-NEXT:    flt.h a0, fa4, fa5
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-ILP32F-LP64F-LABEL: fcmp_ule:
 ; CHECKIZFHMIN-ILP32F-LP64F:       # %bb.0:
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a0, ft1, ft0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    flt.s a0, fa4, fa5
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fcmp_ule:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft0, a1
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa5, a1
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ule half %a, %b
@@ -590,35 +590,35 @@ define i32 @fcmp_une(half %a, half %b) nounwind {
 ;
 ; RV32I-LABEL: fcmp_une:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    fmv.h.x ft0, a1
-; RV32I-NEXT:    fmv.h.x ft1, a0
-; RV32I-NEXT:    feq.h a0, ft1, ft0
+; RV32I-NEXT:    fmv.h.x fa5, a1
+; RV32I-NEXT:    fmv.h.x fa4, a0
+; RV32I-NEXT:    feq.h a0, fa4, fa5
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_une:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    fmv.h.x ft0, a1
-; RV64I-NEXT:    fmv.h.x ft1, a0
-; RV64I-NEXT:    feq.h a0, ft1, ft0
+; RV64I-NEXT:    fmv.h.x fa5, a1
+; RV64I-NEXT:    fmv.h.x fa4, a0
+; RV64I-NEXT:    feq.h a0, fa4, fa5
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-ILP32F-LP64F-LABEL: fcmp_une:
 ; CHECKIZFHMIN-ILP32F-LP64F:       # %bb.0:
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    feq.s a0, ft1, ft0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    feq.s a0, fa4, fa5
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fcmp_une:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft0, a0
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft1, a1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    feq.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa5, a0
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa4, a1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    feq.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp une half %a, %b
@@ -637,42 +637,42 @@ define i32 @fcmp_uno(half %a, half %b) nounwind {
 ;
 ; RV32I-LABEL: fcmp_uno:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    fmv.h.x ft0, a0
-; RV32I-NEXT:    fmv.h.x ft1, a1
-; RV32I-NEXT:    feq.h a0, ft1, ft1
-; RV32I-NEXT:    feq.h a1, ft0, ft0
+; RV32I-NEXT:    fmv.h.x fa5, a0
+; RV32I-NEXT:    fmv.h.x fa4, a1
+; RV32I-NEXT:    feq.h a0, fa4, fa4
+; RV32I-NEXT:    feq.h a1, fa5, fa5
 ; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_uno:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    fmv.h.x ft0, a0
-; RV64I-NEXT:    fmv.h.x ft1, a1
-; RV64I-NEXT:    feq.h a0, ft1, ft1
-; RV64I-NEXT:    feq.h a1, ft0, ft0
+; RV64I-NEXT:    fmv.h.x fa5, a0
+; RV64I-NEXT:    fmv.h.x fa4, a1
+; RV64I-NEXT:    feq.h a0, fa4, fa4
+; RV64I-NEXT:    feq.h a1, fa5, fa5
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
 ;
 ; CHECKIZFHMIN-ILP32F-LP64F-LABEL: fcmp_uno:
 ; CHECKIZFHMIN-ILP32F-LP64F:       # %bb.0:
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    feq.s a0, ft0, ft0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    feq.s a1, ft0, ft0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    feq.s a0, fa5, fa5
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    feq.s a1, fa5, fa5
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    and a0, a1, a0
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-ILP32F-LP64F-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fcmp_uno:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft0, a0
-; CHECKIZFHMIN-NEXT:    fmv.h.x ft1, a1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    feq.s a0, ft1, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa5, a0
+; CHECKIZFHMIN-NEXT:    fmv.h.x fa4, a1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    feq.s a0, fa4, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECKIZFHMIN-NEXT:    and a0, a1, a0
 ; CHECKIZFHMIN-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/half-imm.ll b/llvm/test/CodeGen/RISCV/half-imm.ll
index 1daeee976209..5226e5bdb407 100644
--- a/llvm/test/CodeGen/RISCV/half-imm.ll
+++ b/llvm/test/CodeGen/RISCV/half-imm.ll
@@ -28,17 +28,17 @@ define half @half_imm_op(half %a) nounwind {
 ; CHECK-LABEL: half_imm_op:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a0)
-; CHECK-NEXT:    fadd.h fa0, fa0, ft0
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT:    fadd.h fa0, fa0, fa5
 ; CHECK-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: half_imm_op:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 260096
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fadd half %a, 1.0
   ret half %1

diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
index 676fb4090628..7183e8aa9314 100644
--- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
@@ -67,9 +67,9 @@ define half @sqrt_f16(half %a) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: sqrt_f16:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fsqrt.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fsqrt.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.sqrt.f16(half %a)
   ret half %1
@@ -327,8 +327,8 @@ define half @sincos_f16(half %a) nounwind {
 ; RV32IFZFH-NEXT:    fcvt.h.s fs1, fa0
 ; RV32IFZFH-NEXT:    fmv.s fa0, fs0
 ; RV32IFZFH-NEXT:    call cosf@plt
-; RV32IFZFH-NEXT:    fcvt.h.s ft0, fa0
-; RV32IFZFH-NEXT:    fadd.h fa0, fs1, ft0
+; RV32IFZFH-NEXT:    fcvt.h.s fa5, fa0
+; RV32IFZFH-NEXT:    fadd.h fa0, fs1, fa5
 ; RV32IFZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFZFH-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
 ; RV32IFZFH-NEXT:    flw fs1, 4(sp) # 4-byte Folded Reload
@@ -347,8 +347,8 @@ define half @sincos_f16(half %a) nounwind {
 ; RV64IFZFH-NEXT:    fcvt.h.s fs1, fa0
 ; RV64IFZFH-NEXT:    fmv.s fa0, fs0
 ; RV64IFZFH-NEXT:    call cosf@plt
-; RV64IFZFH-NEXT:    fcvt.h.s ft0, fa0
-; RV64IFZFH-NEXT:    fadd.h fa0, fs1, ft0
+; RV64IFZFH-NEXT:    fcvt.h.s fa5, fa0
+; RV64IFZFH-NEXT:    fadd.h fa0, fs1, fa5
 ; RV64IFZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFZFH-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
 ; RV64IFZFH-NEXT:    flw fs1, 0(sp) # 4-byte Folded Reload
@@ -367,8 +367,8 @@ define half @sincos_f16(half %a) nounwind {
 ; RV32IDZFH-NEXT:    fcvt.h.s fs1, fa0
 ; RV32IDZFH-NEXT:    fmv.s fa0, fs0
 ; RV32IDZFH-NEXT:    call cosf@plt
-; RV32IDZFH-NEXT:    fcvt.h.s ft0, fa0
-; RV32IDZFH-NEXT:    fadd.h fa0, fs1, ft0
+; RV32IDZFH-NEXT:    fcvt.h.s fa5, fa0
+; RV32IDZFH-NEXT:    fadd.h fa0, fs1, fa5
 ; RV32IDZFH-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IDZFH-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV32IDZFH-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
@@ -387,8 +387,8 @@ define half @sincos_f16(half %a) nounwind {
 ; RV64IDZFH-NEXT:    fcvt.h.s fs1, fa0
 ; RV64IDZFH-NEXT:    fmv.s fa0, fs0
 ; RV64IDZFH-NEXT:    call cosf@plt
-; RV64IDZFH-NEXT:    fcvt.h.s ft0, fa0
-; RV64IDZFH-NEXT:    fadd.h fa0, fs1, ft0
+; RV64IDZFH-NEXT:    fcvt.h.s fa5, fa0
+; RV64IDZFH-NEXT:    fadd.h fa0, fs1, fa5
 ; RV64IDZFH-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IDZFH-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV64IDZFH-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
@@ -477,11 +477,11 @@ define half @sincos_f16(half %a) nounwind {
 ; RV32IFZFHMIN-NEXT:    fcvt.h.s fs1, fa0
 ; RV32IFZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV32IFZFHMIN-NEXT:    call cosf@plt
-; RV32IFZFHMIN-NEXT:    fcvt.h.s ft0, fa0
-; RV32IFZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IFZFHMIN-NEXT:    fcvt.s.h ft1, fs1
-; RV32IFZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; RV32IFZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IFZFHMIN-NEXT:    fcvt.h.s fa5, fa0
+; RV32IFZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IFZFHMIN-NEXT:    fcvt.s.h fa4, fs1
+; RV32IFZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; RV32IFZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IFZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFZFHMIN-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
 ; RV32IFZFHMIN-NEXT:    flw fs1, 4(sp) # 4-byte Folded Reload
@@ -500,11 +500,11 @@ define half @sincos_f16(half %a) nounwind {
 ; RV64IFZFHMIN-NEXT:    fcvt.h.s fs1, fa0
 ; RV64IFZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV64IFZFHMIN-NEXT:    call cosf@plt
-; RV64IFZFHMIN-NEXT:    fcvt.h.s ft0, fa0
-; RV64IFZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IFZFHMIN-NEXT:    fcvt.s.h ft1, fs1
-; RV64IFZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; RV64IFZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IFZFHMIN-NEXT:    fcvt.h.s fa5, fa0
+; RV64IFZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IFZFHMIN-NEXT:    fcvt.s.h fa4, fs1
+; RV64IFZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; RV64IFZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IFZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFZFHMIN-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
 ; RV64IFZFHMIN-NEXT:    flw fs1, 0(sp) # 4-byte Folded Reload
@@ -523,11 +523,11 @@ define half @sincos_f16(half %a) nounwind {
 ; RV32IDZFHMIN-NEXT:    fcvt.h.s fs1, fa0
 ; RV32IDZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV32IDZFHMIN-NEXT:    call cosf@plt
-; RV32IDZFHMIN-NEXT:    fcvt.h.s ft0, fa0
-; RV32IDZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IDZFHMIN-NEXT:    fcvt.s.h ft1, fs1
-; RV32IDZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; RV32IDZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IDZFHMIN-NEXT:    fcvt.h.s fa5, fa0
+; RV32IDZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IDZFHMIN-NEXT:    fcvt.s.h fa4, fs1
+; RV32IDZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; RV32IDZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IDZFHMIN-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IDZFHMIN-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV32IDZFHMIN-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
@@ -546,11 +546,11 @@ define half @sincos_f16(half %a) nounwind {
 ; RV64IDZFHMIN-NEXT:    fcvt.h.s fs1, fa0
 ; RV64IDZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV64IDZFHMIN-NEXT:    call cosf@plt
-; RV64IDZFHMIN-NEXT:    fcvt.h.s ft0, fa0
-; RV64IDZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IDZFHMIN-NEXT:    fcvt.s.h ft1, fs1
-; RV64IDZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; RV64IDZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IDZFHMIN-NEXT:    fcvt.h.s fa5, fa0
+; RV64IDZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IDZFHMIN-NEXT:    fcvt.s.h fa4, fs1
+; RV64IDZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; RV64IDZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IDZFHMIN-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IDZFHMIN-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV64IDZFHMIN-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
@@ -1124,11 +1124,11 @@ define half @fma_f16(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fma_f16:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa2
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft2, fa0
-; CHECKIZFHMIN-NEXT:    fmadd.s ft0, ft2, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa2
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa3, fa0
+; CHECKIZFHMIN-NEXT:    fmadd.s fa5, fa3, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.fma.f16(half %a, half %b, half %c)
   ret half %1
@@ -1220,14 +1220,14 @@ define half @fmuladd_f16(half %a, half %b, half %c) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fmuladd_f16:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fmul.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa2
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fmul.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa2
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.fmuladd.f16(half %a, half %b, half %c)
   ret half %1
@@ -1255,9 +1255,9 @@ define half @fabs_f16(half %a) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: fabs_f16:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fabs.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fabs.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.fabs.f16(half %a)
   ret half %1
@@ -1325,10 +1325,10 @@ define half @minnum_f16(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: minnum_f16:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fmin.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fmin.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.minnum.f16(half %a, half %b)
   ret half %1
@@ -1396,10 +1396,10 @@ define half @maxnum_f16(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: maxnum_f16:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fmax.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fmax.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.maxnum.f16(half %a, half %b)
   ret half %1
@@ -1487,14 +1487,14 @@ define half @floor_f16(half %a) nounwind {
 ; CHECKIZFH-LABEL: floor_f16:
 ; CHECKIZFH:       # %bb.0:
 ; CHECKIZFH-NEXT:    lui a0, %hi(.LCPI17_0)
-; CHECKIZFH-NEXT:    flh ft0, %lo(.LCPI17_0)(a0)
-; CHECKIZFH-NEXT:    fabs.h ft1, fa0
-; CHECKIZFH-NEXT:    flt.h a0, ft1, ft0
+; CHECKIZFH-NEXT:    flh fa5, %lo(.LCPI17_0)(a0)
+; CHECKIZFH-NEXT:    fabs.h fa4, fa0
+; CHECKIZFH-NEXT:    flt.h a0, fa4, fa5
 ; CHECKIZFH-NEXT:    beqz a0, .LBB17_2
 ; CHECKIZFH-NEXT:  # %bb.1:
 ; CHECKIZFH-NEXT:    fcvt.w.h a0, fa0, rdn
-; CHECKIZFH-NEXT:    fcvt.h.w ft0, a0, rdn
-; CHECKIZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; CHECKIZFH-NEXT:    fcvt.h.w fa5, a0, rdn
+; CHECKIZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; CHECKIZFH-NEXT:  .LBB17_2:
 ; CHECKIZFH-NEXT:    ret
 ;
@@ -1526,18 +1526,18 @@ define half @floor_f16(half %a) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: floor_f16:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB17_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB17_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.floor.f16(half %a)
   ret half %1
@@ -1549,14 +1549,14 @@ define half @ceil_f16(half %a) nounwind {
 ; CHECKIZFH-LABEL: ceil_f16:
 ; CHECKIZFH:       # %bb.0:
 ; CHECKIZFH-NEXT:    lui a0, %hi(.LCPI18_0)
-; CHECKIZFH-NEXT:    flh ft0, %lo(.LCPI18_0)(a0)
-; CHECKIZFH-NEXT:    fabs.h ft1, fa0
-; CHECKIZFH-NEXT:    flt.h a0, ft1, ft0
+; CHECKIZFH-NEXT:    flh fa5, %lo(.LCPI18_0)(a0)
+; CHECKIZFH-NEXT:    fabs.h fa4, fa0
+; CHECKIZFH-NEXT:    flt.h a0, fa4, fa5
 ; CHECKIZFH-NEXT:    beqz a0, .LBB18_2
 ; CHECKIZFH-NEXT:  # %bb.1:
 ; CHECKIZFH-NEXT:    fcvt.w.h a0, fa0, rup
-; CHECKIZFH-NEXT:    fcvt.h.w ft0, a0, rup
-; CHECKIZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; CHECKIZFH-NEXT:    fcvt.h.w fa5, a0, rup
+; CHECKIZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; CHECKIZFH-NEXT:  .LBB18_2:
 ; CHECKIZFH-NEXT:    ret
 ;
@@ -1588,18 +1588,18 @@ define half @ceil_f16(half %a) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: ceil_f16:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB18_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB18_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.ceil.f16(half %a)
   ret half %1
@@ -1611,14 +1611,14 @@ define half @trunc_f16(half %a) nounwind {
 ; CHECKIZFH-LABEL: trunc_f16:
 ; CHECKIZFH:       # %bb.0:
 ; CHECKIZFH-NEXT:    lui a0, %hi(.LCPI19_0)
-; CHECKIZFH-NEXT:    flh ft0, %lo(.LCPI19_0)(a0)
-; CHECKIZFH-NEXT:    fabs.h ft1, fa0
-; CHECKIZFH-NEXT:    flt.h a0, ft1, ft0
+; CHECKIZFH-NEXT:    flh fa5, %lo(.LCPI19_0)(a0)
+; CHECKIZFH-NEXT:    fabs.h fa4, fa0
+; CHECKIZFH-NEXT:    flt.h a0, fa4, fa5
 ; CHECKIZFH-NEXT:    beqz a0, .LBB19_2
 ; CHECKIZFH-NEXT:  # %bb.1:
 ; CHECKIZFH-NEXT:    fcvt.w.h a0, fa0, rtz
-; CHECKIZFH-NEXT:    fcvt.h.w ft0, a0, rtz
-; CHECKIZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; CHECKIZFH-NEXT:    fcvt.h.w fa5, a0, rtz
+; CHECKIZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; CHECKIZFH-NEXT:  .LBB19_2:
 ; CHECKIZFH-NEXT:    ret
 ;
@@ -1650,18 +1650,18 @@ define half @trunc_f16(half %a) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: trunc_f16:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB19_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB19_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.trunc.f16(half %a)
   ret half %1
@@ -1673,14 +1673,14 @@ define half @rint_f16(half %a) nounwind {
 ; CHECKIZFH-LABEL: rint_f16:
 ; CHECKIZFH:       # %bb.0:
 ; CHECKIZFH-NEXT:    lui a0, %hi(.LCPI20_0)
-; CHECKIZFH-NEXT:    flh ft0, %lo(.LCPI20_0)(a0)
-; CHECKIZFH-NEXT:    fabs.h ft1, fa0
-; CHECKIZFH-NEXT:    flt.h a0, ft1, ft0
+; CHECKIZFH-NEXT:    flh fa5, %lo(.LCPI20_0)(a0)
+; CHECKIZFH-NEXT:    fabs.h fa4, fa0
+; CHECKIZFH-NEXT:    flt.h a0, fa4, fa5
 ; CHECKIZFH-NEXT:    beqz a0, .LBB20_2
 ; CHECKIZFH-NEXT:  # %bb.1:
 ; CHECKIZFH-NEXT:    fcvt.w.h a0, fa0
-; CHECKIZFH-NEXT:    fcvt.h.w ft0, a0
-; CHECKIZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; CHECKIZFH-NEXT:    fcvt.h.w fa5, a0
+; CHECKIZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; CHECKIZFH-NEXT:  .LBB20_2:
 ; CHECKIZFH-NEXT:    ret
 ;
@@ -1712,18 +1712,18 @@ define half @rint_f16(half %a) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: rint_f16:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB20_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB20_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.rint.f16(half %a)
   ret half %1
@@ -1811,14 +1811,14 @@ define half @round_f16(half %a) nounwind {
 ; CHECKIZFH-LABEL: round_f16:
 ; CHECKIZFH:       # %bb.0:
 ; CHECKIZFH-NEXT:    lui a0, %hi(.LCPI22_0)
-; CHECKIZFH-NEXT:    flh ft0, %lo(.LCPI22_0)(a0)
-; CHECKIZFH-NEXT:    fabs.h ft1, fa0
-; CHECKIZFH-NEXT:    flt.h a0, ft1, ft0
+; CHECKIZFH-NEXT:    flh fa5, %lo(.LCPI22_0)(a0)
+; CHECKIZFH-NEXT:    fabs.h fa4, fa0
+; CHECKIZFH-NEXT:    flt.h a0, fa4, fa5
 ; CHECKIZFH-NEXT:    beqz a0, .LBB22_2
 ; CHECKIZFH-NEXT:  # %bb.1:
 ; CHECKIZFH-NEXT:    fcvt.w.h a0, fa0, rmm
-; CHECKIZFH-NEXT:    fcvt.h.w ft0, a0, rmm
-; CHECKIZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; CHECKIZFH-NEXT:    fcvt.h.w fa5, a0, rmm
+; CHECKIZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; CHECKIZFH-NEXT:  .LBB22_2:
 ; CHECKIZFH-NEXT:    ret
 ;
@@ -1850,18 +1850,18 @@ define half @round_f16(half %a) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: round_f16:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB22_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB22_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.round.f16(half %a)
   ret half %1
@@ -1873,14 +1873,14 @@ define half @roundeven_f16(half %a) nounwind {
 ; CHECKIZFH-LABEL: roundeven_f16:
 ; CHECKIZFH:       # %bb.0:
 ; CHECKIZFH-NEXT:    lui a0, %hi(.LCPI23_0)
-; CHECKIZFH-NEXT:    flh ft0, %lo(.LCPI23_0)(a0)
-; CHECKIZFH-NEXT:    fabs.h ft1, fa0
-; CHECKIZFH-NEXT:    flt.h a0, ft1, ft0
+; CHECKIZFH-NEXT:    flh fa5, %lo(.LCPI23_0)(a0)
+; CHECKIZFH-NEXT:    fabs.h fa4, fa0
+; CHECKIZFH-NEXT:    flt.h a0, fa4, fa5
 ; CHECKIZFH-NEXT:    beqz a0, .LBB23_2
 ; CHECKIZFH-NEXT:  # %bb.1:
 ; CHECKIZFH-NEXT:    fcvt.w.h a0, fa0, rne
-; CHECKIZFH-NEXT:    fcvt.h.w ft0, a0, rne
-; CHECKIZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; CHECKIZFH-NEXT:    fcvt.h.w fa5, a0, rne
+; CHECKIZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; CHECKIZFH-NEXT:  .LBB23_2:
 ; CHECKIZFH-NEXT:    ret
 ;
@@ -1912,18 +1912,18 @@ define half @roundeven_f16(half %a) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: roundeven_f16:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB23_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB23_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call half @llvm.roundeven.f16(half %a)
   ret half %1

diff --git a/llvm/test/CodeGen/RISCV/half-isnan.ll b/llvm/test/CodeGen/RISCV/half-isnan.ll
index 751ce9ffdcc3..6b39c08f41d6 100644
--- a/llvm/test/CodeGen/RISCV/half-isnan.ll
+++ b/llvm/test/CodeGen/RISCV/half-isnan.ll
@@ -17,8 +17,8 @@ define zeroext i1 @half_is_nan(half %a) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: half_is_nan:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    feq.s a0, ft0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    feq.s a0, fa5, fa5
 ; CHECKIZFHMIN-NEXT:    xori a0, a0, 1
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp uno half %a, 0.000000e+00
@@ -33,8 +33,8 @@ define zeroext i1 @half_not_nan(half %a) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: half_not_nan:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    feq.s a0, ft0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    feq.s a0, fa5, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ord half %a, 0.000000e+00
   ret i1 %1

diff --git a/llvm/test/CodeGen/RISCV/half-mem.ll b/llvm/test/CodeGen/RISCV/half-mem.ll
index e13e818f1360..ebaec2cbd9f8 100644
--- a/llvm/test/CodeGen/RISCV/half-mem.ll
+++ b/llvm/test/CodeGen/RISCV/half-mem.ll
@@ -11,19 +11,19 @@
 define half @flh(ptr %a) nounwind {
 ; CHECKIZFH-LABEL: flh:
 ; CHECKIZFH:       # %bb.0:
-; CHECKIZFH-NEXT:    flh ft0, 0(a0)
-; CHECKIZFH-NEXT:    flh ft1, 6(a0)
-; CHECKIZFH-NEXT:    fadd.h fa0, ft0, ft1
+; CHECKIZFH-NEXT:    flh fa5, 0(a0)
+; CHECKIZFH-NEXT:    flh fa4, 6(a0)
+; CHECKIZFH-NEXT:    fadd.h fa0, fa5, fa4
 ; CHECKIZFH-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: flh:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    flh ft0, 6(a0)
-; CHECKIZFHMIN-NEXT:    flh ft1, 0(a0)
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    flh fa5, 6(a0)
+; CHECKIZFHMIN-NEXT:    flh fa4, 0(a0)
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = load half, ptr %a
   %2 = getelementptr half, ptr %a, i32 3
@@ -39,19 +39,19 @@ define dso_local void @fsh(ptr %a, half %b, half %c) nounwind {
 ; are used, even for the soft half ABI
 ; CHECKIZFH-LABEL: fsh:
 ; CHECKIZFH:       # %bb.0:
-; CHECKIZFH-NEXT:    fadd.h ft0, fa0, fa1
-; CHECKIZFH-NEXT:    fsh ft0, 0(a0)
-; CHECKIZFH-NEXT:    fsh ft0, 16(a0)
+; CHECKIZFH-NEXT:    fadd.h fa5, fa0, fa1
+; CHECKIZFH-NEXT:    fsh fa5, 0(a0)
+; CHECKIZFH-NEXT:    fsh fa5, 16(a0)
 ; CHECKIZFH-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: fsh:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fsh ft0, 0(a0)
-; CHECKIZFHMIN-NEXT:    fsh ft0, 16(a0)
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fsh fa5, 0(a0)
+; CHECKIZFHMIN-NEXT:    fsh fa5, 16(a0)
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fadd half %b, %c
   store half %1, ptr %a
@@ -70,24 +70,24 @@ define half @flh_fsh_global(half %a, half %b) nounwind {
 ; CHECKIZFH:       # %bb.0:
 ; CHECKIZFH-NEXT:    fadd.h fa0, fa0, fa1
 ; CHECKIZFH-NEXT:    lui a0, %hi(G)
-; CHECKIZFH-NEXT:    flh ft0, %lo(G)(a0)
+; CHECKIZFH-NEXT:    flh fa5, %lo(G)(a0)
 ; CHECKIZFH-NEXT:    addi a1, a0, %lo(G)
 ; CHECKIZFH-NEXT:    fsh fa0, %lo(G)(a0)
-; CHECKIZFH-NEXT:    flh ft0, 18(a1)
+; CHECKIZFH-NEXT:    flh fa5, 18(a1)
 ; CHECKIZFH-NEXT:    fsh fa0, 18(a1)
 ; CHECKIZFH-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: flh_fsh_global:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    lui a0, %hi(G)
-; CHECKIZFHMIN-NEXT:    flh ft0, %lo(G)(a0)
+; CHECKIZFHMIN-NEXT:    flh fa5, %lo(G)(a0)
 ; CHECKIZFHMIN-NEXT:    addi a1, a0, %lo(G)
 ; CHECKIZFHMIN-NEXT:    fsh fa0, %lo(G)(a0)
-; CHECKIZFHMIN-NEXT:    flh ft0, 18(a1)
+; CHECKIZFHMIN-NEXT:    flh fa5, 18(a1)
 ; CHECKIZFHMIN-NEXT:    fsh fa0, 18(a1)
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fadd half %a, %b
@@ -104,8 +104,8 @@ define half @flh_fsh_constant(half %a) nounwind {
 ; RV32IZFH-LABEL: flh_fsh_constant:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    lui a0, 912092
-; RV32IZFH-NEXT:    flh ft0, -273(a0)
-; RV32IZFH-NEXT:    fadd.h fa0, fa0, ft0
+; RV32IZFH-NEXT:    flh fa5, -273(a0)
+; RV32IZFH-NEXT:    fadd.h fa0, fa0, fa5
 ; RV32IZFH-NEXT:    fsh fa0, -273(a0)
 ; RV32IZFH-NEXT:    ret
 ;
@@ -113,19 +113,19 @@ define half @flh_fsh_constant(half %a) nounwind {
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    lui a0, 228023
 ; RV64IZFH-NEXT:    slli a0, a0, 2
-; RV64IZFH-NEXT:    flh ft0, -273(a0)
-; RV64IZFH-NEXT:    fadd.h fa0, fa0, ft0
+; RV64IZFH-NEXT:    flh fa5, -273(a0)
+; RV64IZFH-NEXT:    fadd.h fa0, fa0, fa5
 ; RV64IZFH-NEXT:    fsh fa0, -273(a0)
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV32IZFHMIN-LABEL: flh_fsh_constant:
 ; RV32IZFHMIN:       # %bb.0:
 ; RV32IZFHMIN-NEXT:    lui a0, 912092
-; RV32IZFHMIN-NEXT:    flh ft0, -273(a0)
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IZFHMIN-NEXT:    flh fa5, -273(a0)
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    fsh fa0, -273(a0)
 ; RV32IZFHMIN-NEXT:    ret
 ;
@@ -133,11 +133,11 @@ define half @flh_fsh_constant(half %a) nounwind {
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    lui a0, 228023
 ; RV64IZFHMIN-NEXT:    slli a0, a0, 2
-; RV64IZFHMIN-NEXT:    flh ft0, -273(a0)
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    flh fa5, -273(a0)
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    fsh fa0, -273(a0)
 ; RV64IZFHMIN-NEXT:    ret
   %1 = inttoptr i32 3735928559 to ptr
@@ -158,8 +158,8 @@ define half @flh_stack(half %a) nounwind {
 ; RV32IZFH-NEXT:    fmv.h fs0, fa0
 ; RV32IZFH-NEXT:    addi a0, sp, 4
 ; RV32IZFH-NEXT:    call notdead@plt
-; RV32IZFH-NEXT:    flh ft0, 4(sp)
-; RV32IZFH-NEXT:    fadd.h fa0, ft0, fs0
+; RV32IZFH-NEXT:    flh fa5, 4(sp)
+; RV32IZFH-NEXT:    fadd.h fa0, fa5, fs0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -173,8 +173,8 @@ define half @flh_stack(half %a) nounwind {
 ; RV64IZFH-NEXT:    fmv.h fs0, fa0
 ; RV64IZFH-NEXT:    mv a0, sp
 ; RV64IZFH-NEXT:    call notdead@plt
-; RV64IZFH-NEXT:    flh ft0, 0(sp)
-; RV64IZFH-NEXT:    fadd.h fa0, ft0, fs0
+; RV64IZFH-NEXT:    flh fa5, 0(sp)
+; RV64IZFH-NEXT:    fadd.h fa0, fa5, fs0
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -188,11 +188,11 @@ define half @flh_stack(half %a) nounwind {
 ; RV32IZFHMIN-NEXT:    fmv.s fs0, fa0
 ; RV32IZFHMIN-NEXT:    addi a0, sp, 4
 ; RV32IZFHMIN-NEXT:    call notdead@plt
-; RV32IZFHMIN-NEXT:    flh ft0, 4(sp)
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fs0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IZFHMIN-NEXT:    flh fa5, 4(sp)
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fs0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -206,11 +206,11 @@ define half @flh_stack(half %a) nounwind {
 ; RV64IZFHMIN-NEXT:    fmv.s fs0, fa0
 ; RV64IZFHMIN-NEXT:    mv a0, sp
 ; RV64IZFHMIN-NEXT:    call notdead@plt
-; RV64IZFHMIN-NEXT:    flh ft0, 0(sp)
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fs0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    flh fa5, 0(sp)
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fs0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
 ; RV64IZFHMIN-NEXT:    addi sp, sp, 16
@@ -227,8 +227,8 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    fadd.h ft0, fa0, fa1
-; RV32IZFH-NEXT:    fsh ft0, 8(sp)
+; RV32IZFH-NEXT:    fadd.h fa5, fa0, fa1
+; RV32IZFH-NEXT:    fsh fa5, 8(sp)
 ; RV32IZFH-NEXT:    addi a0, sp, 8
 ; RV32IZFH-NEXT:    call notdead@plt
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -239,8 +239,8 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT:    fadd.h ft0, fa0, fa1
-; RV64IZFH-NEXT:    fsh ft0, 4(sp)
+; RV64IZFH-NEXT:    fadd.h fa5, fa0, fa1
+; RV64IZFH-NEXT:    fsh fa5, 4(sp)
 ; RV64IZFH-NEXT:    addi a0, sp, 4
 ; RV64IZFH-NEXT:    call notdead@plt
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -251,11 +251,11 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
 ; RV32IZFHMIN:       # %bb.0:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV32IZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fsh ft0, 8(sp)
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV32IZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fsh fa5, 8(sp)
 ; RV32IZFHMIN-NEXT:    addi a0, sp, 8
 ; RV32IZFHMIN-NEXT:    call notdead@plt
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -266,11 +266,11 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV64IZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fsh ft0, 4(sp)
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fsh fa5, 4(sp)
 ; RV64IZFHMIN-NEXT:    addi a0, sp, 4
 ; RV64IZFHMIN-NEXT:    call notdead@plt
 ; RV64IZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
index 453deaf0b952..63af17b80a41 100644
--- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
@@ -20,21 +20,21 @@ define signext i32 @test_floor_si32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_floor_si32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB0_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB0_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; CHECKIZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; CHECKIZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECKIZFHMIN-NEXT:    seqz a1, a1
 ; CHECKIZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECKIZFHMIN-NEXT:    and a0, a1, a0
@@ -52,19 +52,19 @@ define i64 @test_floor_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI1_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB1_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rdn
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rdn
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rdn
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB1_2:
 ; RV32IZFH-NEXT:    fcvt.s.h fs0, fa0
 ; RV32IZFH-NEXT:    lui a0, 913408
-; RV32IZFH-NEXT:    fmv.w.x ft0, a0
-; RV32IZFH-NEXT:    fle.s s0, ft0, fs0
+; RV32IZFH-NEXT:    fmv.w.x fa5, a0
+; RV32IZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
 ; RV32IZFH-NEXT:    call __fixsfdi@plt
 ; RV32IZFH-NEXT:    lui a3, 524288
@@ -73,8 +73,8 @@ define i64 @test_floor_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    lui a1, 524288
 ; RV32IZFH-NEXT:  .LBB1_4:
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI1_1)
-; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI1_1)(a2)
-; RV32IZFH-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI1_1)(a2)
+; RV32IZFH-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFH-NEXT:    beqz a2, .LBB1_6
 ; RV32IZFH-NEXT:  # %bb.5:
 ; RV32IZFH-NEXT:    addi a1, a3, -1
@@ -105,26 +105,26 @@ define i64 @test_floor_si64(half %x) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: test_floor_si64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB1_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB1_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, fa5
 ; RV32IZFHMIN-NEXT:    lui a0, 913408
-; RV32IZFHMIN-NEXT:    fmv.w.x ft0, a0
-; RV32IZFHMIN-NEXT:    fle.s s0, ft0, fs0
+; RV32IZFHMIN-NEXT:    fmv.w.x fa5, a0
+; RV32IZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV32IZFHMIN-NEXT:    call __fixsfdi@plt
 ; RV32IZFHMIN-NEXT:    lui a3, 524288
@@ -133,8 +133,8 @@ define i64 @test_floor_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    lui a1, 524288
 ; RV32IZFHMIN-NEXT:  .LBB1_4:
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI1_0)
-; RV32IZFHMIN-NEXT:    flw ft0, %lo(.LCPI1_0)(a2)
-; RV32IZFHMIN-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI1_0)(a2)
+; RV32IZFHMIN-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFHMIN-NEXT:    beqz a2, .LBB1_6
 ; RV32IZFHMIN-NEXT:  # %bb.5:
 ; RV32IZFHMIN-NEXT:    addi a1, a3, -1
@@ -156,21 +156,21 @@ define i64 @test_floor_si64(half %x) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: test_floor_si64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB1_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB1_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a1, a0
@@ -192,21 +192,21 @@ define signext i32 @test_floor_ui32(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_floor_ui32:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB2_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB2_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
-; RV32IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
+; RV32IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV32IZFHMIN-NEXT:    seqz a1, a1
 ; RV32IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV32IZFHMIN-NEXT:    and a0, a1, a0
@@ -214,21 +214,21 @@ define signext i32 @test_floor_ui32(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_floor_ui32:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB2_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB2_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a0, a1
@@ -246,25 +246,25 @@ define i64 @test_floor_ui64(half %x) nounwind {
 ; RV32IZFH-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI3_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB3_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rdn
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rdn
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rdn
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB3_2:
 ; RV32IZFH-NEXT:    fcvt.s.h fs0, fa0
-; RV32IZFH-NEXT:    fmv.w.x ft0, zero
-; RV32IZFH-NEXT:    fle.s a0, ft0, fs0
+; RV32IZFH-NEXT:    fmv.w.x fa5, zero
+; RV32IZFH-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFH-NEXT:    neg s0, a0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
 ; RV32IZFH-NEXT:    call __fixunssfdi@plt
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI3_1)
-; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI3_1)(a2)
+; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI3_1)(a2)
 ; RV32IZFH-NEXT:    and a0, s0, a0
-; RV32IZFH-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFH-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFH-NEXT:    neg a2, a2
 ; RV32IZFH-NEXT:    or a0, a2, a0
 ; RV32IZFH-NEXT:    and a1, s0, a1
@@ -286,32 +286,32 @@ define i64 @test_floor_ui64(half %x) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: test_floor_ui64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB3_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB3_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, ft0
-; RV32IZFHMIN-NEXT:    fmv.w.x ft0, zero
-; RV32IZFHMIN-NEXT:    fle.s a0, ft0, fs0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, fa5
+; RV32IZFHMIN-NEXT:    fmv.w.x fa5, zero
+; RV32IZFHMIN-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg s0, a0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV32IZFHMIN-NEXT:    call __fixunssfdi@plt
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI3_0)
-; RV32IZFHMIN-NEXT:    flw ft0, %lo(.LCPI3_0)(a2)
+; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI3_0)(a2)
 ; RV32IZFHMIN-NEXT:    and a0, s0, a0
-; RV32IZFHMIN-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFHMIN-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg a2, a2
 ; RV32IZFHMIN-NEXT:    or a0, a2, a0
 ; RV32IZFHMIN-NEXT:    and a1, s0, a1
@@ -324,21 +324,21 @@ define i64 @test_floor_ui64(half %x) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: test_floor_ui64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB3_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB3_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a1, a0
@@ -360,21 +360,21 @@ define signext i32 @test_ceil_si32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_ceil_si32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB4_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB4_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; CHECKIZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; CHECKIZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECKIZFHMIN-NEXT:    seqz a1, a1
 ; CHECKIZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECKIZFHMIN-NEXT:    and a0, a1, a0
@@ -392,19 +392,19 @@ define i64 @test_ceil_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI5_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI5_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB5_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rup
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rup
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rup
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB5_2:
 ; RV32IZFH-NEXT:    fcvt.s.h fs0, fa0
 ; RV32IZFH-NEXT:    lui a0, 913408
-; RV32IZFH-NEXT:    fmv.w.x ft0, a0
-; RV32IZFH-NEXT:    fle.s s0, ft0, fs0
+; RV32IZFH-NEXT:    fmv.w.x fa5, a0
+; RV32IZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
 ; RV32IZFH-NEXT:    call __fixsfdi@plt
 ; RV32IZFH-NEXT:    lui a3, 524288
@@ -413,8 +413,8 @@ define i64 @test_ceil_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    lui a1, 524288
 ; RV32IZFH-NEXT:  .LBB5_4:
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI5_1)
-; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI5_1)(a2)
-; RV32IZFH-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI5_1)(a2)
+; RV32IZFH-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFH-NEXT:    beqz a2, .LBB5_6
 ; RV32IZFH-NEXT:  # %bb.5:
 ; RV32IZFH-NEXT:    addi a1, a3, -1
@@ -445,26 +445,26 @@ define i64 @test_ceil_si64(half %x) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: test_ceil_si64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB5_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB5_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, fa5
 ; RV32IZFHMIN-NEXT:    lui a0, 913408
-; RV32IZFHMIN-NEXT:    fmv.w.x ft0, a0
-; RV32IZFHMIN-NEXT:    fle.s s0, ft0, fs0
+; RV32IZFHMIN-NEXT:    fmv.w.x fa5, a0
+; RV32IZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV32IZFHMIN-NEXT:    call __fixsfdi@plt
 ; RV32IZFHMIN-NEXT:    lui a3, 524288
@@ -473,8 +473,8 @@ define i64 @test_ceil_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    lui a1, 524288
 ; RV32IZFHMIN-NEXT:  .LBB5_4:
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI5_0)
-; RV32IZFHMIN-NEXT:    flw ft0, %lo(.LCPI5_0)(a2)
-; RV32IZFHMIN-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI5_0)(a2)
+; RV32IZFHMIN-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFHMIN-NEXT:    beqz a2, .LBB5_6
 ; RV32IZFHMIN-NEXT:  # %bb.5:
 ; RV32IZFHMIN-NEXT:    addi a1, a3, -1
@@ -496,21 +496,21 @@ define i64 @test_ceil_si64(half %x) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: test_ceil_si64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB5_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB5_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a1, a0
@@ -532,21 +532,21 @@ define signext i32 @test_ceil_ui32(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_ceil_ui32:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB6_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB6_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
-; RV32IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
+; RV32IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV32IZFHMIN-NEXT:    seqz a1, a1
 ; RV32IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV32IZFHMIN-NEXT:    and a0, a1, a0
@@ -554,21 +554,21 @@ define signext i32 @test_ceil_ui32(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_ceil_ui32:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB6_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB6_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a0, a1
@@ -586,25 +586,25 @@ define i64 @test_ceil_ui64(half %x) nounwind {
 ; RV32IZFH-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI7_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI7_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI7_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB7_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rup
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rup
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rup
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB7_2:
 ; RV32IZFH-NEXT:    fcvt.s.h fs0, fa0
-; RV32IZFH-NEXT:    fmv.w.x ft0, zero
-; RV32IZFH-NEXT:    fle.s a0, ft0, fs0
+; RV32IZFH-NEXT:    fmv.w.x fa5, zero
+; RV32IZFH-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFH-NEXT:    neg s0, a0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
 ; RV32IZFH-NEXT:    call __fixunssfdi@plt
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI7_1)
-; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI7_1)(a2)
+; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI7_1)(a2)
 ; RV32IZFH-NEXT:    and a0, s0, a0
-; RV32IZFH-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFH-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFH-NEXT:    neg a2, a2
 ; RV32IZFH-NEXT:    or a0, a2, a0
 ; RV32IZFH-NEXT:    and a1, s0, a1
@@ -626,32 +626,32 @@ define i64 @test_ceil_ui64(half %x) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: test_ceil_ui64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB7_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB7_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, ft0
-; RV32IZFHMIN-NEXT:    fmv.w.x ft0, zero
-; RV32IZFHMIN-NEXT:    fle.s a0, ft0, fs0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, fa5
+; RV32IZFHMIN-NEXT:    fmv.w.x fa5, zero
+; RV32IZFHMIN-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg s0, a0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV32IZFHMIN-NEXT:    call __fixunssfdi@plt
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI7_0)
-; RV32IZFHMIN-NEXT:    flw ft0, %lo(.LCPI7_0)(a2)
+; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI7_0)(a2)
 ; RV32IZFHMIN-NEXT:    and a0, s0, a0
-; RV32IZFHMIN-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFHMIN-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg a2, a2
 ; RV32IZFHMIN-NEXT:    or a0, a2, a0
 ; RV32IZFHMIN-NEXT:    and a1, s0, a1
@@ -664,21 +664,21 @@ define i64 @test_ceil_ui64(half %x) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: test_ceil_ui64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB7_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB7_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a1, a0
@@ -700,21 +700,21 @@ define signext i32 @test_trunc_si32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_trunc_si32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB8_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB8_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; CHECKIZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; CHECKIZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECKIZFHMIN-NEXT:    seqz a1, a1
 ; CHECKIZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECKIZFHMIN-NEXT:    and a0, a1, a0
@@ -732,19 +732,19 @@ define i64 @test_trunc_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI9_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI9_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI9_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB9_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rtz
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rtz
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB9_2:
 ; RV32IZFH-NEXT:    fcvt.s.h fs0, fa0
 ; RV32IZFH-NEXT:    lui a0, 913408
-; RV32IZFH-NEXT:    fmv.w.x ft0, a0
-; RV32IZFH-NEXT:    fle.s s0, ft0, fs0
+; RV32IZFH-NEXT:    fmv.w.x fa5, a0
+; RV32IZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
 ; RV32IZFH-NEXT:    call __fixsfdi@plt
 ; RV32IZFH-NEXT:    lui a3, 524288
@@ -753,8 +753,8 @@ define i64 @test_trunc_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    lui a1, 524288
 ; RV32IZFH-NEXT:  .LBB9_4:
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI9_1)
-; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI9_1)(a2)
-; RV32IZFH-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI9_1)(a2)
+; RV32IZFH-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFH-NEXT:    beqz a2, .LBB9_6
 ; RV32IZFH-NEXT:  # %bb.5:
 ; RV32IZFH-NEXT:    addi a1, a3, -1
@@ -785,26 +785,26 @@ define i64 @test_trunc_si64(half %x) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: test_trunc_si64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB9_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB9_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, fa5
 ; RV32IZFHMIN-NEXT:    lui a0, 913408
-; RV32IZFHMIN-NEXT:    fmv.w.x ft0, a0
-; RV32IZFHMIN-NEXT:    fle.s s0, ft0, fs0
+; RV32IZFHMIN-NEXT:    fmv.w.x fa5, a0
+; RV32IZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV32IZFHMIN-NEXT:    call __fixsfdi@plt
 ; RV32IZFHMIN-NEXT:    lui a3, 524288
@@ -813,8 +813,8 @@ define i64 @test_trunc_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    lui a1, 524288
 ; RV32IZFHMIN-NEXT:  .LBB9_4:
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI9_0)
-; RV32IZFHMIN-NEXT:    flw ft0, %lo(.LCPI9_0)(a2)
-; RV32IZFHMIN-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI9_0)(a2)
+; RV32IZFHMIN-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFHMIN-NEXT:    beqz a2, .LBB9_6
 ; RV32IZFHMIN-NEXT:  # %bb.5:
 ; RV32IZFHMIN-NEXT:    addi a1, a3, -1
@@ -836,21 +836,21 @@ define i64 @test_trunc_si64(half %x) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_si64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB9_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB9_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a1, a0
@@ -872,21 +872,21 @@ define signext i32 @test_trunc_ui32(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_trunc_ui32:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB10_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB10_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
-; RV32IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
+; RV32IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV32IZFHMIN-NEXT:    seqz a1, a1
 ; RV32IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV32IZFHMIN-NEXT:    and a0, a1, a0
@@ -894,21 +894,21 @@ define signext i32 @test_trunc_ui32(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_ui32:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB10_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB10_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a0, a1
@@ -926,25 +926,25 @@ define i64 @test_trunc_ui64(half %x) nounwind {
 ; RV32IZFH-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI11_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI11_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI11_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB11_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rtz
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rtz
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB11_2:
 ; RV32IZFH-NEXT:    fcvt.s.h fs0, fa0
-; RV32IZFH-NEXT:    fmv.w.x ft0, zero
-; RV32IZFH-NEXT:    fle.s a0, ft0, fs0
+; RV32IZFH-NEXT:    fmv.w.x fa5, zero
+; RV32IZFH-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFH-NEXT:    neg s0, a0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
 ; RV32IZFH-NEXT:    call __fixunssfdi@plt
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI11_1)
-; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI11_1)(a2)
+; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI11_1)(a2)
 ; RV32IZFH-NEXT:    and a0, s0, a0
-; RV32IZFH-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFH-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFH-NEXT:    neg a2, a2
 ; RV32IZFH-NEXT:    or a0, a2, a0
 ; RV32IZFH-NEXT:    and a1, s0, a1
@@ -966,32 +966,32 @@ define i64 @test_trunc_ui64(half %x) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: test_trunc_ui64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB11_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB11_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, ft0
-; RV32IZFHMIN-NEXT:    fmv.w.x ft0, zero
-; RV32IZFHMIN-NEXT:    fle.s a0, ft0, fs0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, fa5
+; RV32IZFHMIN-NEXT:    fmv.w.x fa5, zero
+; RV32IZFHMIN-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg s0, a0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV32IZFHMIN-NEXT:    call __fixunssfdi@plt
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI11_0)
-; RV32IZFHMIN-NEXT:    flw ft0, %lo(.LCPI11_0)(a2)
+; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI11_0)(a2)
 ; RV32IZFHMIN-NEXT:    and a0, s0, a0
-; RV32IZFHMIN-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFHMIN-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg a2, a2
 ; RV32IZFHMIN-NEXT:    or a0, a2, a0
 ; RV32IZFHMIN-NEXT:    and a1, s0, a1
@@ -1004,21 +1004,21 @@ define i64 @test_trunc_ui64(half %x) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_ui64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB11_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB11_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a1, a0
@@ -1040,21 +1040,21 @@ define signext i32 @test_round_si32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_round_si32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB12_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB12_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; CHECKIZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; CHECKIZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECKIZFHMIN-NEXT:    seqz a1, a1
 ; CHECKIZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECKIZFHMIN-NEXT:    and a0, a1, a0
@@ -1072,19 +1072,19 @@ define i64 @test_round_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI13_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI13_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI13_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB13_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rmm
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rmm
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rmm
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB13_2:
 ; RV32IZFH-NEXT:    fcvt.s.h fs0, fa0
 ; RV32IZFH-NEXT:    lui a0, 913408
-; RV32IZFH-NEXT:    fmv.w.x ft0, a0
-; RV32IZFH-NEXT:    fle.s s0, ft0, fs0
+; RV32IZFH-NEXT:    fmv.w.x fa5, a0
+; RV32IZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
 ; RV32IZFH-NEXT:    call __fixsfdi@plt
 ; RV32IZFH-NEXT:    lui a3, 524288
@@ -1093,8 +1093,8 @@ define i64 @test_round_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    lui a1, 524288
 ; RV32IZFH-NEXT:  .LBB13_4:
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI13_1)
-; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI13_1)(a2)
-; RV32IZFH-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI13_1)(a2)
+; RV32IZFH-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFH-NEXT:    beqz a2, .LBB13_6
 ; RV32IZFH-NEXT:  # %bb.5:
 ; RV32IZFH-NEXT:    addi a1, a3, -1
@@ -1125,26 +1125,26 @@ define i64 @test_round_si64(half %x) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: test_round_si64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB13_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB13_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, fa5
 ; RV32IZFHMIN-NEXT:    lui a0, 913408
-; RV32IZFHMIN-NEXT:    fmv.w.x ft0, a0
-; RV32IZFHMIN-NEXT:    fle.s s0, ft0, fs0
+; RV32IZFHMIN-NEXT:    fmv.w.x fa5, a0
+; RV32IZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV32IZFHMIN-NEXT:    call __fixsfdi@plt
 ; RV32IZFHMIN-NEXT:    lui a3, 524288
@@ -1153,8 +1153,8 @@ define i64 @test_round_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    lui a1, 524288
 ; RV32IZFHMIN-NEXT:  .LBB13_4:
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI13_0)
-; RV32IZFHMIN-NEXT:    flw ft0, %lo(.LCPI13_0)(a2)
-; RV32IZFHMIN-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI13_0)(a2)
+; RV32IZFHMIN-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFHMIN-NEXT:    beqz a2, .LBB13_6
 ; RV32IZFHMIN-NEXT:  # %bb.5:
 ; RV32IZFHMIN-NEXT:    addi a1, a3, -1
@@ -1176,21 +1176,21 @@ define i64 @test_round_si64(half %x) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: test_round_si64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB13_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB13_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a1, a0
@@ -1212,21 +1212,21 @@ define signext i32 @test_round_ui32(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_round_ui32:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB14_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB14_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
-; RV32IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
+; RV32IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV32IZFHMIN-NEXT:    seqz a1, a1
 ; RV32IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV32IZFHMIN-NEXT:    and a0, a1, a0
@@ -1234,21 +1234,21 @@ define signext i32 @test_round_ui32(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_round_ui32:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB14_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB14_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a0, a1
@@ -1266,25 +1266,25 @@ define i64 @test_round_ui64(half %x) nounwind {
 ; RV32IZFH-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI15_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI15_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI15_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB15_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rmm
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rmm
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rmm
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB15_2:
 ; RV32IZFH-NEXT:    fcvt.s.h fs0, fa0
-; RV32IZFH-NEXT:    fmv.w.x ft0, zero
-; RV32IZFH-NEXT:    fle.s a0, ft0, fs0
+; RV32IZFH-NEXT:    fmv.w.x fa5, zero
+; RV32IZFH-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFH-NEXT:    neg s0, a0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
 ; RV32IZFH-NEXT:    call __fixunssfdi@plt
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI15_1)
-; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI15_1)(a2)
+; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI15_1)(a2)
 ; RV32IZFH-NEXT:    and a0, s0, a0
-; RV32IZFH-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFH-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFH-NEXT:    neg a2, a2
 ; RV32IZFH-NEXT:    or a0, a2, a0
 ; RV32IZFH-NEXT:    and a1, s0, a1
@@ -1306,32 +1306,32 @@ define i64 @test_round_ui64(half %x) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: test_round_ui64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB15_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB15_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, ft0
-; RV32IZFHMIN-NEXT:    fmv.w.x ft0, zero
-; RV32IZFHMIN-NEXT:    fle.s a0, ft0, fs0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, fa5
+; RV32IZFHMIN-NEXT:    fmv.w.x fa5, zero
+; RV32IZFHMIN-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg s0, a0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV32IZFHMIN-NEXT:    call __fixunssfdi@plt
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI15_0)
-; RV32IZFHMIN-NEXT:    flw ft0, %lo(.LCPI15_0)(a2)
+; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI15_0)(a2)
 ; RV32IZFHMIN-NEXT:    and a0, s0, a0
-; RV32IZFHMIN-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFHMIN-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg a2, a2
 ; RV32IZFHMIN-NEXT:    or a0, a2, a0
 ; RV32IZFHMIN-NEXT:    and a1, s0, a1
@@ -1344,21 +1344,21 @@ define i64 @test_round_ui64(half %x) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: test_round_ui64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB15_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB15_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a1, a0
@@ -1380,21 +1380,21 @@ define signext i32 @test_roundeven_si32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_roundeven_si32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB16_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB16_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; CHECKIZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; CHECKIZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; CHECKIZFHMIN-NEXT:    seqz a1, a1
 ; CHECKIZFHMIN-NEXT:    addi a1, a1, -1
 ; CHECKIZFHMIN-NEXT:    and a0, a1, a0
@@ -1412,19 +1412,19 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI17_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI17_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI17_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB17_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rne
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rne
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rne
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB17_2:
 ; RV32IZFH-NEXT:    fcvt.s.h fs0, fa0
 ; RV32IZFH-NEXT:    lui a0, 913408
-; RV32IZFH-NEXT:    fmv.w.x ft0, a0
-; RV32IZFH-NEXT:    fle.s s0, ft0, fs0
+; RV32IZFH-NEXT:    fmv.w.x fa5, a0
+; RV32IZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
 ; RV32IZFH-NEXT:    call __fixsfdi@plt
 ; RV32IZFH-NEXT:    lui a3, 524288
@@ -1433,8 +1433,8 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZFH-NEXT:    lui a1, 524288
 ; RV32IZFH-NEXT:  .LBB17_4:
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI17_1)
-; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI17_1)(a2)
-; RV32IZFH-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI17_1)(a2)
+; RV32IZFH-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFH-NEXT:    beqz a2, .LBB17_6
 ; RV32IZFH-NEXT:  # %bb.5:
 ; RV32IZFH-NEXT:    addi a1, a3, -1
@@ -1465,26 +1465,26 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_si64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB17_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB17_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, fa5
 ; RV32IZFHMIN-NEXT:    lui a0, 913408
-; RV32IZFHMIN-NEXT:    fmv.w.x ft0, a0
-; RV32IZFHMIN-NEXT:    fle.s s0, ft0, fs0
+; RV32IZFHMIN-NEXT:    fmv.w.x fa5, a0
+; RV32IZFHMIN-NEXT:    fle.s s0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV32IZFHMIN-NEXT:    call __fixsfdi@plt
 ; RV32IZFHMIN-NEXT:    lui a3, 524288
@@ -1493,8 +1493,8 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT:    lui a1, 524288
 ; RV32IZFHMIN-NEXT:  .LBB17_4:
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI17_0)
-; RV32IZFHMIN-NEXT:    flw ft0, %lo(.LCPI17_0)(a2)
-; RV32IZFHMIN-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI17_0)(a2)
+; RV32IZFHMIN-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFHMIN-NEXT:    beqz a2, .LBB17_6
 ; RV32IZFHMIN-NEXT:  # %bb.5:
 ; RV32IZFHMIN-NEXT:    addi a1, a3, -1
@@ -1516,21 +1516,21 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_si64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB17_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB17_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a1, a0
@@ -1552,21 +1552,21 @@ define signext i32 @test_roundeven_ui32(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_ui32:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB18_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB18_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
-; RV32IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
+; RV32IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV32IZFHMIN-NEXT:    seqz a1, a1
 ; RV32IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV32IZFHMIN-NEXT:    and a0, a1, a0
@@ -1574,21 +1574,21 @@ define signext i32 @test_roundeven_ui32(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_ui32:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB18_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB18_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a0, a1
@@ -1606,25 +1606,25 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
 ; RV32IZFH-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI19_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI19_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI19_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB19_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rne
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rne
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rne
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB19_2:
 ; RV32IZFH-NEXT:    fcvt.s.h fs0, fa0
-; RV32IZFH-NEXT:    fmv.w.x ft0, zero
-; RV32IZFH-NEXT:    fle.s a0, ft0, fs0
+; RV32IZFH-NEXT:    fmv.w.x fa5, zero
+; RV32IZFH-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFH-NEXT:    neg s0, a0
 ; RV32IZFH-NEXT:    fmv.s fa0, fs0
 ; RV32IZFH-NEXT:    call __fixunssfdi@plt
 ; RV32IZFH-NEXT:    lui a2, %hi(.LCPI19_1)
-; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI19_1)(a2)
+; RV32IZFH-NEXT:    flw fa5, %lo(.LCPI19_1)(a2)
 ; RV32IZFH-NEXT:    and a0, s0, a0
-; RV32IZFH-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFH-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFH-NEXT:    neg a2, a2
 ; RV32IZFH-NEXT:    or a0, a2, a0
 ; RV32IZFH-NEXT:    and a1, s0, a1
@@ -1646,32 +1646,32 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_ui64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB19_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB19_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, ft0
-; RV32IZFHMIN-NEXT:    fmv.w.x ft0, zero
-; RV32IZFHMIN-NEXT:    fle.s a0, ft0, fs0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fs0, fa5
+; RV32IZFHMIN-NEXT:    fmv.w.x fa5, zero
+; RV32IZFHMIN-NEXT:    fle.s a0, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg s0, a0
 ; RV32IZFHMIN-NEXT:    fmv.s fa0, fs0
 ; RV32IZFHMIN-NEXT:    call __fixunssfdi@plt
 ; RV32IZFHMIN-NEXT:    lui a2, %hi(.LCPI19_0)
-; RV32IZFHMIN-NEXT:    flw ft0, %lo(.LCPI19_0)(a2)
+; RV32IZFHMIN-NEXT:    flw fa5, %lo(.LCPI19_0)(a2)
 ; RV32IZFHMIN-NEXT:    and a0, s0, a0
-; RV32IZFHMIN-NEXT:    flt.s a2, ft0, fs0
+; RV32IZFHMIN-NEXT:    flt.s a2, fa5, fs0
 ; RV32IZFHMIN-NEXT:    neg a2, a2
 ; RV32IZFHMIN-NEXT:    or a0, a2, a0
 ; RV32IZFHMIN-NEXT:    and a1, s0, a1
@@ -1684,21 +1684,21 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_ui64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB19_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB19_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    feq.s a1, ft0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    feq.s a1, fa5, fa5
 ; RV64IZFHMIN-NEXT:    seqz a1, a1
 ; RV64IZFHMIN-NEXT:    addi a1, a1, -1
 ; RV64IZFHMIN-NEXT:    and a0, a1, a0

diff --git a/llvm/test/CodeGen/RISCV/half-round-conv.ll b/llvm/test/CodeGen/RISCV/half-round-conv.ll
index af0432bdf373..db1c8a41d1e2 100644
--- a/llvm/test/CodeGen/RISCV/half-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/half-round-conv.ll
@@ -21,38 +21,38 @@ define signext i8 @test_floor_si8(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_floor_si8:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB0_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB0_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_floor_si8:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB0_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB0_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.floor.f16(half %x)
   %b = fptosi half %a to i8
@@ -72,38 +72,38 @@ define signext i16 @test_floor_si16(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_floor_si16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB1_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB1_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_floor_si16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB1_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB1_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.floor.f16(half %x)
   %b = fptosi half %a to i16
@@ -118,20 +118,20 @@ define signext i32 @test_floor_si32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_floor_si32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB2_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB2_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.floor.f16(half %x)
   %b = fptosi half %a to i32
@@ -146,14 +146,14 @@ define i64 @test_floor_si64(half %x) {
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI3_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB3_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rdn
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rdn
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rdn
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB3_2:
 ; RV32IZFH-NEXT:    call __fixhfdi@plt
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -167,22 +167,22 @@ define i64 @test_floor_si64(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_floor_si64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB3_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB3_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixhfdi@plt
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -190,20 +190,20 @@ define i64 @test_floor_si64(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_floor_si64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB3_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB3_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.floor.f16(half %x)
   %b = fptosi half %a to i64
@@ -223,38 +223,38 @@ define zeroext i8 @test_floor_ui8(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_floor_ui8:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB4_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB4_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_floor_ui8:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB4_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB4_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.floor.f16(half %x)
   %b = fptoui half %a to i8
@@ -274,38 +274,38 @@ define zeroext i16 @test_floor_ui16(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_floor_ui16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB5_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB5_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_floor_ui16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB5_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB5_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.floor.f16(half %x)
   %b = fptoui half %a to i16
@@ -320,20 +320,20 @@ define signext i32 @test_floor_ui32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_floor_ui32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB6_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB6_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.floor.f16(half %x)
   %b = fptoui half %a to i32
@@ -348,14 +348,14 @@ define i64 @test_floor_ui64(half %x) {
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI7_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI7_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI7_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB7_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rdn
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rdn
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rdn
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB7_2:
 ; RV32IZFH-NEXT:    call __fixunshfdi@plt
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -369,22 +369,22 @@ define i64 @test_floor_ui64(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_floor_ui64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB7_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB7_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixunshfdi@plt
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -392,20 +392,20 @@ define i64 @test_floor_ui64(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_floor_ui64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB7_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB7_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.floor.f16(half %x)
   %b = fptoui half %a to i64
@@ -425,38 +425,38 @@ define signext i8 @test_ceil_si8(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_ceil_si8:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB8_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB8_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_ceil_si8:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB8_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB8_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.ceil.f16(half %x)
   %b = fptosi half %a to i8
@@ -476,38 +476,38 @@ define signext i16 @test_ceil_si16(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_ceil_si16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB9_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB9_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_ceil_si16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB9_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB9_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.ceil.f16(half %x)
   %b = fptosi half %a to i16
@@ -522,20 +522,20 @@ define signext i32 @test_ceil_si32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_ceil_si32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB10_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB10_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.ceil.f16(half %x)
   %b = fptosi half %a to i32
@@ -550,14 +550,14 @@ define i64 @test_ceil_si64(half %x) {
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI11_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI11_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI11_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB11_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rup
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rup
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rup
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB11_2:
 ; RV32IZFH-NEXT:    call __fixhfdi@plt
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -571,22 +571,22 @@ define i64 @test_ceil_si64(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_ceil_si64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB11_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB11_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixhfdi@plt
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -594,20 +594,20 @@ define i64 @test_ceil_si64(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_ceil_si64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB11_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB11_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.ceil.f16(half %x)
   %b = fptosi half %a to i64
@@ -627,38 +627,38 @@ define zeroext i8 @test_ceil_ui8(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_ceil_ui8:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB12_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB12_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_ceil_ui8:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB12_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB12_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.ceil.f16(half %x)
   %b = fptoui half %a to i8
@@ -678,38 +678,38 @@ define zeroext i16 @test_ceil_ui16(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_ceil_ui16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB13_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB13_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_ceil_ui16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB13_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB13_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.ceil.f16(half %x)
   %b = fptoui half %a to i16
@@ -724,20 +724,20 @@ define signext i32 @test_ceil_ui32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_ceil_ui32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB14_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB14_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.ceil.f16(half %x)
   %b = fptoui half %a to i32
@@ -752,14 +752,14 @@ define i64 @test_ceil_ui64(half %x) {
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI15_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI15_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI15_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB15_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rup
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rup
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rup
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB15_2:
 ; RV32IZFH-NEXT:    call __fixunshfdi@plt
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -773,22 +773,22 @@ define i64 @test_ceil_ui64(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_ceil_ui64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB15_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB15_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixunshfdi@plt
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -796,20 +796,20 @@ define i64 @test_ceil_ui64(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_ceil_ui64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB15_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB15_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.ceil.f16(half %x)
   %b = fptoui half %a to i64
@@ -829,38 +829,38 @@ define signext i8 @test_trunc_si8(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_trunc_si8:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB16_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB16_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_si8:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB16_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB16_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.trunc.f16(half %x)
   %b = fptosi half %a to i8
@@ -880,38 +880,38 @@ define signext i16 @test_trunc_si16(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_trunc_si16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB17_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB17_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_si16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB17_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB17_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.trunc.f16(half %x)
   %b = fptosi half %a to i16
@@ -926,20 +926,20 @@ define signext i32 @test_trunc_si32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_trunc_si32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB18_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB18_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.trunc.f16(half %x)
   %b = fptosi half %a to i32
@@ -954,14 +954,14 @@ define i64 @test_trunc_si64(half %x) {
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI19_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI19_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI19_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB19_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rtz
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rtz
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB19_2:
 ; RV32IZFH-NEXT:    call __fixhfdi@plt
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -975,22 +975,22 @@ define i64 @test_trunc_si64(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_trunc_si64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB19_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB19_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixhfdi@plt
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -998,20 +998,20 @@ define i64 @test_trunc_si64(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_si64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB19_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB19_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.trunc.f16(half %x)
   %b = fptosi half %a to i64
@@ -1031,38 +1031,38 @@ define zeroext i8 @test_trunc_ui8(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_trunc_ui8:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB20_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB20_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_ui8:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB20_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB20_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.trunc.f16(half %x)
   %b = fptoui half %a to i8
@@ -1082,38 +1082,38 @@ define zeroext i16 @test_trunc_ui16(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_trunc_ui16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB21_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB21_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_ui16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB21_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB21_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.trunc.f16(half %x)
   %b = fptoui half %a to i16
@@ -1128,20 +1128,20 @@ define signext i32 @test_trunc_ui32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_trunc_ui32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB22_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB22_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.trunc.f16(half %x)
   %b = fptoui half %a to i32
@@ -1156,14 +1156,14 @@ define i64 @test_trunc_ui64(half %x) {
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI23_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI23_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI23_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB23_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rtz
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rtz
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB23_2:
 ; RV32IZFH-NEXT:    call __fixunshfdi@plt
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1177,22 +1177,22 @@ define i64 @test_trunc_ui64(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_trunc_ui64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB23_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB23_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixunshfdi@plt
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -1200,20 +1200,20 @@ define i64 @test_trunc_ui64(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_ui64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB23_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB23_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.trunc.f16(half %x)
   %b = fptoui half %a to i64
@@ -1233,38 +1233,38 @@ define signext i8 @test_round_si8(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_round_si8:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB24_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB24_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_round_si8:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB24_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB24_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.round.f16(half %x)
   %b = fptosi half %a to i8
@@ -1284,38 +1284,38 @@ define signext i16 @test_round_si16(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_round_si16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB25_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB25_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_round_si16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB25_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB25_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.round.f16(half %x)
   %b = fptosi half %a to i16
@@ -1330,20 +1330,20 @@ define signext i32 @test_round_si32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_round_si32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB26_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB26_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.round.f16(half %x)
   %b = fptosi half %a to i32
@@ -1358,14 +1358,14 @@ define i64 @test_round_si64(half %x) {
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI27_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI27_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI27_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB27_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rmm
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rmm
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rmm
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB27_2:
 ; RV32IZFH-NEXT:    call __fixhfdi@plt
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1379,22 +1379,22 @@ define i64 @test_round_si64(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_round_si64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB27_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB27_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixhfdi@plt
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -1402,20 +1402,20 @@ define i64 @test_round_si64(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_round_si64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB27_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB27_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.round.f16(half %x)
   %b = fptosi half %a to i64
@@ -1435,38 +1435,38 @@ define zeroext i8 @test_round_ui8(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_round_ui8:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB28_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB28_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_round_ui8:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB28_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB28_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.round.f16(half %x)
   %b = fptoui half %a to i8
@@ -1486,38 +1486,38 @@ define zeroext i16 @test_round_ui16(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_round_ui16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB29_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB29_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_round_ui16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB29_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB29_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.round.f16(half %x)
   %b = fptoui half %a to i16
@@ -1532,20 +1532,20 @@ define signext i32 @test_round_ui32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_round_ui32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB30_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB30_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.round.f16(half %x)
   %b = fptoui half %a to i32
@@ -1560,14 +1560,14 @@ define i64 @test_round_ui64(half %x) {
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI31_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI31_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI31_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB31_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rmm
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rmm
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rmm
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB31_2:
 ; RV32IZFH-NEXT:    call __fixunshfdi@plt
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1581,22 +1581,22 @@ define i64 @test_round_ui64(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_round_ui64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB31_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB31_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixunshfdi@plt
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -1604,20 +1604,20 @@ define i64 @test_round_ui64(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_round_ui64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB31_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB31_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.round.f16(half %x)
   %b = fptoui half %a to i64
@@ -1637,38 +1637,38 @@ define signext i8 @test_roundeven_si8(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_si8:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB32_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB32_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_si8:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB32_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB32_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.roundeven.f16(half %x)
   %b = fptosi half %a to i8
@@ -1688,38 +1688,38 @@ define signext i16 @test_roundeven_si16(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_si16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB33_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB33_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_si16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB33_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB33_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.roundeven.f16(half %x)
   %b = fptosi half %a to i16
@@ -1734,20 +1734,20 @@ define signext i32 @test_roundeven_si32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_roundeven_si32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB34_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB34_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.roundeven.f16(half %x)
   %b = fptosi half %a to i32
@@ -1762,14 +1762,14 @@ define i64 @test_roundeven_si64(half %x) {
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI35_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI35_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI35_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB35_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rne
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rne
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rne
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB35_2:
 ; RV32IZFH-NEXT:    call __fixhfdi@plt
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1783,22 +1783,22 @@ define i64 @test_roundeven_si64(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_si64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB35_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB35_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixhfdi@plt
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -1806,20 +1806,20 @@ define i64 @test_roundeven_si64(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_si64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB35_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB35_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.roundeven.f16(half %x)
   %b = fptosi half %a to i64
@@ -1839,38 +1839,38 @@ define zeroext i8 @test_roundeven_ui8(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_ui8:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB36_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB36_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_ui8:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB36_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB36_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.roundeven.f16(half %x)
   %b = fptoui half %a to i8
@@ -1890,38 +1890,38 @@ define zeroext i16 @test_roundeven_ui16(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_ui16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB37_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB37_2:
-; RV32IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_ui16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB37_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB37_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.roundeven.f16(half %x)
   %b = fptoui half %a to i16
@@ -1936,20 +1936,20 @@ define signext i32 @test_roundeven_ui32(half %x) {
 ;
 ; CHECKIZFHMIN-LABEL: test_roundeven_ui32:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB38_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB38_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.roundeven.f16(half %x)
   %b = fptoui half %a to i32
@@ -1964,14 +1964,14 @@ define i64 @test_roundeven_ui64(half %x) {
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    .cfi_offset ra, -4
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI39_0)
-; RV32IZFH-NEXT:    flh ft0, %lo(.LCPI39_0)(a0)
-; RV32IZFH-NEXT:    fabs.h ft1, fa0
-; RV32IZFH-NEXT:    flt.h a0, ft1, ft0
+; RV32IZFH-NEXT:    flh fa5, %lo(.LCPI39_0)(a0)
+; RV32IZFH-NEXT:    fabs.h fa4, fa0
+; RV32IZFH-NEXT:    flt.h a0, fa4, fa5
 ; RV32IZFH-NEXT:    beqz a0, .LBB39_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rne
-; RV32IZFH-NEXT:    fcvt.h.w ft0, a0, rne
-; RV32IZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; RV32IZFH-NEXT:    fcvt.h.w fa5, a0, rne
+; RV32IZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; RV32IZFH-NEXT:  .LBB39_2:
 ; RV32IZFH-NEXT:    call __fixunshfdi@plt
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1985,22 +1985,22 @@ define i64 @test_roundeven_ui64(half %x) {
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_ui64:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV32IZFHMIN-NEXT:    lui a0, 307200
-; RV32IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV32IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV32IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV32IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV32IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV32IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV32IZFHMIN-NEXT:    beqz a0, .LBB39_2
 ; RV32IZFHMIN-NEXT:  # %bb.1:
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV32IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV32IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV32IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV32IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV32IZFHMIN-NEXT:  .LBB39_2:
 ; RV32IZFHMIN-NEXT:    addi sp, sp, -16
 ; RV32IZFHMIN-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT:    .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    call __fixunshfdi@plt
 ; RV32IZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT:    addi sp, sp, 16
@@ -2008,20 +2008,20 @@ define i64 @test_roundeven_ui64(half %x) {
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_ui64:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; RV64IZFHMIN-NEXT:    lui a0, 307200
-; RV64IZFHMIN-NEXT:    fmv.w.x ft1, a0
-; RV64IZFHMIN-NEXT:    fabs.s ft2, ft0
-; RV64IZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; RV64IZFHMIN-NEXT:    fmv.w.x fa4, a0
+; RV64IZFHMIN-NEXT:    fabs.s fa3, fa5
+; RV64IZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; RV64IZFHMIN-NEXT:    beqz a0, .LBB39_2
 ; RV64IZFHMIN-NEXT:  # %bb.1:
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; RV64IZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; RV64IZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; RV64IZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; RV64IZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; RV64IZFHMIN-NEXT:  .LBB39_2:
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %a = call half @llvm.roundeven.f16(half %x)
   %b = fptoui half %a to i64
@@ -2053,31 +2053,31 @@ define half @test_floor_half(half %x) {
 ; CHECKIZFH-LABEL: test_floor_half:
 ; CHECKIZFH:       # %bb.0:
 ; CHECKIZFH-NEXT:    lui a0, %hi(.LCPI40_0)
-; CHECKIZFH-NEXT:    flh ft0, %lo(.LCPI40_0)(a0)
-; CHECKIZFH-NEXT:    fabs.h ft1, fa0
-; CHECKIZFH-NEXT:    flt.h a0, ft1, ft0
+; CHECKIZFH-NEXT:    flh fa5, %lo(.LCPI40_0)(a0)
+; CHECKIZFH-NEXT:    fabs.h fa4, fa0
+; CHECKIZFH-NEXT:    flt.h a0, fa4, fa5
 ; CHECKIZFH-NEXT:    beqz a0, .LBB40_2
 ; CHECKIZFH-NEXT:  # %bb.1:
 ; CHECKIZFH-NEXT:    fcvt.w.h a0, fa0, rdn
-; CHECKIZFH-NEXT:    fcvt.h.w ft0, a0, rdn
-; CHECKIZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; CHECKIZFH-NEXT:    fcvt.h.w fa5, a0, rdn
+; CHECKIZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; CHECKIZFH-NEXT:  .LBB40_2:
 ; CHECKIZFH-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: test_floor_half:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB40_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rdn
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rdn
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rdn
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rdn
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB40_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.floor.f16(half %x)
   ret half %a
@@ -2108,31 +2108,31 @@ define half @test_ceil_half(half %x) {
 ; CHECKIZFH-LABEL: test_ceil_half:
 ; CHECKIZFH:       # %bb.0:
 ; CHECKIZFH-NEXT:    lui a0, %hi(.LCPI41_0)
-; CHECKIZFH-NEXT:    flh ft0, %lo(.LCPI41_0)(a0)
-; CHECKIZFH-NEXT:    fabs.h ft1, fa0
-; CHECKIZFH-NEXT:    flt.h a0, ft1, ft0
+; CHECKIZFH-NEXT:    flh fa5, %lo(.LCPI41_0)(a0)
+; CHECKIZFH-NEXT:    fabs.h fa4, fa0
+; CHECKIZFH-NEXT:    flt.h a0, fa4, fa5
 ; CHECKIZFH-NEXT:    beqz a0, .LBB41_2
 ; CHECKIZFH-NEXT:  # %bb.1:
 ; CHECKIZFH-NEXT:    fcvt.w.h a0, fa0, rup
-; CHECKIZFH-NEXT:    fcvt.h.w ft0, a0, rup
-; CHECKIZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; CHECKIZFH-NEXT:    fcvt.h.w fa5, a0, rup
+; CHECKIZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; CHECKIZFH-NEXT:  .LBB41_2:
 ; CHECKIZFH-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: test_ceil_half:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB41_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rup
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rup
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rup
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rup
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB41_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.ceil.f16(half %x)
   ret half %a
@@ -2163,31 +2163,31 @@ define half @test_trunc_half(half %x) {
 ; CHECKIZFH-LABEL: test_trunc_half:
 ; CHECKIZFH:       # %bb.0:
 ; CHECKIZFH-NEXT:    lui a0, %hi(.LCPI42_0)
-; CHECKIZFH-NEXT:    flh ft0, %lo(.LCPI42_0)(a0)
-; CHECKIZFH-NEXT:    fabs.h ft1, fa0
-; CHECKIZFH-NEXT:    flt.h a0, ft1, ft0
+; CHECKIZFH-NEXT:    flh fa5, %lo(.LCPI42_0)(a0)
+; CHECKIZFH-NEXT:    fabs.h fa4, fa0
+; CHECKIZFH-NEXT:    flt.h a0, fa4, fa5
 ; CHECKIZFH-NEXT:    beqz a0, .LBB42_2
 ; CHECKIZFH-NEXT:  # %bb.1:
 ; CHECKIZFH-NEXT:    fcvt.w.h a0, fa0, rtz
-; CHECKIZFH-NEXT:    fcvt.h.w ft0, a0, rtz
-; CHECKIZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; CHECKIZFH-NEXT:    fcvt.h.w fa5, a0, rtz
+; CHECKIZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; CHECKIZFH-NEXT:  .LBB42_2:
 ; CHECKIZFH-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: test_trunc_half:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB42_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rtz
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rtz
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB42_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.trunc.f16(half %x)
   ret half %a
@@ -2218,31 +2218,31 @@ define half @test_round_half(half %x) {
 ; CHECKIZFH-LABEL: test_round_half:
 ; CHECKIZFH:       # %bb.0:
 ; CHECKIZFH-NEXT:    lui a0, %hi(.LCPI43_0)
-; CHECKIZFH-NEXT:    flh ft0, %lo(.LCPI43_0)(a0)
-; CHECKIZFH-NEXT:    fabs.h ft1, fa0
-; CHECKIZFH-NEXT:    flt.h a0, ft1, ft0
+; CHECKIZFH-NEXT:    flh fa5, %lo(.LCPI43_0)(a0)
+; CHECKIZFH-NEXT:    fabs.h fa4, fa0
+; CHECKIZFH-NEXT:    flt.h a0, fa4, fa5
 ; CHECKIZFH-NEXT:    beqz a0, .LBB43_2
 ; CHECKIZFH-NEXT:  # %bb.1:
 ; CHECKIZFH-NEXT:    fcvt.w.h a0, fa0, rmm
-; CHECKIZFH-NEXT:    fcvt.h.w ft0, a0, rmm
-; CHECKIZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; CHECKIZFH-NEXT:    fcvt.h.w fa5, a0, rmm
+; CHECKIZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; CHECKIZFH-NEXT:  .LBB43_2:
 ; CHECKIZFH-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: test_round_half:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB43_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rmm
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rmm
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB43_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.round.f16(half %x)
   ret half %a
@@ -2273,31 +2273,31 @@ define half @test_roundeven_half(half %x) {
 ; CHECKIZFH-LABEL: test_roundeven_half:
 ; CHECKIZFH:       # %bb.0:
 ; CHECKIZFH-NEXT:    lui a0, %hi(.LCPI44_0)
-; CHECKIZFH-NEXT:    flh ft0, %lo(.LCPI44_0)(a0)
-; CHECKIZFH-NEXT:    fabs.h ft1, fa0
-; CHECKIZFH-NEXT:    flt.h a0, ft1, ft0
+; CHECKIZFH-NEXT:    flh fa5, %lo(.LCPI44_0)(a0)
+; CHECKIZFH-NEXT:    fabs.h fa4, fa0
+; CHECKIZFH-NEXT:    flt.h a0, fa4, fa5
 ; CHECKIZFH-NEXT:    beqz a0, .LBB44_2
 ; CHECKIZFH-NEXT:  # %bb.1:
 ; CHECKIZFH-NEXT:    fcvt.w.h a0, fa0, rne
-; CHECKIZFH-NEXT:    fcvt.h.w ft0, a0, rne
-; CHECKIZFH-NEXT:    fsgnj.h fa0, ft0, fa0
+; CHECKIZFH-NEXT:    fcvt.h.w fa5, a0, rne
+; CHECKIZFH-NEXT:    fsgnj.h fa0, fa5, fa0
 ; CHECKIZFH-NEXT:  .LBB44_2:
 ; CHECKIZFH-NEXT:    ret
 ;
 ; CHECKIZFHMIN-LABEL: test_roundeven_half:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
 ; CHECKIZFHMIN-NEXT:    lui a0, 307200
-; CHECKIZFHMIN-NEXT:    fmv.w.x ft1, a0
-; CHECKIZFHMIN-NEXT:    fabs.s ft2, ft0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft2, ft1
+; CHECKIZFHMIN-NEXT:    fmv.w.x fa4, a0
+; CHECKIZFHMIN-NEXT:    fabs.s fa3, fa5
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa3, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB44_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, ft0, rne
-; CHECKIZFHMIN-NEXT:    fcvt.s.w ft1, a0, rne
-; CHECKIZFHMIN-NEXT:    fsgnj.s ft0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.w.s a0, fa5, rne
+; CHECKIZFHMIN-NEXT:    fcvt.s.w fa4, a0, rne
+; CHECKIZFHMIN-NEXT:    fsgnj.s fa5, fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB44_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %a = call half @llvm.roundeven.f16(half %x)
   ret half %a

diff  --git a/llvm/test/CodeGen/RISCV/half-select-fcmp.ll b/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
index 43e284b677e2..53121caa2f76 100644
--- a/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
@@ -35,14 +35,14 @@ define half @select_fcmp_oeq(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_oeq:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    feq.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    feq.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    bnez a0, .LBB1_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fmv.s ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.s fa5, fa4
 ; CHECKIZFHMIN-NEXT:  .LBB1_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp oeq half %a, %b
   %2 = select i1 %1, half %a, half %b
@@ -61,14 +61,14 @@ define half @select_fcmp_ogt(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_ogt:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa4, fa5
 ; CHECKIZFHMIN-NEXT:    bnez a0, .LBB2_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fmv.s ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.s fa5, fa4
 ; CHECKIZFHMIN-NEXT:  .LBB2_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ogt half %a, %b
   %2 = select i1 %1, half %a, half %b
@@ -87,14 +87,14 @@ define half @select_fcmp_oge(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_oge:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fle.s a0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fle.s a0, fa4, fa5
 ; CHECKIZFHMIN-NEXT:    bnez a0, .LBB3_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fmv.s ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.s fa5, fa4
 ; CHECKIZFHMIN-NEXT:  .LBB3_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp oge half %a, %b
   %2 = select i1 %1, half %a, half %b
@@ -113,14 +113,14 @@ define half @select_fcmp_olt(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_olt:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    bnez a0, .LBB4_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fmv.s ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.s fa5, fa4
 ; CHECKIZFHMIN-NEXT:  .LBB4_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp olt half %a, %b
   %2 = select i1 %1, half %a, half %b
@@ -139,14 +139,14 @@ define half @select_fcmp_ole(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_ole:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fle.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fle.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    bnez a0, .LBB5_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fmv.s ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.s fa5, fa4
 ; CHECKIZFHMIN-NEXT:  .LBB5_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ole half %a, %b
   %2 = select i1 %1, half %a, half %b
@@ -167,16 +167,16 @@ define half @select_fcmp_one(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_one:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    flt.s a1, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa5, fa4
+; CHECKIZFHMIN-NEXT:    flt.s a1, fa4, fa5
 ; CHECKIZFHMIN-NEXT:    or a0, a1, a0
 ; CHECKIZFHMIN-NEXT:    bnez a0, .LBB6_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fmv.s ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.s fa5, fa4
 ; CHECKIZFHMIN-NEXT:  .LBB6_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp one half %a, %b
   %2 = select i1 %1, half %a, half %b
@@ -197,16 +197,16 @@ define half @select_fcmp_ord(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_ord:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    feq.s a0, ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    feq.s a1, ft1, ft1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    feq.s a0, fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    feq.s a1, fa4, fa4
 ; CHECKIZFHMIN-NEXT:    and a0, a1, a0
 ; CHECKIZFHMIN-NEXT:    bnez a0, .LBB7_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fmv.s ft1, ft0
+; CHECKIZFHMIN-NEXT:    fmv.s fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB7_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft1
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa4
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ord half %a, %b
   %2 = select i1 %1, half %a, half %b
@@ -227,16 +227,16 @@ define half @select_fcmp_ueq(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_ueq:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft0, ft1
-; CHECKIZFHMIN-NEXT:    flt.s a1, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa5, fa4
+; CHECKIZFHMIN-NEXT:    flt.s a1, fa4, fa5
 ; CHECKIZFHMIN-NEXT:    or a0, a1, a0
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB8_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fmv.s ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.s fa5, fa4
 ; CHECKIZFHMIN-NEXT:  .LBB8_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ueq half %a, %b
   %2 = select i1 %1, half %a, half %b
@@ -255,14 +255,14 @@ define half @select_fcmp_ugt(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_ugt:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fle.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fle.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB9_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fmv.s ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.s fa5, fa4
 ; CHECKIZFHMIN-NEXT:  .LBB9_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ugt half %a, %b
   %2 = select i1 %1, half %a, half %b
@@ -281,14 +281,14 @@ define half @select_fcmp_uge(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_uge:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB10_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fmv.s ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.s fa5, fa4
 ; CHECKIZFHMIN-NEXT:  .LBB10_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp uge half %a, %b
   %2 = select i1 %1, half %a, half %b
@@ -307,14 +307,14 @@ define half @select_fcmp_ult(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_ult:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fle.s a0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fle.s a0, fa4, fa5
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB11_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fmv.s ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.s fa5, fa4
 ; CHECKIZFHMIN-NEXT:  .LBB11_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ult half %a, %b
   %2 = select i1 %1, half %a, half %b
@@ -333,14 +333,14 @@ define half @select_fcmp_ule(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_ule:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    flt.s a0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    flt.s a0, fa4, fa5
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB12_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fmv.s ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.s fa5, fa4
 ; CHECKIZFHMIN-NEXT:  .LBB12_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ule half %a, %b
   %2 = select i1 %1, half %a, half %b
@@ -359,14 +359,14 @@ define half @select_fcmp_une(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_une:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    feq.s a0, ft0, ft1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    feq.s a0, fa5, fa4
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB13_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fmv.s ft0, ft1
+; CHECKIZFHMIN-NEXT:    fmv.s fa5, fa4
 ; CHECKIZFHMIN-NEXT:  .LBB13_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp une half %a, %b
   %2 = select i1 %1, half %a, half %b
@@ -387,16 +387,16 @@ define half @select_fcmp_uno(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_uno:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    feq.s a0, ft0, ft0
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    feq.s a1, ft1, ft1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    feq.s a0, fa5, fa5
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    feq.s a1, fa4, fa4
 ; CHECKIZFHMIN-NEXT:    and a0, a1, a0
 ; CHECKIZFHMIN-NEXT:    beqz a0, .LBB14_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fmv.s ft1, ft0
+; CHECKIZFHMIN-NEXT:    fmv.s fa4, fa5
 ; CHECKIZFHMIN-NEXT:  .LBB14_2:
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft1
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa4
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp uno half %a, %b
   %2 = select i1 %1, half %a, half %b
@@ -429,9 +429,9 @@ define i32 @i32_select_fcmp_oeq(half %a, half %b, i32 %c, i32 %d) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: i32_select_fcmp_oeq:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    feq.s a2, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    feq.s a2, fa4, fa5
 ; CHECKIZFHMIN-NEXT:    bnez a2, .LBB16_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
 ; CHECKIZFHMIN-NEXT:    mv a0, a1
@@ -452,9 +452,9 @@ define i32 @select_fcmp_oeq_1_2(half %a, half %b) {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_oeq_1_2:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    feq.s a0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    feq.s a0, fa4, fa5
 ; CHECKIZFHMIN-NEXT:    li a1, 2
 ; CHECKIZFHMIN-NEXT:    sub a0, a1, a0
 ; CHECKIZFHMIN-NEXT:    ret
@@ -472,9 +472,9 @@ define signext i32 @select_fcmp_uge_negone_zero(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_uge_negone_zero:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fle.s a0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fle.s a0, fa4, fa5
 ; CHECKIZFHMIN-NEXT:    addi a0, a0, -1
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ugt half %a, %b
@@ -491,9 +491,9 @@ define signext i32 @select_fcmp_uge_1_2(half %a, half %b) nounwind {
 ;
 ; CHECKIZFHMIN-LABEL: select_fcmp_uge_1_2:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; CHECKIZFHMIN-NEXT:    fle.s a0, ft1, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; CHECKIZFHMIN-NEXT:    fle.s a0, fa4, fa5
 ; CHECKIZFHMIN-NEXT:    addi a0, a0, 1
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = fcmp ugt half %a, %b

diff --git a/llvm/test/CodeGen/RISCV/half-select-icmp.ll b/llvm/test/CodeGen/RISCV/half-select-icmp.ll
index 128a9790a929..06c07baa9f1c 100644
--- a/llvm/test/CodeGen/RISCV/half-select-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-select-icmp.ll
@@ -21,12 +21,12 @@ define half @select_icmp_eq(i32 signext %a, i32 signext %b, half %c, half %d) {
 ; CHECKIZFHMIN:       # %bb.0:
 ; CHECKIZFHMIN-NEXT:    beq a0, a1, .LBB0_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
 ; CHECKIZFHMIN-NEXT:  .LBB0_2:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = icmp eq i32 %a, %b
   %2 = select i1 %1, half %c, half %d
@@ -46,12 +46,12 @@ define half @select_icmp_ne(i32 signext %a, i32 signext %b, half %c, half %d) {
 ; CHECKIZFHMIN:       # %bb.0:
 ; CHECKIZFHMIN-NEXT:    bne a0, a1, .LBB1_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
 ; CHECKIZFHMIN-NEXT:  .LBB1_2:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = icmp ne i32 %a, %b
   %2 = select i1 %1, half %c, half %d
@@ -71,12 +71,12 @@ define half @select_icmp_ugt(i32 signext %a, i32 signext %b, half %c, half %d) {
 ; CHECKIZFHMIN:       # %bb.0:
 ; CHECKIZFHMIN-NEXT:    bltu a1, a0, .LBB2_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
 ; CHECKIZFHMIN-NEXT:  .LBB2_2:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = icmp ugt i32 %a, %b
   %2 = select i1 %1, half %c, half %d
@@ -96,12 +96,12 @@ define half @select_icmp_uge(i32 signext %a, i32 signext %b, half %c, half %d) {
 ; CHECKIZFHMIN:       # %bb.0:
 ; CHECKIZFHMIN-NEXT:    bgeu a0, a1, .LBB3_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
 ; CHECKIZFHMIN-NEXT:  .LBB3_2:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = icmp uge i32 %a, %b
   %2 = select i1 %1, half %c, half %d
@@ -121,12 +121,12 @@ define half @select_icmp_ult(i32 signext %a, i32 signext %b, half %c, half %d) {
 ; CHECKIZFHMIN:       # %bb.0:
 ; CHECKIZFHMIN-NEXT:    bltu a0, a1, .LBB4_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
 ; CHECKIZFHMIN-NEXT:  .LBB4_2:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = icmp ult i32 %a, %b
   %2 = select i1 %1, half %c, half %d
@@ -146,12 +146,12 @@ define half @select_icmp_ule(i32 signext %a, i32 signext %b, half %c, half %d) {
 ; CHECKIZFHMIN:       # %bb.0:
 ; CHECKIZFHMIN-NEXT:    bgeu a1, a0, .LBB5_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
 ; CHECKIZFHMIN-NEXT:  .LBB5_2:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = icmp ule i32 %a, %b
   %2 = select i1 %1, half %c, half %d
@@ -171,12 +171,12 @@ define half @select_icmp_sgt(i32 signext %a, i32 signext %b, half %c, half %d) {
 ; CHECKIZFHMIN:       # %bb.0:
 ; CHECKIZFHMIN-NEXT:    blt a1, a0, .LBB6_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
 ; CHECKIZFHMIN-NEXT:  .LBB6_2:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = icmp sgt i32 %a, %b
   %2 = select i1 %1, half %c, half %d
@@ -196,12 +196,12 @@ define half @select_icmp_sge(i32 signext %a, i32 signext %b, half %c, half %d) {
 ; CHECKIZFHMIN:       # %bb.0:
 ; CHECKIZFHMIN-NEXT:    bge a0, a1, .LBB7_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
 ; CHECKIZFHMIN-NEXT:  .LBB7_2:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = icmp sge i32 %a, %b
   %2 = select i1 %1, half %c, half %d
@@ -221,12 +221,12 @@ define half @select_icmp_slt(i32 signext %a, i32 signext %b, half %c, half %d) {
 ; CHECKIZFHMIN:       # %bb.0:
 ; CHECKIZFHMIN-NEXT:    blt a0, a1, .LBB8_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
 ; CHECKIZFHMIN-NEXT:  .LBB8_2:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = icmp slt i32 %a, %b
   %2 = select i1 %1, half %c, half %d
@@ -246,12 +246,12 @@ define half @select_icmp_sle(i32 signext %a, i32 signext %b, half %c, half %d) {
 ; CHECKIZFHMIN:       # %bb.0:
 ; CHECKIZFHMIN-NEXT:    bge a1, a0, .LBB9_2
 ; CHECKIZFHMIN-NEXT:  # %bb.1:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
 ; CHECKIZFHMIN-NEXT:  .LBB9_2:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = icmp sle i32 %a, %b
   %2 = select i1 %1, half %c, half %d

diff --git a/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll b/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
index 7c7baa0f5aa6..71769a800c06 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
@@ -12,13 +12,13 @@ define double @constraint_f_double(double %a) nounwind {
 ; RV32F-NEXT:    addi sp, sp, -16
 ; RV32F-NEXT:    sw a0, 8(sp)
 ; RV32F-NEXT:    sw a1, 12(sp)
-; RV32F-NEXT:    fld ft0, 8(sp)
+; RV32F-NEXT:    fld fa5, 8(sp)
 ; RV32F-NEXT:    lui a0, %hi(gd)
-; RV32F-NEXT:    fld ft1, %lo(gd)(a0)
+; RV32F-NEXT:    fld fa4, %lo(gd)(a0)
 ; RV32F-NEXT:    #APP
-; RV32F-NEXT:    fadd.d ft0, ft0, ft1
+; RV32F-NEXT:    fadd.d fa5, fa5, fa4
 ; RV32F-NEXT:    #NO_APP
-; RV32F-NEXT:    fsd ft0, 8(sp)
+; RV32F-NEXT:    fsd fa5, 8(sp)
 ; RV32F-NEXT:    lw a0, 8(sp)
 ; RV32F-NEXT:    lw a1, 12(sp)
 ; RV32F-NEXT:    addi sp, sp, 16
@@ -27,12 +27,12 @@ define double @constraint_f_double(double %a) nounwind {
 ; RV64F-LABEL: constraint_f_double:
 ; RV64F:       # %bb.0:
 ; RV64F-NEXT:    lui a1, %hi(gd)
-; RV64F-NEXT:    fld ft0, %lo(gd)(a1)
-; RV64F-NEXT:    fmv.d.x ft1, a0
+; RV64F-NEXT:    fld fa5, %lo(gd)(a1)
+; RV64F-NEXT:    fmv.d.x fa4, a0
 ; RV64F-NEXT:    #APP
-; RV64F-NEXT:    fadd.d ft0, ft1, ft0
+; RV64F-NEXT:    fadd.d fa5, fa4, fa5
 ; RV64F-NEXT:    #NO_APP
-; RV64F-NEXT:    fmv.x.d a0, ft0
+; RV64F-NEXT:    fmv.x.d a0, fa5
 ; RV64F-NEXT:    ret
   %1 = load double, ptr @gd
   %2 = tail call double asm "fadd.d $0, $1, $2", "=f,f,f"(double %a, double %1)
@@ -79,8 +79,8 @@ define double @constraint_gpr(double %x) {
 ; RV32F-NEXT:    .cfi_def_cfa_offset 32
 ; RV32F-NEXT:    sw a0, 8(sp)
 ; RV32F-NEXT:    sw a1, 12(sp)
-; RV32F-NEXT:    fld ft0, 8(sp)
-; RV32F-NEXT:    fsd ft0, 24(sp)
+; RV32F-NEXT:    fld fa5, 8(sp)
+; RV32F-NEXT:    fsd fa5, 24(sp)
 ; RV32F-NEXT:    lw a0, 24(sp)
 ; RV32F-NEXT:    lw a1, 28(sp)
 ; RV32F-NEXT:    #APP
@@ -88,8 +88,8 @@ define double @constraint_gpr(double %x) {
 ; RV32F-NEXT:    #NO_APP
 ; RV32F-NEXT:    sw a1, 20(sp)
 ; RV32F-NEXT:    sw a0, 16(sp)
-; RV32F-NEXT:    fld ft0, 16(sp)
-; RV32F-NEXT:    fsd ft0, 8(sp)
+; RV32F-NEXT:    fld fa5, 16(sp)
+; RV32F-NEXT:    fsd fa5, 8(sp)
 ; RV32F-NEXT:    lw a0, 8(sp)
 ; RV32F-NEXT:    lw a1, 12(sp)
 ; RV32F-NEXT:    addi sp, sp, 32

diff --git a/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll b/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll
index 1b6897f0c9f9..91922cd236df 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll
@@ -15,23 +15,23 @@ define float @constraint_f_float(float %a) nounwind {
 ; RV32F-LABEL: constraint_f_float:
 ; RV32F:       # %bb.0:
 ; RV32F-NEXT:    lui a1, %hi(gf)
-; RV32F-NEXT:    flw ft0, %lo(gf)(a1)
-; RV32F-NEXT:    fmv.w.x ft1, a0
+; RV32F-NEXT:    flw fa5, %lo(gf)(a1)
+; RV32F-NEXT:    fmv.w.x fa4, a0
 ; RV32F-NEXT:    #APP
-; RV32F-NEXT:    fadd.s ft0, ft1, ft0
+; RV32F-NEXT:    fadd.s fa5, fa4, fa5
 ; RV32F-NEXT:    #NO_APP
-; RV32F-NEXT:    fmv.x.w a0, ft0
+; RV32F-NEXT:    fmv.x.w a0, fa5
 ; RV32F-NEXT:    ret
 ;
 ; RV64F-LABEL: constraint_f_float:
 ; RV64F:       # %bb.0:
 ; RV64F-NEXT:    lui a1, %hi(gf)
-; RV64F-NEXT:    flw ft0, %lo(gf)(a1)
-; RV64F-NEXT:    fmv.w.x ft1, a0
+; RV64F-NEXT:    flw fa5, %lo(gf)(a1)
+; RV64F-NEXT:    fmv.w.x fa4, a0
 ; RV64F-NEXT:    #APP
-; RV64F-NEXT:    fadd.s ft0, ft1, ft0
+; RV64F-NEXT:    fadd.s fa5, fa4, fa5
 ; RV64F-NEXT:    #NO_APP
-; RV64F-NEXT:    fmv.x.w a0, ft0
+; RV64F-NEXT:    fmv.x.w a0, fa5
 ; RV64F-NEXT:    ret
   %1 = load float, ptr @gf
   %2 = tail call float asm "fadd.s $0, $1, $2", "=f,f,f"(float %a, float %1)

diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll b/llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll
index 1ad0ed78a85f..8caf5956e7a7 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll
@@ -14,36 +14,36 @@ define half @constraint_f_half(half %a) nounwind {
 ; RV32ZFH-LABEL: constraint_f_half:
 ; RV32ZFH:       # %bb.0:
 ; RV32ZFH-NEXT:    lui a0, %hi(gh)
-; RV32ZFH-NEXT:    flh ft0, %lo(gh)(a0)
+; RV32ZFH-NEXT:    flh fa5, %lo(gh)(a0)
 ; RV32ZFH-NEXT:    #APP
-; RV32ZFH-NEXT:    fadd.h fa0, fa0, ft0
+; RV32ZFH-NEXT:    fadd.h fa0, fa0, fa5
 ; RV32ZFH-NEXT:    #NO_APP
 ; RV32ZFH-NEXT:    ret
 ;
 ; RV64ZFH-LABEL: constraint_f_half:
 ; RV64ZFH:       # %bb.0:
 ; RV64ZFH-NEXT:    lui a0, %hi(gh)
-; RV64ZFH-NEXT:    flh ft0, %lo(gh)(a0)
+; RV64ZFH-NEXT:    flh fa5, %lo(gh)(a0)
 ; RV64ZFH-NEXT:    #APP
-; RV64ZFH-NEXT:    fadd.h fa0, fa0, ft0
+; RV64ZFH-NEXT:    fadd.h fa0, fa0, fa5
 ; RV64ZFH-NEXT:    #NO_APP
 ; RV64ZFH-NEXT:    ret
 ;
 ; RV32DZFH-LABEL: constraint_f_half:
 ; RV32DZFH:       # %bb.0:
 ; RV32DZFH-NEXT:    lui a0, %hi(gh)
-; RV32DZFH-NEXT:    flh ft0, %lo(gh)(a0)
+; RV32DZFH-NEXT:    flh fa5, %lo(gh)(a0)
 ; RV32DZFH-NEXT:    #APP
-; RV32DZFH-NEXT:    fadd.h fa0, fa0, ft0
+; RV32DZFH-NEXT:    fadd.h fa0, fa0, fa5
 ; RV32DZFH-NEXT:    #NO_APP
 ; RV32DZFH-NEXT:    ret
 ;
 ; RV64DZFH-LABEL: constraint_f_half:
 ; RV64DZFH:       # %bb.0:
 ; RV64DZFH-NEXT:    lui a0, %hi(gh)
-; RV64DZFH-NEXT:    flh ft0, %lo(gh)(a0)
+; RV64DZFH-NEXT:    flh fa5, %lo(gh)(a0)
 ; RV64DZFH-NEXT:    #APP
-; RV64DZFH-NEXT:    fadd.h fa0, fa0, ft0
+; RV64DZFH-NEXT:    fadd.h fa0, fa0, fa5
 ; RV64DZFH-NEXT:    #NO_APP
 ; RV64DZFH-NEXT:    ret
   %1 = load half, ptr @gh

diff --git a/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll b/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
index 2278e7a77c5b..d1f00db1c23b 100644
--- a/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
+++ b/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
@@ -211,18 +211,18 @@ define void @foo_float() nounwind #0 {
 ; CHECK-RV32IF:       # %bb.0:
 ; CHECK-RV32IF-NEXT:    addi sp, sp, -16
 ; CHECK-RV32IF-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
-; CHECK-RV32IF-NEXT:    fsw ft0, 8(sp) # 4-byte Folded Spill
-; CHECK-RV32IF-NEXT:    fsw ft1, 4(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa4, 8(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa5, 4(sp) # 4-byte Folded Spill
 ; CHECK-RV32IF-NEXT:    lui a0, %hi(e)
-; CHECK-RV32IF-NEXT:    flw ft0, %lo(e)(a0)
+; CHECK-RV32IF-NEXT:    flw fa5, %lo(e)(a0)
 ; CHECK-RV32IF-NEXT:    lui a0, %hi(f)
-; CHECK-RV32IF-NEXT:    flw ft1, %lo(f)(a0)
-; CHECK-RV32IF-NEXT:    fadd.s ft0, ft0, ft1
+; CHECK-RV32IF-NEXT:    flw fa4, %lo(f)(a0)
+; CHECK-RV32IF-NEXT:    fadd.s fa5, fa5, fa4
 ; CHECK-RV32IF-NEXT:    lui a0, %hi(d)
-; CHECK-RV32IF-NEXT:    fsw ft0, %lo(d)(a0)
+; CHECK-RV32IF-NEXT:    fsw fa5, %lo(d)(a0)
 ; CHECK-RV32IF-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
-; CHECK-RV32IF-NEXT:    flw ft0, 8(sp) # 4-byte Folded Reload
-; CHECK-RV32IF-NEXT:    flw ft1, 4(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa4, 8(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa5, 4(sp) # 4-byte Folded Reload
 ; CHECK-RV32IF-NEXT:    addi sp, sp, 16
 ; CHECK-RV32IF-NEXT:    mret
 ;
@@ -230,18 +230,18 @@ define void @foo_float() nounwind #0 {
 ; CHECK-RV32IFD:       # %bb.0:
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, -32
 ; CHECK-RV32IFD-NEXT:    sw a0, 28(sp) # 4-byte Folded Spill
-; CHECK-RV32IFD-NEXT:    fsd ft0, 16(sp) # 8-byte Folded Spill
-; CHECK-RV32IFD-NEXT:    fsd ft1, 8(sp) # 8-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd fa4, 16(sp) # 8-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd fa5, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(e)
-; CHECK-RV32IFD-NEXT:    flw ft0, %lo(e)(a0)
+; CHECK-RV32IFD-NEXT:    flw fa5, %lo(e)(a0)
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(f)
-; CHECK-RV32IFD-NEXT:    flw ft1, %lo(f)(a0)
-; CHECK-RV32IFD-NEXT:    fadd.s ft0, ft0, ft1
+; CHECK-RV32IFD-NEXT:    flw fa4, %lo(f)(a0)
+; CHECK-RV32IFD-NEXT:    fadd.s fa5, fa5, fa4
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(d)
-; CHECK-RV32IFD-NEXT:    fsw ft0, %lo(d)(a0)
+; CHECK-RV32IFD-NEXT:    fsw fa5, %lo(d)(a0)
 ; CHECK-RV32IFD-NEXT:    lw a0, 28(sp) # 4-byte Folded Reload
-; CHECK-RV32IFD-NEXT:    fld ft0, 16(sp) # 8-byte Folded Reload
-; CHECK-RV32IFD-NEXT:    fld ft1, 8(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    fld fa4, 16(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    fld fa5, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, 32
 ; CHECK-RV32IFD-NEXT:    mret
   %1 = load float, ptr @e
@@ -309,21 +309,21 @@ define void @foo_fp_float() nounwind #1 {
 ; CHECK-RV32IF-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; CHECK-RV32IF-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; CHECK-RV32IF-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
-; CHECK-RV32IF-NEXT:    fsw ft0, 16(sp) # 4-byte Folded Spill
-; CHECK-RV32IF-NEXT:    fsw ft1, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa4, 16(sp) # 4-byte Folded Spill
+; CHECK-RV32IF-NEXT:    fsw fa5, 12(sp) # 4-byte Folded Spill
 ; CHECK-RV32IF-NEXT:    addi s0, sp, 32
 ; CHECK-RV32IF-NEXT:    lui a0, %hi(e)
-; CHECK-RV32IF-NEXT:    flw ft0, %lo(e)(a0)
+; CHECK-RV32IF-NEXT:    flw fa5, %lo(e)(a0)
 ; CHECK-RV32IF-NEXT:    lui a0, %hi(f)
-; CHECK-RV32IF-NEXT:    flw ft1, %lo(f)(a0)
-; CHECK-RV32IF-NEXT:    fadd.s ft0, ft0, ft1
+; CHECK-RV32IF-NEXT:    flw fa4, %lo(f)(a0)
+; CHECK-RV32IF-NEXT:    fadd.s fa5, fa5, fa4
 ; CHECK-RV32IF-NEXT:    lui a0, %hi(d)
-; CHECK-RV32IF-NEXT:    fsw ft0, %lo(d)(a0)
+; CHECK-RV32IF-NEXT:    fsw fa5, %lo(d)(a0)
 ; CHECK-RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; CHECK-RV32IF-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; CHECK-RV32IF-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
-; CHECK-RV32IF-NEXT:    flw ft0, 16(sp) # 4-byte Folded Reload
-; CHECK-RV32IF-NEXT:    flw ft1, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa4, 16(sp) # 4-byte Folded Reload
+; CHECK-RV32IF-NEXT:    flw fa5, 12(sp) # 4-byte Folded Reload
 ; CHECK-RV32IF-NEXT:    addi sp, sp, 32
 ; CHECK-RV32IF-NEXT:    mret
 ;
@@ -333,21 +333,21 @@ define void @foo_fp_float() nounwind #1 {
 ; CHECK-RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; CHECK-RV32IFD-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; CHECK-RV32IFD-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
-; CHECK-RV32IFD-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
-; CHECK-RV32IFD-NEXT:    fsd ft1, 0(sp) # 8-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd fa4, 8(sp) # 8-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd fa5, 0(sp) # 8-byte Folded Spill
 ; CHECK-RV32IFD-NEXT:    addi s0, sp, 32
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(e)
-; CHECK-RV32IFD-NEXT:    flw ft0, %lo(e)(a0)
+; CHECK-RV32IFD-NEXT:    flw fa5, %lo(e)(a0)
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(f)
-; CHECK-RV32IFD-NEXT:    flw ft1, %lo(f)(a0)
-; CHECK-RV32IFD-NEXT:    fadd.s ft0, ft0, ft1
+; CHECK-RV32IFD-NEXT:    flw fa4, %lo(f)(a0)
+; CHECK-RV32IFD-NEXT:    fadd.s fa5, fa5, fa4
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(d)
-; CHECK-RV32IFD-NEXT:    fsw ft0, %lo(d)(a0)
+; CHECK-RV32IFD-NEXT:    fsw fa5, %lo(d)(a0)
 ; CHECK-RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; CHECK-RV32IFD-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; CHECK-RV32IFD-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
-; CHECK-RV32IFD-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
-; CHECK-RV32IFD-NEXT:    fld ft1, 0(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    fld fa4, 8(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    fld fa5, 0(sp) # 8-byte Folded Reload
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, 32
 ; CHECK-RV32IFD-NEXT:    mret
   %1 = load float, ptr @e
@@ -526,18 +526,18 @@ define void @foo_double() nounwind #0 {
 ; CHECK-RV32IFD:       # %bb.0:
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, -32
 ; CHECK-RV32IFD-NEXT:    sw a0, 28(sp) # 4-byte Folded Spill
-; CHECK-RV32IFD-NEXT:    fsd ft0, 16(sp) # 8-byte Folded Spill
-; CHECK-RV32IFD-NEXT:    fsd ft1, 8(sp) # 8-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd fa4, 16(sp) # 8-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd fa5, 8(sp) # 8-byte Folded Spill
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(h)
-; CHECK-RV32IFD-NEXT:    fld ft0, %lo(h)(a0)
+; CHECK-RV32IFD-NEXT:    fld fa5, %lo(h)(a0)
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(i)
-; CHECK-RV32IFD-NEXT:    fld ft1, %lo(i)(a0)
-; CHECK-RV32IFD-NEXT:    fadd.d ft0, ft0, ft1
+; CHECK-RV32IFD-NEXT:    fld fa4, %lo(i)(a0)
+; CHECK-RV32IFD-NEXT:    fadd.d fa5, fa5, fa4
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(g)
-; CHECK-RV32IFD-NEXT:    fsd ft0, %lo(g)(a0)
+; CHECK-RV32IFD-NEXT:    fsd fa5, %lo(g)(a0)
 ; CHECK-RV32IFD-NEXT:    lw a0, 28(sp) # 4-byte Folded Reload
-; CHECK-RV32IFD-NEXT:    fld ft0, 16(sp) # 8-byte Folded Reload
-; CHECK-RV32IFD-NEXT:    fld ft1, 8(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    fld fa4, 16(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    fld fa5, 8(sp) # 8-byte Folded Reload
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, 32
 ; CHECK-RV32IFD-NEXT:    mret
   %1 = load double, ptr @h
@@ -723,21 +723,21 @@ define void @foo_fp_double() nounwind #1 {
 ; CHECK-RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; CHECK-RV32IFD-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; CHECK-RV32IFD-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
-; CHECK-RV32IFD-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
-; CHECK-RV32IFD-NEXT:    fsd ft1, 0(sp) # 8-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd fa4, 8(sp) # 8-byte Folded Spill
+; CHECK-RV32IFD-NEXT:    fsd fa5, 0(sp) # 8-byte Folded Spill
 ; CHECK-RV32IFD-NEXT:    addi s0, sp, 32
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(h)
-; CHECK-RV32IFD-NEXT:    fld ft0, %lo(h)(a0)
+; CHECK-RV32IFD-NEXT:    fld fa5, %lo(h)(a0)
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(i)
-; CHECK-RV32IFD-NEXT:    fld ft1, %lo(i)(a0)
-; CHECK-RV32IFD-NEXT:    fadd.d ft0, ft0, ft1
+; CHECK-RV32IFD-NEXT:    fld fa4, %lo(i)(a0)
+; CHECK-RV32IFD-NEXT:    fadd.d fa5, fa5, fa4
 ; CHECK-RV32IFD-NEXT:    lui a0, %hi(g)
-; CHECK-RV32IFD-NEXT:    fsd ft0, %lo(g)(a0)
+; CHECK-RV32IFD-NEXT:    fsd fa5, %lo(g)(a0)
 ; CHECK-RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; CHECK-RV32IFD-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; CHECK-RV32IFD-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
-; CHECK-RV32IFD-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
-; CHECK-RV32IFD-NEXT:    fld ft1, 0(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    fld fa4, 8(sp) # 8-byte Folded Reload
+; CHECK-RV32IFD-NEXT:    fld fa5, 0(sp) # 8-byte Folded Reload
 ; CHECK-RV32IFD-NEXT:    addi sp, sp, 32
 ; CHECK-RV32IFD-NEXT:    mret
   %1 = load double, ptr @h

diff --git a/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll b/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
index 9389a4b5bc8a..489d44256ba8 100644
--- a/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
+++ b/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
@@ -346,8 +346,8 @@ define i64 @llround_f32(float %a) nounwind {
 ;
 ; RV64IFD-LP64-LABEL: llround_f32:
 ; RV64IFD-LP64:       # %bb.0:
-; RV64IFD-LP64-NEXT:    fmv.w.x ft0, a0
-; RV64IFD-LP64-NEXT:    fcvt.l.s a0, ft0, rmm
+; RV64IFD-LP64-NEXT:    fmv.w.x fa5, a0
+; RV64IFD-LP64-NEXT:    fcvt.l.s a0, fa5, rmm
 ; RV64IFD-LP64-NEXT:    ret
 ;
 ; RV64I-LP64-LABEL: llround_f32:
@@ -500,8 +500,8 @@ define i64 @llround_f64(double %a) nounwind {
 ;
 ; RV64IFD-LP64-LABEL: llround_f64:
 ; RV64IFD-LP64:       # %bb.0:
-; RV64IFD-LP64-NEXT:    fmv.d.x ft0, a0
-; RV64IFD-LP64-NEXT:    fcvt.l.d a0, ft0, rmm
+; RV64IFD-LP64-NEXT:    fmv.d.x fa5, a0
+; RV64IFD-LP64-NEXT:    fcvt.l.d a0, fa5, rmm
 ; RV64IFD-LP64-NEXT:    ret
 ;
 ; RV64I-LP64-LABEL: llround_f64:

diff --git a/llvm/test/CodeGen/RISCV/machine-combiner.ll b/llvm/test/CodeGen/RISCV/machine-combiner.ll
index 197abcfb0e1e..7c1792e2f101 100644
--- a/llvm/test/CodeGen/RISCV/machine-combiner.ll
+++ b/llvm/test/CodeGen/RISCV/machine-combiner.ll
@@ -10,9 +10,9 @@
 define double @test_reassoc_fadd1(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fadd1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fadd.d ft1, fa2, fa3
-; CHECK-NEXT:    fadd.d fa0, ft0, ft1
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fadd.d fa4, fa2, fa3
+; CHECK-NEXT:    fadd.d fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fadd nsz reassoc double %t0, %a2
@@ -23,9 +23,9 @@ define double @test_reassoc_fadd1(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fadd2(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fadd2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fadd.d ft1, fa2, fa3
-; CHECK-NEXT:    fadd.d fa0, ft1, ft0
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fadd.d fa4, fa2, fa3
+; CHECK-NEXT:    fadd.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fadd nsz reassoc double %a2, %t0
@@ -36,9 +36,9 @@ define double @test_reassoc_fadd2(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fadd3(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fadd3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fadd.d ft1, fa3, fa2
-; CHECK-NEXT:    fadd.d fa0, ft1, ft0
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fadd.d fa4, fa3, fa2
+; CHECK-NEXT:    fadd.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fadd nsz reassoc double %t0, %a2
@@ -49,9 +49,9 @@ define double @test_reassoc_fadd3(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fadd4(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fadd4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fadd.d ft1, fa3, fa2
-; CHECK-NEXT:    fadd.d fa0, ft1, ft0
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fadd.d fa4, fa3, fa2
+; CHECK-NEXT:    fadd.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fadd nsz reassoc double %a2, %t0
@@ -62,9 +62,9 @@ define double @test_reassoc_fadd4(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fmul1(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fmul1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmul.d ft0, fa0, fa1
-; CHECK-NEXT:    fmul.d ft1, fa2, fa3
-; CHECK-NEXT:    fmul.d fa0, ft0, ft1
+; CHECK-NEXT:    fmul.d fa5, fa0, fa1
+; CHECK-NEXT:    fmul.d fa4, fa2, fa3
+; CHECK-NEXT:    fmul.d fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %t0 = fmul nsz reassoc double %a0, %a1
   %t1 = fmul nsz reassoc double %t0, %a2
@@ -75,9 +75,9 @@ define double @test_reassoc_fmul1(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fmul2(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fmul2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmul.d ft0, fa0, fa1
-; CHECK-NEXT:    fmul.d ft1, fa2, fa3
-; CHECK-NEXT:    fmul.d fa0, ft1, ft0
+; CHECK-NEXT:    fmul.d fa5, fa0, fa1
+; CHECK-NEXT:    fmul.d fa4, fa2, fa3
+; CHECK-NEXT:    fmul.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fmul nsz reassoc double %a0, %a1
   %t1 = fmul nsz reassoc double %a2, %t0
@@ -88,9 +88,9 @@ define double @test_reassoc_fmul2(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fmul3(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fmul3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmul.d ft0, fa0, fa1
-; CHECK-NEXT:    fmul.d ft1, fa3, fa2
-; CHECK-NEXT:    fmul.d fa0, ft1, ft0
+; CHECK-NEXT:    fmul.d fa5, fa0, fa1
+; CHECK-NEXT:    fmul.d fa4, fa3, fa2
+; CHECK-NEXT:    fmul.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fmul nsz reassoc double %a0, %a1
   %t1 = fmul nsz reassoc double %t0, %a2
@@ -101,9 +101,9 @@ define double @test_reassoc_fmul3(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fmul4(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fmul4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmul.d ft0, fa0, fa1
-; CHECK-NEXT:    fmul.d ft1, fa3, fa2
-; CHECK-NEXT:    fmul.d fa0, ft1, ft0
+; CHECK-NEXT:    fmul.d fa5, fa0, fa1
+; CHECK-NEXT:    fmul.d fa4, fa3, fa2
+; CHECK-NEXT:    fmul.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fmul nsz reassoc double %a0, %a1
   %t1 = fmul nsz reassoc double %a2, %t0
@@ -114,12 +114,12 @@ define double @test_reassoc_fmul4(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_big1(double %a0, double %a1, double %a2, double %a3, double %a4, double %a5, double %a6) {
 ; CHECK-LABEL: test_reassoc_big1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fadd.d ft1, fa2, fa3
-; CHECK-NEXT:    fadd.d ft2, fa4, fa5
-; CHECK-NEXT:    fadd.d ft0, ft0, ft1
-; CHECK-NEXT:    fadd.d ft1, ft2, fa6
-; CHECK-NEXT:    fadd.d fa0, ft0, ft1
+; CHECK-NEXT:    fadd.d fa1, fa0, fa1
+; CHECK-NEXT:    fadd.d fa3, fa2, fa3
+; CHECK-NEXT:    fadd.d fa5, fa4, fa5
+; CHECK-NEXT:    fadd.d fa4, fa1, fa3
+; CHECK-NEXT:    fadd.d fa5, fa5, fa6
+; CHECK-NEXT:    fadd.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fadd nsz reassoc double %t0, %a2
@@ -133,18 +133,18 @@ define double @test_reassoc_big1(double %a0, double %a1, double %a2, double %a3,
 define double @test_reassoc_big2(double %a0, double %a1, i32 %a2, double %a3, i32 %a4, double %a5) {
 ; CHECK-LABEL: test_reassoc_big2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fsub.d ft1, fa3, fa2
-; CHECK-NEXT:    fadd.d ft2, fa2, fa1
-; CHECK-NEXT:    fcvt.d.w ft3, a0
-; CHECK-NEXT:    fcvt.d.w ft4, a1
-; CHECK-NEXT:    fmul.d ft3, fa2, ft3
-; CHECK-NEXT:    fmul.d ft4, ft4, fa1
-; CHECK-NEXT:    fsub.d ft0, ft1, ft0
-; CHECK-NEXT:    fmul.d ft1, fa0, ft2
-; CHECK-NEXT:    fmul.d ft2, ft4, ft3
-; CHECK-NEXT:    fmul.d ft0, ft0, ft1
-; CHECK-NEXT:    fmul.d fa0, ft0, ft2
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fsub.d fa4, fa3, fa2
+; CHECK-NEXT:    fadd.d fa3, fa2, fa1
+; CHECK-NEXT:    fcvt.d.w ft0, a0
+; CHECK-NEXT:    fcvt.d.w ft1, a1
+; CHECK-NEXT:    fmul.d fa2, fa2, ft0
+; CHECK-NEXT:    fmul.d fa1, ft1, fa1
+; CHECK-NEXT:    fsub.d fa5, fa4, fa5
+; CHECK-NEXT:    fmul.d fa4, fa0, fa3
+; CHECK-NEXT:    fmul.d fa3, fa1, fa2
+; CHECK-NEXT:    fmul.d fa5, fa5, fa4
+; CHECK-NEXT:    fmul.d fa0, fa5, fa3
 ; CHECK-NEXT:    ret
   %cvt1 = sitofp i32 %a2 to double
   %cvt2 = sitofp i32 %a4 to double
@@ -165,9 +165,9 @@ define double @test_reassoc_big2(double %a0, double %a1, i32 %a2, double %a3, i3
 define double @test_reassoc_fadd_flags_1(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fadd_flags_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fadd.d ft0, ft0, fa2
-; CHECK-NEXT:    fadd.d fa0, ft0, fa3
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fadd.d fa5, fa5, fa2
+; CHECK-NEXT:    fadd.d fa0, fa5, fa3
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fadd double %t0, %a2
@@ -179,9 +179,9 @@ define double @test_reassoc_fadd_flags_1(double %a0, double %a1, double %a2, dou
 define double @test_reassoc_fadd_flags_2(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fadd_flags_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fadd.d ft0, ft0, fa2
-; CHECK-NEXT:    fadd.d fa0, ft0, fa3
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fadd.d fa5, fa5, fa2
+; CHECK-NEXT:    fadd.d fa0, fa5, fa3
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fadd nsz reassoc double %t0, %a2
@@ -192,9 +192,9 @@ define double @test_reassoc_fadd_flags_2(double %a0, double %a1, double %a2, dou
 define double @test_fmadd1(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_fmadd1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmadd.d ft0, fa0, fa1, fa2
-; CHECK-NEXT:    fmadd.d ft1, fa0, fa1, fa3
-; CHECK-NEXT:    fadd.d fa0, ft0, ft1
+; CHECK-NEXT:    fmadd.d fa5, fa0, fa1, fa2
+; CHECK-NEXT:    fmadd.d fa4, fa0, fa1, fa3
+; CHECK-NEXT:    fadd.d fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %t0 = fmul contract double %a0, %a1
   %t1 = fadd contract double %t0, %a2
@@ -206,9 +206,9 @@ define double @test_fmadd1(double %a0, double %a1, double %a2, double %a3) {
 define double @test_fmadd2(double %a0, double %a1, double %a2) {
 ; CHECK-LABEL: test_fmadd2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmul.d ft0, fa0, fa1
-; CHECK-NEXT:    fmadd.d ft1, fa0, fa1, fa2
-; CHECK-NEXT:    fdiv.d fa0, ft1, ft0
+; CHECK-NEXT:    fmul.d fa5, fa0, fa1
+; CHECK-NEXT:    fmadd.d fa4, fa0, fa1, fa2
+; CHECK-NEXT:    fdiv.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fmul contract double %a0, %a1
   %t1 = fadd contract double %t0, %a2
@@ -219,9 +219,9 @@ define double @test_fmadd2(double %a0, double %a1, double %a2) {
 define double @test_fmsub(double %a0, double %a1, double %a2) {
 ; CHECK-LABEL: test_fmsub:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmul.d ft0, fa0, fa1
-; CHECK-NEXT:    fmsub.d ft1, fa0, fa1, fa2
-; CHECK-NEXT:    fdiv.d fa0, ft1, ft0
+; CHECK-NEXT:    fmul.d fa5, fa0, fa1
+; CHECK-NEXT:    fmsub.d fa4, fa0, fa1, fa2
+; CHECK-NEXT:    fdiv.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fmul contract double %a0, %a1
   %t1 = fsub contract double %t0, %a2
@@ -232,9 +232,9 @@ define double @test_fmsub(double %a0, double %a1, double %a2) {
 define double @test_fnmsub(double %a0, double %a1, double %a2) {
 ; CHECK-LABEL: test_fnmsub:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmul.d ft0, fa0, fa1
-; CHECK-NEXT:    fnmsub.d ft1, fa0, fa1, fa2
-; CHECK-NEXT:    fdiv.d fa0, ft1, ft0
+; CHECK-NEXT:    fmul.d fa5, fa0, fa1
+; CHECK-NEXT:    fnmsub.d fa4, fa0, fa1, fa2
+; CHECK-NEXT:    fdiv.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fmul contract double %a0, %a1
   %t1 = fsub contract double %a2, %t0
@@ -245,9 +245,9 @@ define double @test_fnmsub(double %a0, double %a1, double %a2) {
 define double @test_reassoc_fsub1(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fsub1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fsub.d ft1, fa2, fa3
-; CHECK-NEXT:    fadd.d fa0, ft0, ft1
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fsub.d fa4, fa2, fa3
+; CHECK-NEXT:    fadd.d fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fadd nsz reassoc double %t0, %a2
@@ -258,9 +258,9 @@ define double @test_reassoc_fsub1(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fsub2(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fsub2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fsub.d ft1, fa2, fa3
-; CHECK-NEXT:    fsub.d fa0, ft0, ft1
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fsub.d fa4, fa2, fa3
+; CHECK-NEXT:    fsub.d fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fsub nsz reassoc double %t0, %a2
@@ -271,9 +271,9 @@ define double @test_reassoc_fsub2(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fsub3(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fsub3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fadd.d ft1, fa2, fa3
-; CHECK-NEXT:    fsub.d fa0, ft0, ft1
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fadd.d fa4, fa2, fa3
+; CHECK-NEXT:    fsub.d fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fsub nsz reassoc double %t0, %a2
@@ -284,9 +284,9 @@ define double @test_reassoc_fsub3(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fsub4(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fsub4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fsub.d ft1, fa2, fa3
-; CHECK-NEXT:    fadd.d fa0, ft1, ft0
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fsub.d fa4, fa2, fa3
+; CHECK-NEXT:    fadd.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fadd nsz reassoc double %a2, %t0
@@ -297,9 +297,9 @@ define double @test_reassoc_fsub4(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fsub5(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fsub5:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fadd.d ft1, fa2, fa3
-; CHECK-NEXT:    fsub.d fa0, ft1, ft0
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fadd.d fa4, fa2, fa3
+; CHECK-NEXT:    fsub.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fsub nsz reassoc double %a2, %t0
@@ -310,9 +310,9 @@ define double @test_reassoc_fsub5(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fsub6(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fsub6:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fsub.d ft1, fa2, fa3
-; CHECK-NEXT:    fsub.d fa0, ft1, ft0
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fsub.d fa4, fa2, fa3
+; CHECK-NEXT:    fsub.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fsub nsz reassoc double %a2, %t0
@@ -323,9 +323,9 @@ define double @test_reassoc_fsub6(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fsub7(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fsub7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fsub.d ft1, fa3, fa2
-; CHECK-NEXT:    fsub.d fa0, ft1, ft0
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fsub.d fa4, fa3, fa2
+; CHECK-NEXT:    fsub.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fadd nsz reassoc double %t0, %a2
@@ -336,9 +336,9 @@ define double @test_reassoc_fsub7(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fsub8(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fsub8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fsub.d ft1, fa3, fa2
-; CHECK-NEXT:    fadd.d fa0, ft1, ft0
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fsub.d fa4, fa3, fa2
+; CHECK-NEXT:    fadd.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fsub nsz reassoc double %t0, %a2
@@ -349,9 +349,9 @@ define double @test_reassoc_fsub8(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fsub9(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fsub9:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fadd.d ft1, fa3, fa2
-; CHECK-NEXT:    fsub.d fa0, ft1, ft0
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fadd.d fa4, fa3, fa2
+; CHECK-NEXT:    fsub.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fsub nsz reassoc double %t0, %a2
@@ -362,9 +362,9 @@ define double @test_reassoc_fsub9(double %a0, double %a1, double %a2, double %a3
 define double @test_reassoc_fsub10(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fsub10:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fsub.d ft1, fa3, fa2
-; CHECK-NEXT:    fsub.d fa0, ft1, ft0
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fsub.d fa4, fa3, fa2
+; CHECK-NEXT:    fsub.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fadd nsz reassoc double %a2, %t0
@@ -375,9 +375,9 @@ define double @test_reassoc_fsub10(double %a0, double %a1, double %a2, double %a
 define double @test_reassoc_fsub11(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fsub11:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fadd.d ft1, fa3, fa2
-; CHECK-NEXT:    fsub.d fa0, ft1, ft0
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fadd.d fa4, fa3, fa2
+; CHECK-NEXT:    fsub.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fsub nsz reassoc double %a2, %t0
@@ -388,9 +388,9 @@ define double @test_reassoc_fsub11(double %a0, double %a1, double %a2, double %a
 define double @test_reassoc_fsub12(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_reassoc_fsub12:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fadd.d ft0, fa0, fa1
-; CHECK-NEXT:    fsub.d ft1, fa3, fa2
-; CHECK-NEXT:    fadd.d fa0, ft1, ft0
+; CHECK-NEXT:    fadd.d fa5, fa0, fa1
+; CHECK-NEXT:    fsub.d fa4, fa3, fa2
+; CHECK-NEXT:    fadd.d fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %t0 = fadd nsz reassoc double %a0, %a1
   %t1 = fsub nsz reassoc double %a2, %t0
@@ -995,9 +995,9 @@ define i64 @test_reassoc_max_i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3) {
 define half @test_fmin_f16(half %a0, half %a1, half %a2, half %a3) {
 ; CHECK-LABEL: test_fmin_f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmin.h ft0, fa0, fa1
-; CHECK-NEXT:    fmin.h ft1, fa2, fa3
-; CHECK-NEXT:    fmin.h fa0, ft0, ft1
+; CHECK-NEXT:    fmin.h fa5, fa0, fa1
+; CHECK-NEXT:    fmin.h fa4, fa2, fa3
+; CHECK-NEXT:    fmin.h fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %t0 = call half @llvm.minnum.f16(half %a0, half %a1)
   %t1 = call half @llvm.minnum.f16(half %t0, half %a2)
@@ -1008,9 +1008,9 @@ define half @test_fmin_f16(half %a0, half %a1, half %a2, half %a3) {
 define float @test_fmin_f32(float %a0, float %a1, float %a2, float %a3) {
 ; CHECK-LABEL: test_fmin_f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmin.s ft0, fa0, fa1
-; CHECK-NEXT:    fmin.s ft1, fa2, fa3
-; CHECK-NEXT:    fmin.s fa0, ft0, ft1
+; CHECK-NEXT:    fmin.s fa5, fa0, fa1
+; CHECK-NEXT:    fmin.s fa4, fa2, fa3
+; CHECK-NEXT:    fmin.s fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %t0 = call float @llvm.minnum.f32(float %a0, float %a1)
   %t1 = call float @llvm.minnum.f32(float %t0, float %a2)
@@ -1021,9 +1021,9 @@ define float @test_fmin_f32(float %a0, float %a1, float %a2, float %a3) {
 define double @test_fmin_f64(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_fmin_f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmin.d ft0, fa0, fa1
-; CHECK-NEXT:    fmin.d ft1, fa2, fa3
-; CHECK-NEXT:    fmin.d fa0, ft0, ft1
+; CHECK-NEXT:    fmin.d fa5, fa0, fa1
+; CHECK-NEXT:    fmin.d fa4, fa2, fa3
+; CHECK-NEXT:    fmin.d fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %t0 = call double @llvm.minnum.f64(double %a0, double %a1)
   %t1 = call double @llvm.minnum.f64(double %t0, double %a2)
@@ -1034,9 +1034,9 @@ define double @test_fmin_f64(double %a0, double %a1, double %a2, double %a3) {
 define half @test_fmax_f16(half %a0, half %a1, half %a2, half %a3) {
 ; CHECK-LABEL: test_fmax_f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmax.h ft0, fa0, fa1
-; CHECK-NEXT:    fmax.h ft1, fa2, fa3
-; CHECK-NEXT:    fmax.h fa0, ft0, ft1
+; CHECK-NEXT:    fmax.h fa5, fa0, fa1
+; CHECK-NEXT:    fmax.h fa4, fa2, fa3
+; CHECK-NEXT:    fmax.h fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %t0 = call half @llvm.maxnum.f16(half %a0, half %a1)
   %t1 = call half @llvm.maxnum.f16(half %t0, half %a2)
@@ -1047,9 +1047,9 @@ define half @test_fmax_f16(half %a0, half %a1, half %a2, half %a3) {
 define float @test_fmax_f32(float %a0, float %a1, float %a2, float %a3) {
 ; CHECK-LABEL: test_fmax_f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmax.s ft0, fa0, fa1
-; CHECK-NEXT:    fmax.s ft1, fa2, fa3
-; CHECK-NEXT:    fmax.s fa0, ft0, ft1
+; CHECK-NEXT:    fmax.s fa5, fa0, fa1
+; CHECK-NEXT:    fmax.s fa4, fa2, fa3
+; CHECK-NEXT:    fmax.s fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %t0 = call float @llvm.maxnum.f32(float %a0, float %a1)
   %t1 = call float @llvm.maxnum.f32(float %t0, float %a2)
@@ -1060,9 +1060,9 @@ define float @test_fmax_f32(float %a0, float %a1, float %a2, float %a3) {
 define double @test_fmax_f64(double %a0, double %a1, double %a2, double %a3) {
 ; CHECK-LABEL: test_fmax_f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmax.d ft0, fa0, fa1
-; CHECK-NEXT:    fmax.d ft1, fa2, fa3
-; CHECK-NEXT:    fmax.d fa0, ft0, ft1
+; CHECK-NEXT:    fmax.d fa5, fa0, fa1
+; CHECK-NEXT:    fmax.d fa4, fa2, fa3
+; CHECK-NEXT:    fmax.d fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %t0 = call double @llvm.maxnum.f64(double %a0, double %a1)
   %t1 = call double @llvm.maxnum.f64(double %t0, double %a2)
@@ -1096,29 +1096,29 @@ declare double @llvm.maxnum.f64(double, double)
 define double @test_fmadd_strategy(double %a0, double %a1, double %a2, double %a3, i64 %flag) {
 ; CHECK_LOCAL-LABEL: test_fmadd_strategy:
 ; CHECK_LOCAL:       # %bb.0: # %entry
-; CHECK_LOCAL-NEXT:    fmv.d ft0, fa0
-; CHECK_LOCAL-NEXT:    fsub.d ft1, fa0, fa1
-; CHECK_LOCAL-NEXT:    fmul.d fa0, ft1, fa2
+; CHECK_LOCAL-NEXT:    fmv.d fa5, fa0
+; CHECK_LOCAL-NEXT:    fsub.d fa4, fa0, fa1
+; CHECK_LOCAL-NEXT:    fmul.d fa0, fa4, fa2
 ; CHECK_LOCAL-NEXT:    andi a0, a0, 1
 ; CHECK_LOCAL-NEXT:    beqz a0, .LBB76_2
 ; CHECK_LOCAL-NEXT:  # %bb.1: # %entry
-; CHECK_LOCAL-NEXT:    fmul.d ft1, ft0, fa1
-; CHECK_LOCAL-NEXT:    fmadd.d ft0, ft0, fa1, fa0
-; CHECK_LOCAL-NEXT:    fsub.d fa0, ft0, ft1
+; CHECK_LOCAL-NEXT:    fmul.d fa4, fa5, fa1
+; CHECK_LOCAL-NEXT:    fmadd.d fa5, fa5, fa1, fa0
+; CHECK_LOCAL-NEXT:    fsub.d fa0, fa5, fa4
 ; CHECK_LOCAL-NEXT:  .LBB76_2: # %entry
 ; CHECK_LOCAL-NEXT:    ret
 ;
 ; CHECK_GLOBAL-LABEL: test_fmadd_strategy:
 ; CHECK_GLOBAL:       # %bb.0: # %entry
-; CHECK_GLOBAL-NEXT:    fmv.d ft0, fa0
-; CHECK_GLOBAL-NEXT:    fsub.d ft1, fa0, fa1
-; CHECK_GLOBAL-NEXT:    fmul.d fa0, ft1, fa2
+; CHECK_GLOBAL-NEXT:    fmv.d fa5, fa0
+; CHECK_GLOBAL-NEXT:    fsub.d fa4, fa0, fa1
+; CHECK_GLOBAL-NEXT:    fmul.d fa0, fa4, fa2
 ; CHECK_GLOBAL-NEXT:    andi a0, a0, 1
 ; CHECK_GLOBAL-NEXT:    beqz a0, .LBB76_2
 ; CHECK_GLOBAL-NEXT:  # %bb.1: # %entry
-; CHECK_GLOBAL-NEXT:    fmul.d ft0, ft0, fa1
-; CHECK_GLOBAL-NEXT:    fadd.d ft1, ft0, fa0
-; CHECK_GLOBAL-NEXT:    fsub.d fa0, ft1, ft0
+; CHECK_GLOBAL-NEXT:    fmul.d fa5, fa5, fa1
+; CHECK_GLOBAL-NEXT:    fadd.d fa4, fa5, fa0
+; CHECK_GLOBAL-NEXT:    fsub.d fa0, fa4, fa5
 ; CHECK_GLOBAL-NEXT:  .LBB76_2: # %entry
 ; CHECK_GLOBAL-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/machine-cse.ll b/llvm/test/CodeGen/RISCV/machine-cse.ll
index 5b14a5d3cb2c..58cc042f90e4 100644
--- a/llvm/test/CodeGen/RISCV/machine-cse.ll
+++ b/llvm/test/CodeGen/RISCV/machine-cse.ll
@@ -84,21 +84,21 @@ declare half @llvm.fma.f16(half, half, half)
 define void @commute_fmadd_f16(half %x, half %y, half %z, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fmadd_f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fmadd.h ft0, fa0, fa1, fa2
-; RV32-NEXT:    fsh ft0, 0(a0)
+; RV32-NEXT:    fmadd.h fa5, fa0, fa1, fa2
+; RV32-NEXT:    fsh fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB2_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsh ft0, 0(a0)
+; RV32-NEXT:    fsh fa5, 0(a0)
 ; RV32-NEXT:  .LBB2_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fmadd_f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fmadd.h ft0, fa0, fa1, fa2
-; RV64-NEXT:    fsh ft0, 0(a0)
+; RV64-NEXT:    fmadd.h fa5, fa0, fa1, fa2
+; RV64-NEXT:    fsh fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB2_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsh ft0, 0(a0)
+; RV64-NEXT:    fsh fa5, 0(a0)
 ; RV64-NEXT:  .LBB2_2: # %falseblock
 ; RV64-NEXT:    ret
   %a = call half @llvm.fma.f16(half %x, half %y, half %z)
@@ -119,21 +119,21 @@ declare float @llvm.fma.f32(float, float, float)
 define void @commute_fmadd_f32(float %x, float %y, float %z, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fmadd_f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fmadd.s ft0, fa0, fa1, fa2
-; RV32-NEXT:    fsw ft0, 0(a0)
+; RV32-NEXT:    fmadd.s fa5, fa0, fa1, fa2
+; RV32-NEXT:    fsw fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB3_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsw ft0, 0(a0)
+; RV32-NEXT:    fsw fa5, 0(a0)
 ; RV32-NEXT:  .LBB3_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fmadd_f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fmadd.s ft0, fa0, fa1, fa2
-; RV64-NEXT:    fsw ft0, 0(a0)
+; RV64-NEXT:    fmadd.s fa5, fa0, fa1, fa2
+; RV64-NEXT:    fsw fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB3_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsw ft0, 0(a0)
+; RV64-NEXT:    fsw fa5, 0(a0)
 ; RV64-NEXT:  .LBB3_2: # %falseblock
 ; RV64-NEXT:    ret
   %a = call float @llvm.fma.f32(float %x, float %y, float %z)
@@ -154,21 +154,21 @@ declare double @llvm.fma.f64(double, double, double)
 define void @commute_fmadd_f64(double %x, double %y, double %z, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fmadd_f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fmadd.d ft0, fa0, fa1, fa2
-; RV32-NEXT:    fsd ft0, 0(a0)
+; RV32-NEXT:    fmadd.d fa5, fa0, fa1, fa2
+; RV32-NEXT:    fsd fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB4_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsd ft0, 0(a0)
+; RV32-NEXT:    fsd fa5, 0(a0)
 ; RV32-NEXT:  .LBB4_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fmadd_f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fmadd.d ft0, fa0, fa1, fa2
-; RV64-NEXT:    fsd ft0, 0(a0)
+; RV64-NEXT:    fmadd.d fa5, fa0, fa1, fa2
+; RV64-NEXT:    fsd fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB4_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsd ft0, 0(a0)
+; RV64-NEXT:    fsd fa5, 0(a0)
 ; RV64-NEXT:  .LBB4_2: # %falseblock
 ; RV64-NEXT:    ret
   %a = call double @llvm.fma.f64(double %x, double %y, double %z)
@@ -187,21 +187,21 @@ falseblock:
 define void @commute_fmsub_f16(half %x, half %y, half %z, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fmsub_f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fmsub.h ft0, fa0, fa1, fa2
-; RV32-NEXT:    fsh ft0, 0(a0)
+; RV32-NEXT:    fmsub.h fa5, fa0, fa1, fa2
+; RV32-NEXT:    fsh fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB5_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsh ft0, 0(a0)
+; RV32-NEXT:    fsh fa5, 0(a0)
 ; RV32-NEXT:  .LBB5_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fmsub_f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fmsub.h ft0, fa0, fa1, fa2
-; RV64-NEXT:    fsh ft0, 0(a0)
+; RV64-NEXT:    fmsub.h fa5, fa0, fa1, fa2
+; RV64-NEXT:    fsh fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB5_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsh ft0, 0(a0)
+; RV64-NEXT:    fsh fa5, 0(a0)
 ; RV64-NEXT:  .LBB5_2: # %falseblock
 ; RV64-NEXT:    ret
   %negz = fneg half %z
@@ -222,21 +222,21 @@ falseblock:
 define void @commute_fmsub_f32(float %x, float %y, float %z, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fmsub_f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fmsub.s ft0, fa0, fa1, fa2
-; RV32-NEXT:    fsw ft0, 0(a0)
+; RV32-NEXT:    fmsub.s fa5, fa0, fa1, fa2
+; RV32-NEXT:    fsw fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB6_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsw ft0, 0(a0)
+; RV32-NEXT:    fsw fa5, 0(a0)
 ; RV32-NEXT:  .LBB6_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fmsub_f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fmsub.s ft0, fa0, fa1, fa2
-; RV64-NEXT:    fsw ft0, 0(a0)
+; RV64-NEXT:    fmsub.s fa5, fa0, fa1, fa2
+; RV64-NEXT:    fsw fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB6_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsw ft0, 0(a0)
+; RV64-NEXT:    fsw fa5, 0(a0)
 ; RV64-NEXT:  .LBB6_2: # %falseblock
 ; RV64-NEXT:    ret
   %negz = fneg float %z
@@ -257,21 +257,21 @@ falseblock:
 define void @commute_fmsub_f64(double %x, double %y, double %z, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fmsub_f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fmsub.d ft0, fa0, fa1, fa2
-; RV32-NEXT:    fsd ft0, 0(a0)
+; RV32-NEXT:    fmsub.d fa5, fa0, fa1, fa2
+; RV32-NEXT:    fsd fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB7_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsd ft0, 0(a0)
+; RV32-NEXT:    fsd fa5, 0(a0)
 ; RV32-NEXT:  .LBB7_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fmsub_f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fmsub.d ft0, fa0, fa1, fa2
-; RV64-NEXT:    fsd ft0, 0(a0)
+; RV64-NEXT:    fmsub.d fa5, fa0, fa1, fa2
+; RV64-NEXT:    fsd fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB7_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsd ft0, 0(a0)
+; RV64-NEXT:    fsd fa5, 0(a0)
 ; RV64-NEXT:  .LBB7_2: # %falseblock
 ; RV64-NEXT:    ret
   %negz = fneg double %z
@@ -292,21 +292,21 @@ falseblock:
 define void @commute_fnmadd_f16(half %x, half %y, half %z, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fnmadd_f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fnmadd.h ft0, fa0, fa1, fa2
-; RV32-NEXT:    fsh ft0, 0(a0)
+; RV32-NEXT:    fnmadd.h fa5, fa0, fa1, fa2
+; RV32-NEXT:    fsh fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB8_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsh ft0, 0(a0)
+; RV32-NEXT:    fsh fa5, 0(a0)
 ; RV32-NEXT:  .LBB8_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fnmadd_f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fnmadd.h ft0, fa0, fa1, fa2
-; RV64-NEXT:    fsh ft0, 0(a0)
+; RV64-NEXT:    fnmadd.h fa5, fa0, fa1, fa2
+; RV64-NEXT:    fsh fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB8_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsh ft0, 0(a0)
+; RV64-NEXT:    fsh fa5, 0(a0)
 ; RV64-NEXT:  .LBB8_2: # %falseblock
 ; RV64-NEXT:    ret
   %negx = fneg half %x
@@ -329,21 +329,21 @@ falseblock:
 define void @commute_fnmadd_f32(float %x, float %y, float %z, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fnmadd_f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fnmadd.s ft0, fa0, fa1, fa2
-; RV32-NEXT:    fsw ft0, 0(a0)
+; RV32-NEXT:    fnmadd.s fa5, fa0, fa1, fa2
+; RV32-NEXT:    fsw fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB9_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsw ft0, 0(a0)
+; RV32-NEXT:    fsw fa5, 0(a0)
 ; RV32-NEXT:  .LBB9_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fnmadd_f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fnmadd.s ft0, fa0, fa1, fa2
-; RV64-NEXT:    fsw ft0, 0(a0)
+; RV64-NEXT:    fnmadd.s fa5, fa0, fa1, fa2
+; RV64-NEXT:    fsw fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB9_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsw ft0, 0(a0)
+; RV64-NEXT:    fsw fa5, 0(a0)
 ; RV64-NEXT:  .LBB9_2: # %falseblock
 ; RV64-NEXT:    ret
   %negx = fneg float %x
@@ -366,21 +366,21 @@ falseblock:
 define void @commute_fnmadd_f64(double %x, double %y, double %z, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fnmadd_f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fnmadd.d ft0, fa0, fa1, fa2
-; RV32-NEXT:    fsd ft0, 0(a0)
+; RV32-NEXT:    fnmadd.d fa5, fa0, fa1, fa2
+; RV32-NEXT:    fsd fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB10_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsd ft0, 0(a0)
+; RV32-NEXT:    fsd fa5, 0(a0)
 ; RV32-NEXT:  .LBB10_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fnmadd_f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fnmadd.d ft0, fa0, fa1, fa2
-; RV64-NEXT:    fsd ft0, 0(a0)
+; RV64-NEXT:    fnmadd.d fa5, fa0, fa1, fa2
+; RV64-NEXT:    fsd fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB10_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsd ft0, 0(a0)
+; RV64-NEXT:    fsd fa5, 0(a0)
 ; RV64-NEXT:  .LBB10_2: # %falseblock
 ; RV64-NEXT:    ret
   %negx = fneg double %x
@@ -403,21 +403,21 @@ falseblock:
 define void @commute_fnmsub_f16(half %x, half %y, half %z, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fnmsub_f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fnmsub.h ft0, fa0, fa1, fa2
-; RV32-NEXT:    fsh ft0, 0(a0)
+; RV32-NEXT:    fnmsub.h fa5, fa0, fa1, fa2
+; RV32-NEXT:    fsh fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB11_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsh ft0, 0(a0)
+; RV32-NEXT:    fsh fa5, 0(a0)
 ; RV32-NEXT:  .LBB11_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fnmsub_f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fnmsub.h ft0, fa0, fa1, fa2
-; RV64-NEXT:    fsh ft0, 0(a0)
+; RV64-NEXT:    fnmsub.h fa5, fa0, fa1, fa2
+; RV64-NEXT:    fsh fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB11_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsh ft0, 0(a0)
+; RV64-NEXT:    fsh fa5, 0(a0)
 ; RV64-NEXT:  .LBB11_2: # %falseblock
 ; RV64-NEXT:    ret
   %negx = fneg half %x
@@ -438,21 +438,21 @@ falseblock:
 define void @commute_fnmsub_f32(float %x, float %y, float %z, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fnmsub_f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fnmsub.s ft0, fa0, fa1, fa2
-; RV32-NEXT:    fsw ft0, 0(a0)
+; RV32-NEXT:    fnmsub.s fa5, fa0, fa1, fa2
+; RV32-NEXT:    fsw fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB12_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsw ft0, 0(a0)
+; RV32-NEXT:    fsw fa5, 0(a0)
 ; RV32-NEXT:  .LBB12_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fnmsub_f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fnmsub.s ft0, fa0, fa1, fa2
-; RV64-NEXT:    fsw ft0, 0(a0)
+; RV64-NEXT:    fnmsub.s fa5, fa0, fa1, fa2
+; RV64-NEXT:    fsw fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB12_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsw ft0, 0(a0)
+; RV64-NEXT:    fsw fa5, 0(a0)
 ; RV64-NEXT:  .LBB12_2: # %falseblock
 ; RV64-NEXT:    ret
   %negx = fneg float %x
@@ -473,21 +473,21 @@ falseblock:
 define void @commute_fnmsub_f64(double %x, double %y, double %z, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fnmsub_f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fnmsub.d ft0, fa0, fa1, fa2
-; RV32-NEXT:    fsd ft0, 0(a0)
+; RV32-NEXT:    fnmsub.d fa5, fa0, fa1, fa2
+; RV32-NEXT:    fsd fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB13_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsd ft0, 0(a0)
+; RV32-NEXT:    fsd fa5, 0(a0)
 ; RV32-NEXT:  .LBB13_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fnmsub_f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fnmsub.d ft0, fa0, fa1, fa2
-; RV64-NEXT:    fsd ft0, 0(a0)
+; RV64-NEXT:    fnmsub.d fa5, fa0, fa1, fa2
+; RV64-NEXT:    fsd fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB13_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsd ft0, 0(a0)
+; RV64-NEXT:    fsd fa5, 0(a0)
 ; RV64-NEXT:  .LBB13_2: # %falseblock
 ; RV64-NEXT:    ret
   %negx = fneg double %x
@@ -508,21 +508,21 @@ falseblock:
 define void @commute_fadd_f16(half %x, half %y, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fadd_f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fadd.h ft0, fa0, fa1
-; RV32-NEXT:    fsh ft0, 0(a0)
+; RV32-NEXT:    fadd.h fa5, fa0, fa1
+; RV32-NEXT:    fsh fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB14_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsh ft0, 0(a0)
+; RV32-NEXT:    fsh fa5, 0(a0)
 ; RV32-NEXT:  .LBB14_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fadd_f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fadd.h ft0, fa0, fa1
-; RV64-NEXT:    fsh ft0, 0(a0)
+; RV64-NEXT:    fadd.h fa5, fa0, fa1
+; RV64-NEXT:    fsh fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB14_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsh ft0, 0(a0)
+; RV64-NEXT:    fsh fa5, 0(a0)
 ; RV64-NEXT:  .LBB14_2: # %falseblock
 ; RV64-NEXT:    ret
   %a = fadd half %x, %y
@@ -541,21 +541,21 @@ falseblock:
 define void @commute_fadd_f32(float %x, float %y, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fadd_f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fadd.s ft0, fa0, fa1
-; RV32-NEXT:    fsw ft0, 0(a0)
+; RV32-NEXT:    fadd.s fa5, fa0, fa1
+; RV32-NEXT:    fsw fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB15_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsw ft0, 0(a0)
+; RV32-NEXT:    fsw fa5, 0(a0)
 ; RV32-NEXT:  .LBB15_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fadd_f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fadd.s ft0, fa0, fa1
-; RV64-NEXT:    fsw ft0, 0(a0)
+; RV64-NEXT:    fadd.s fa5, fa0, fa1
+; RV64-NEXT:    fsw fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB15_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsw ft0, 0(a0)
+; RV64-NEXT:    fsw fa5, 0(a0)
 ; RV64-NEXT:  .LBB15_2: # %falseblock
 ; RV64-NEXT:    ret
   %a = fadd float %x, %y
@@ -574,21 +574,21 @@ falseblock:
 define void @commute_fadd_f64(double %x, double %y, ptr %p1, ptr %p2, i1 zeroext %cond) {
 ; RV32-LABEL: commute_fadd_f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fadd.d ft0, fa0, fa1
-; RV32-NEXT:    fsd ft0, 0(a0)
+; RV32-NEXT:    fadd.d fa5, fa0, fa1
+; RV32-NEXT:    fsd fa5, 0(a0)
 ; RV32-NEXT:    beqz a2, .LBB16_2
 ; RV32-NEXT:  # %bb.1: # %trueblock
-; RV32-NEXT:    fsd ft0, 0(a0)
+; RV32-NEXT:    fsd fa5, 0(a0)
 ; RV32-NEXT:  .LBB16_2: # %falseblock
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: commute_fadd_f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fadd.d ft0, fa0, fa1
-; RV64-NEXT:    fsd ft0, 0(a0)
+; RV64-NEXT:    fadd.d fa5, fa0, fa1
+; RV64-NEXT:    fsd fa5, 0(a0)
 ; RV64-NEXT:    beqz a2, .LBB16_2
 ; RV64-NEXT:  # %bb.1: # %trueblock
-; RV64-NEXT:    fsd ft0, 0(a0)
+; RV64-NEXT:    fsd fa5, 0(a0)
 ; RV64-NEXT:  .LBB16_2: # %falseblock
 ; RV64-NEXT:    ret
   %a = fadd double %x, %y

diff --git a/llvm/test/CodeGen/RISCV/make-compressible.mir b/llvm/test/CodeGen/RISCV/make-compressible.mir
index f49868402c24..e526b131a017 100644
--- a/llvm/test/CodeGen/RISCV/make-compressible.mir
+++ b/llvm/test/CodeGen/RISCV/make-compressible.mir
@@ -330,10 +330,10 @@ body:             |
     ; RV32-LABEL: name: store_common_value_float
     ; RV32: liveins: $x10, $x11, $x12, $f16_f
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: $f10_f = FSGNJ_S $f16_f, $f16_f
-    ; RV32-NEXT: FSW $f10_f, killed renamable $x10, 0 :: (store (s32) into %ir.a)
-    ; RV32-NEXT: FSW $f10_f, killed renamable $x11, 0 :: (store (s32) into %ir.b)
-    ; RV32-NEXT: FSW killed $f10_f, killed renamable $x12, 0 :: (store (s32) into %ir.c)
+    ; RV32-NEXT: $f15_f = FSGNJ_S $f16_f, $f16_f
+    ; RV32-NEXT: FSW $f15_f, killed renamable $x10, 0 :: (store (s32) into %ir.a)
+    ; RV32-NEXT: FSW $f15_f, killed renamable $x11, 0 :: (store (s32) into %ir.b)
+    ; RV32-NEXT: FSW killed $f15_f, killed renamable $x12, 0 :: (store (s32) into %ir.c)
     ; RV32-NEXT: PseudoRET
     ; RV64-LABEL: name: store_common_value_float
     ; RV64: liveins: $x10, $x11, $x12, $f16_f
@@ -358,18 +358,18 @@ body:             |
     ; RV32-LABEL: name: store_common_value_double
     ; RV32: liveins: $x10, $x11, $x12, $f16_d
     ; RV32-NEXT: {{  $}}
-    ; RV32-NEXT: $f10_d = FSGNJ_D $f16_d, $f16_d
-    ; RV32-NEXT: FSD $f10_d, killed renamable $x10, 0 :: (store (s64) into %ir.a)
-    ; RV32-NEXT: FSD $f10_d, killed renamable $x11, 0 :: (store (s64) into %ir.b)
-    ; RV32-NEXT: FSD killed $f10_d, killed renamable $x12, 0 :: (store (s64) into %ir.c)
+    ; RV32-NEXT: $f15_d = FSGNJ_D $f16_d, $f16_d
+    ; RV32-NEXT: FSD $f15_d, killed renamable $x10, 0 :: (store (s64) into %ir.a)
+    ; RV32-NEXT: FSD $f15_d, killed renamable $x11, 0 :: (store (s64) into %ir.b)
+    ; RV32-NEXT: FSD killed $f15_d, killed renamable $x12, 0 :: (store (s64) into %ir.c)
     ; RV32-NEXT: PseudoRET
     ; RV64-LABEL: name: store_common_value_double
     ; RV64: liveins: $x10, $x11, $x12, $f16_d
     ; RV64-NEXT: {{  $}}
-    ; RV64-NEXT: $f10_d = FSGNJ_D $f16_d, $f16_d
-    ; RV64-NEXT: FSD $f10_d, killed renamable $x10, 0 :: (store (s64) into %ir.a)
-    ; RV64-NEXT: FSD $f10_d, killed renamable $x11, 0 :: (store (s64) into %ir.b)
-    ; RV64-NEXT: FSD killed $f10_d, killed renamable $x12, 0 :: (store (s64) into %ir.c)
+    ; RV64-NEXT: $f15_d = FSGNJ_D $f16_d, $f16_d
+    ; RV64-NEXT: FSD $f15_d, killed renamable $x10, 0 :: (store (s64) into %ir.a)
+    ; RV64-NEXT: FSD $f15_d, killed renamable $x11, 0 :: (store (s64) into %ir.b)
+    ; RV64-NEXT: FSD killed $f15_d, killed renamable $x12, 0 :: (store (s64) into %ir.c)
     ; RV64-NEXT: PseudoRET
     FSD renamable $f16_d, killed renamable $x10, 0 :: (store (s64) into %ir.a)
     FSD renamable $f16_d, killed renamable $x11, 0 :: (store (s64) into %ir.b)

diff --git a/llvm/test/CodeGen/RISCV/module-target-abi.ll b/llvm/test/CodeGen/RISCV/module-target-abi.ll
index bc61c50b0822..0e3f62046eb8 100644
--- a/llvm/test/CodeGen/RISCV/module-target-abi.ll
+++ b/llvm/test/CodeGen/RISCV/module-target-abi.ll
@@ -12,9 +12,9 @@
 
 define float @foo(i32 %a) nounwind #0 {
 ; DEFAULT: # %bb.0:
-; DEFAULT: fmv.x.w a0, ft0
+; DEFAULT: fmv.x.w a0, fa5
 ; RV32IF-ILP32: # %bb.0:
-; RV32IF-ILP32: fmv.x.w a0, ft0
+; RV32IF-ILP32: fmv.x.w a0, fa5
   %conv = sitofp i32 %a to float
   ret float %conv
 }

diff --git a/llvm/test/CodeGen/RISCV/repeated-fp-divisors.ll b/llvm/test/CodeGen/RISCV/repeated-fp-divisors.ll
index 8315225b0c34..f183c936fc67 100644
--- a/llvm/test/CodeGen/RISCV/repeated-fp-divisors.ll
+++ b/llvm/test/CodeGen/RISCV/repeated-fp-divisors.ll
@@ -5,8 +5,8 @@
 define void @single_fdiv(double %a0, double %a1, ptr %res) {
 ; CHECK-LABEL: single_fdiv:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fdiv.d ft0, fa1, fa0
-; CHECK-NEXT:    fsd ft0, 0(a0)
+; CHECK-NEXT:    fdiv.d fa5, fa1, fa0
+; CHECK-NEXT:    fsd fa5, 0(a0)
 ; CHECK-NEXT:    ret
 entry:
   %div = fdiv arcp double %a1, %a0
@@ -18,12 +18,12 @@ define void @two_fdivs(double %a0, double %a1, double %a2, ptr %res) {
 ; CHECK-LABEL: two_fdivs:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI1_0)(a1)
-; CHECK-NEXT:    fdiv.d ft0, ft0, fa0
-; CHECK-NEXT:    fmul.d ft1, fa1, ft0
-; CHECK-NEXT:    fmul.d ft0, fa2, ft0
-; CHECK-NEXT:    fsd ft1, 0(a0)
-; CHECK-NEXT:    fsd ft0, 8(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    fdiv.d fa5, fa5, fa0
+; CHECK-NEXT:    fmul.d fa4, fa1, fa5
+; CHECK-NEXT:    fmul.d fa5, fa2, fa5
+; CHECK-NEXT:    fsd fa4, 0(a0)
+; CHECK-NEXT:    fsd fa5, 8(a0)
 ; CHECK-NEXT:    ret
 entry:
   %div = fdiv arcp double %a1, %a0
@@ -38,10 +38,10 @@ entry:
 define void @no_arcp(double %a0, double %a1, double %a2, ptr %res) {
 ; CHECK-LABEL: no_arcp:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fdiv.d ft0, fa1, fa0
-; CHECK-NEXT:    fdiv.d ft1, fa2, fa0
-; CHECK-NEXT:    fsd ft0, 0(a0)
-; CHECK-NEXT:    fsd ft1, 8(a0)
+; CHECK-NEXT:    fdiv.d fa5, fa1, fa0
+; CHECK-NEXT:    fdiv.d fa4, fa2, fa0
+; CHECK-NEXT:    fsd fa5, 0(a0)
+; CHECK-NEXT:    fsd fa4, 8(a0)
 ; CHECK-NEXT:    ret
 entry:
   %div = fdiv arcp double %a1, %a0

diff --git a/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll b/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll
index f624c78132c6..dbc864c79238 100644
--- a/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll
@@ -66,8 +66,8 @@ define zeroext i32 @zext_fptoui(float %a) nounwind {
 define i32 @bcvt_f32_to_aext_i32(float %a, float %b) nounwind {
 ; RV64IF-LABEL: bcvt_f32_to_aext_i32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fadd.s ft0, fa0, fa1
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fadd.s fa5, fa0, fa1
+; RV64IF-NEXT:    fmv.x.w a0, fa5
 ; RV64IF-NEXT:    ret
   %1 = fadd float %a, %b
   %2 = bitcast float %1 to i32
@@ -77,8 +77,8 @@ define i32 @bcvt_f32_to_aext_i32(float %a, float %b) nounwind {
 define signext i32 @bcvt_f32_to_sext_i32(float %a, float %b) nounwind {
 ; RV64IF-LABEL: bcvt_f32_to_sext_i32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fadd.s ft0, fa0, fa1
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fadd.s fa5, fa0, fa1
+; RV64IF-NEXT:    fmv.x.w a0, fa5
 ; RV64IF-NEXT:    ret
   %1 = fadd float %a, %b
   %2 = bitcast float %1 to i32
@@ -88,8 +88,8 @@ define signext i32 @bcvt_f32_to_sext_i32(float %a, float %b) nounwind {
 define zeroext i32 @bcvt_f32_to_zext_i32(float %a, float %b) nounwind {
 ; RV64IF-LABEL: bcvt_f32_to_zext_i32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fadd.s ft0, fa0, fa1
-; RV64IF-NEXT:    fmv.x.w a0, ft0
+; RV64IF-NEXT:    fadd.s fa5, fa0, fa1
+; RV64IF-NEXT:    fmv.x.w a0, fa5
 ; RV64IF-NEXT:    slli a0, a0, 32
 ; RV64IF-NEXT:    srli a0, a0, 32
 ; RV64IF-NEXT:    ret
@@ -101,9 +101,9 @@ define zeroext i32 @bcvt_f32_to_zext_i32(float %a, float %b) nounwind {
 define float @bcvt_i64_to_f32_via_i32(i64 %a, i64 %b) nounwind {
 ; RV64IF-LABEL: bcvt_i64_to_f32_via_i32:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fmv.w.x ft0, a0
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fadd.s fa0, ft0, ft1
+; RV64IF-NEXT:    fmv.w.x fa5, a0
+; RV64IF-NEXT:    fmv.w.x fa4, a1
+; RV64IF-NEXT:    fadd.s fa0, fa5, fa4
 ; RV64IF-NEXT:    ret
   %1 = trunc i64 %a to i32
   %2 = trunc i64 %b to i32

diff --git a/llvm/test/CodeGen/RISCV/rv64zfh-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64zfh-half-convert.ll
index 3f5de3430993..e7beb469c2e9 100644
--- a/llvm/test/CodeGen/RISCV/rv64zfh-half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zfh-half-convert.ll
@@ -66,8 +66,8 @@ define zeroext i32 @zext_fptoui(half %a) nounwind {
 define i16 @bcvt_f16_to_aext_i16(half %a, half %b) nounwind {
 ; RV64IZFH-LABEL: bcvt_f16_to_aext_i16:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    fadd.h ft0, fa0, fa1
-; RV64IZFH-NEXT:    fmv.x.h a0, ft0
+; RV64IZFH-NEXT:    fadd.h fa5, fa0, fa1
+; RV64IZFH-NEXT:    fmv.x.h a0, fa5
 ; RV64IZFH-NEXT:    ret
   %1 = fadd half %a, %b
   %2 = bitcast half %1 to i16
@@ -77,8 +77,8 @@ define i16 @bcvt_f16_to_aext_i16(half %a, half %b) nounwind {
 define signext i16 @bcvt_f16_to_sext_i16(half %a, half %b) nounwind {
 ; RV64IZFH-LABEL: bcvt_f16_to_sext_i16:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    fadd.h ft0, fa0, fa1
-; RV64IZFH-NEXT:    fmv.x.h a0, ft0
+; RV64IZFH-NEXT:    fadd.h fa5, fa0, fa1
+; RV64IZFH-NEXT:    fmv.x.h a0, fa5
 ; RV64IZFH-NEXT:    ret
   %1 = fadd half %a, %b
   %2 = bitcast half %1 to i16
@@ -88,8 +88,8 @@ define signext i16 @bcvt_f16_to_sext_i16(half %a, half %b) nounwind {
 define zeroext i16 @bcvt_f16_to_zext_i16(half %a, half %b) nounwind {
 ; RV64IZFH-LABEL: bcvt_f16_to_zext_i16:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    fadd.h ft0, fa0, fa1
-; RV64IZFH-NEXT:    fmv.x.h a0, ft0
+; RV64IZFH-NEXT:    fadd.h fa5, fa0, fa1
+; RV64IZFH-NEXT:    fmv.x.h a0, fa5
 ; RV64IZFH-NEXT:    slli a0, a0, 48
 ; RV64IZFH-NEXT:    srli a0, a0, 48
 ; RV64IZFH-NEXT:    ret
@@ -101,9 +101,9 @@ define zeroext i16 @bcvt_f16_to_zext_i16(half %a, half %b) nounwind {
 define half @bcvt_i64_to_f16_via_i16(i64 %a, i64 %b) nounwind {
 ; RV64IZFH-LABEL: bcvt_i64_to_f16_via_i16:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    fmv.h.x ft0, a0
-; RV64IZFH-NEXT:    fmv.h.x ft1, a1
-; RV64IZFH-NEXT:    fadd.h fa0, ft0, ft1
+; RV64IZFH-NEXT:    fmv.h.x fa5, a0
+; RV64IZFH-NEXT:    fmv.h.x fa4, a1
+; RV64IZFH-NEXT:    fadd.h fa0, fa5, fa4
 ; RV64IZFH-NEXT:    ret
   %1 = trunc i64 %a to i16
   %2 = trunc i64 %b to i16

diff --git a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll
index de7f018c3798..24fae353a2b8 100644
--- a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll
@@ -8,8 +8,8 @@
 define i32 @aext_fptosi(half %a) nounwind {
 ; RV64IZFHMIN-LABEL: aext_fptosi:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
   ret i32 %1
@@ -19,8 +19,8 @@ declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata)
 define signext i32 @sext_fptosi(half %a) nounwind {
 ; RV64IZFHMIN-LABEL: sext_fptosi:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
   ret i32 %1
@@ -29,8 +29,8 @@ define signext i32 @sext_fptosi(half %a) nounwind {
 define zeroext i32 @zext_fptosi(half %a) nounwind {
 ; RV64IZFHMIN-LABEL: zext_fptosi:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    slli a0, a0, 32
 ; RV64IZFHMIN-NEXT:    srli a0, a0, 32
 ; RV64IZFHMIN-NEXT:    ret
@@ -41,8 +41,8 @@ define zeroext i32 @zext_fptosi(half %a) nounwind {
 define i32 @aext_fptoui(half %a) nounwind {
 ; RV64IZFHMIN-LABEL: aext_fptoui:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
   ret i32 %1
@@ -52,8 +52,8 @@ declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata)
 define signext i32 @sext_fptoui(half %a) nounwind {
 ; RV64IZFHMIN-LABEL: sext_fptoui:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
   ret i32 %1
@@ -62,8 +62,8 @@ define signext i32 @sext_fptoui(half %a) nounwind {
 define zeroext i32 @zext_fptoui(half %a) nounwind {
 ; RV64IZFHMIN-LABEL: zext_fptoui:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
   ret i32 %1
@@ -74,8 +74,8 @@ define half @uitofp_aext_i32_to_f16(i32 %a) nounwind {
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    slli a0, a0, 32
 ; RV64IZFHMIN-NEXT:    srli a0, a0, 32
-; RV64IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -87,8 +87,8 @@ define half @uitofp_sext_i32_to_f16(i32 signext %a) nounwind {
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    slli a0, a0, 32
 ; RV64IZFHMIN-NEXT:    srli a0, a0, 32
-; RV64IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -97,8 +97,8 @@ define half @uitofp_sext_i32_to_f16(i32 signext %a) nounwind {
 define half @uitofp_zext_i32_to_f16(i32 zeroext %a) nounwind {
 ; RV64IZFHMIN-LABEL: uitofp_zext_i32_to_f16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -108,8 +108,8 @@ define half @sitofp_aext_i32_to_f16(i32 %a) nounwind {
 ; RV64IZFHMIN-LABEL: sitofp_aext_i32_to_f16:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    sext.w a0, a0
-; RV64IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -119,8 +119,8 @@ declare half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata, met
 define half @sitofp_sext_i32_to_f16(i32 signext %a) nounwind {
 ; RV64IZFHMIN-LABEL: sitofp_sext_i32_to_f16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -130,8 +130,8 @@ define half @sitofp_zext_i32_to_f16(i32 zeroext %a) nounwind {
 ; RV64IZFHMIN-LABEL: sitofp_zext_i32_to_f16:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    sext.w a0, a0
-; RV64IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1

diff --git a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert.ll
index 0c97a61fbcaf..891f7dd88bba 100644
--- a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert.ll
@@ -7,8 +7,8 @@
 define i32 @aext_fptosi(half %a) nounwind {
 ; RV64IZFHMIN-LABEL: aext_fptosi:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %1 = fptosi half %a to i32
   ret i32 %1
@@ -17,8 +17,8 @@ define i32 @aext_fptosi(half %a) nounwind {
 define signext i32 @sext_fptosi(half %a) nounwind {
 ; RV64IZFHMIN-LABEL: sext_fptosi:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %1 = fptosi half %a to i32
   ret i32 %1
@@ -27,8 +27,8 @@ define signext i32 @sext_fptosi(half %a) nounwind {
 define zeroext i32 @zext_fptosi(half %a) nounwind {
 ; RV64IZFHMIN-LABEL: zext_fptosi:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    slli a0, a0, 32
 ; RV64IZFHMIN-NEXT:    srli a0, a0, 32
 ; RV64IZFHMIN-NEXT:    ret
@@ -39,8 +39,8 @@ define zeroext i32 @zext_fptosi(half %a) nounwind {
 define i32 @aext_fptoui(half %a) nounwind {
 ; RV64IZFHMIN-LABEL: aext_fptoui:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %1 = fptoui half %a to i32
   ret i32 %1
@@ -49,8 +49,8 @@ define i32 @aext_fptoui(half %a) nounwind {
 define signext i32 @sext_fptoui(half %a) nounwind {
 ; RV64IZFHMIN-LABEL: sext_fptoui:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.wu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %1 = fptoui half %a to i32
   ret i32 %1
@@ -59,8 +59,8 @@ define signext i32 @sext_fptoui(half %a) nounwind {
 define zeroext i32 @zext_fptoui(half %a) nounwind {
 ; RV64IZFHMIN-LABEL: zext_fptoui:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.lu.s a0, fa5, rtz
 ; RV64IZFHMIN-NEXT:    ret
   %1 = fptoui half %a to i32
   ret i32 %1
@@ -69,11 +69,11 @@ define zeroext i32 @zext_fptoui(half %a) nounwind {
 define i16 @bcvt_f16_to_aext_i16(half %a, half %b) nounwind {
 ; RV64IZFHMIN-LABEL: bcvt_f16_to_aext_i16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fmv.x.h a0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fmv.x.h a0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = fadd half %a, %b
   %2 = bitcast half %1 to i16
@@ -83,11 +83,11 @@ define i16 @bcvt_f16_to_aext_i16(half %a, half %b) nounwind {
 define signext i16 @bcvt_f16_to_sext_i16(half %a, half %b) nounwind {
 ; RV64IZFHMIN-LABEL: bcvt_f16_to_sext_i16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fmv.x.h a0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fmv.x.h a0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = fadd half %a, %b
   %2 = bitcast half %1 to i16
@@ -97,11 +97,11 @@ define signext i16 @bcvt_f16_to_sext_i16(half %a, half %b) nounwind {
 define zeroext i16 @bcvt_f16_to_zext_i16(half %a, half %b) nounwind {
 ; RV64IZFHMIN-LABEL: bcvt_f16_to_zext_i16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, fa0
-; RV64IZFHMIN-NEXT:    fadd.s ft0, ft1, ft0
-; RV64IZFHMIN-NEXT:    fcvt.h.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fmv.x.h a0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa0
+; RV64IZFHMIN-NEXT:    fadd.s fa5, fa4, fa5
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fmv.x.h a0, fa5
 ; RV64IZFHMIN-NEXT:    slli a0, a0, 48
 ; RV64IZFHMIN-NEXT:    srli a0, a0, 48
 ; RV64IZFHMIN-NEXT:    ret
@@ -113,12 +113,12 @@ define zeroext i16 @bcvt_f16_to_zext_i16(half %a, half %b) nounwind {
 define half @bcvt_i64_to_f16_via_i16(i64 %a, i64 %b) nounwind {
 ; RV64IZFHMIN-LABEL: bcvt_i64_to_f16_via_i16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fmv.h.x ft0, a0
-; RV64IZFHMIN-NEXT:    fmv.h.x ft1, a1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft1, ft1
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, ft0
-; RV64IZFHMIN-NEXT:    fadd.s ft0, ft0, ft1
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    fmv.h.x fa5, a0
+; RV64IZFHMIN-NEXT:    fmv.h.x fa4, a1
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa4, fa4
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa5
+; RV64IZFHMIN-NEXT:    fadd.s fa5, fa5, fa4
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = trunc i64 %a to i16
   %2 = trunc i64 %b to i16
@@ -133,8 +133,8 @@ define half @uitofp_aext_i32_to_f16(i32 %a) nounwind {
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    slli a0, a0, 32
 ; RV64IZFHMIN-NEXT:    srli a0, a0, 32
-; RV64IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = uitofp i32 %a to half
   ret half %1
@@ -145,8 +145,8 @@ define half @uitofp_sext_i32_to_f16(i32 signext %a) nounwind {
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    slli a0, a0, 32
 ; RV64IZFHMIN-NEXT:    srli a0, a0, 32
-; RV64IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = uitofp i32 %a to half
   ret half %1
@@ -155,8 +155,8 @@ define half @uitofp_sext_i32_to_f16(i32 signext %a) nounwind {
 define half @uitofp_zext_i32_to_f16(i32 zeroext %a) nounwind {
 ; RV64IZFHMIN-LABEL: uitofp_zext_i32_to_f16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.lu ft0, a0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = uitofp i32 %a to half
   ret half %1
@@ -166,8 +166,8 @@ define half @sitofp_aext_i32_to_f16(i32 %a) nounwind {
 ; RV64IZFHMIN-LABEL: sitofp_aext_i32_to_f16:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    sext.w a0, a0
-; RV64IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = sitofp i32 %a to half
   ret half %1
@@ -176,8 +176,8 @@ define half @sitofp_aext_i32_to_f16(i32 %a) nounwind {
 define half @sitofp_sext_i32_to_f16(i32 signext %a) nounwind {
 ; RV64IZFHMIN-LABEL: sitofp_sext_i32_to_f16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = sitofp i32 %a to half
   ret half %1
@@ -187,8 +187,8 @@ define half @sitofp_zext_i32_to_f16(i32 zeroext %a) nounwind {
 ; RV64IZFHMIN-LABEL: sitofp_zext_i32_to_f16:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    sext.w a0, a0
-; RV64IZFHMIN-NEXT:    fcvt.s.l ft0, a0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.l fa5, a0
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = sitofp i32 %a to half
   ret half %1

diff --git a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll
index 5dd6199b18ea..6052e7a20cfc 100644
--- a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll
@@ -13,8 +13,8 @@ declare i64 @llvm.llrint.i64.f16(half)
 define i64 @llrint_f16(half %a) nounwind {
 ; CHECKIZFHMIN-LABEL: llrint_f16:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.l.s a0, ft0
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.l.s a0, fa5
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call i64 @llvm.llrint.i64.f16(half %a)
   ret i64 %1
@@ -25,8 +25,8 @@ declare i64 @llvm.llround.i64.f16(half)
 define i64 @llround_f16(half %a) nounwind {
 ; CHECKIZFHMIN-LABEL: llround_f16:
 ; CHECKIZFHMIN:       # %bb.0:
-; CHECKIZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; CHECKIZFHMIN-NEXT:    fcvt.l.s a0, ft0, rmm
+; CHECKIZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT:    fcvt.l.s a0, fa5, rmm
 ; CHECKIZFHMIN-NEXT:    ret
   %1 = call i64 @llvm.llround.i64.f16(half %a)
   ret i64 %1

diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index 8c3073add846..28362f6cf6cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -10,11 +10,11 @@ define <vscale x 1 x half> @vp_ceil_vv_nxv1f16(<vscale x 1 x half> %va, <vscale
 ; CHECK-LABEL: vp_ceil_vv_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -31,10 +31,10 @@ define <vscale x 1 x half> @vp_ceil_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-LABEL: vp_ceil_vv_nxv1f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -54,11 +54,11 @@ define <vscale x 2 x half> @vp_ceil_vv_nxv2f16(<vscale x 2 x half> %va, <vscale
 ; CHECK-LABEL: vp_ceil_vv_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -75,10 +75,10 @@ define <vscale x 2 x half> @vp_ceil_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-LABEL: vp_ceil_vv_nxv2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -98,11 +98,11 @@ define <vscale x 4 x half> @vp_ceil_vv_nxv4f16(<vscale x 4 x half> %va, <vscale
 ; CHECK-LABEL: vp_ceil_vv_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -119,10 +119,10 @@ define <vscale x 4 x half> @vp_ceil_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-LABEL: vp_ceil_vv_nxv4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -143,11 +143,11 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -165,10 +165,10 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-LABEL: vp_ceil_vv_nxv8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI7_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -189,11 +189,11 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI8_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI8_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -211,10 +211,10 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16_unmasked(<vscale x 16 x half> %
 ; CHECK-LABEL: vp_ceil_vv_nxv16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI9_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI9_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI9_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -235,11 +235,11 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI10_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI10_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI10_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -257,10 +257,10 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; CHECK-LABEL: vp_ceil_vv_nxv32f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI11_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI11_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI11_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -282,9 +282,9 @@ define <vscale x 1 x float> @vp_ceil_vv_nxv1f32(<vscale x 1 x float> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -303,8 +303,8 @@ define <vscale x 1 x float> @vp_ceil_vv_nxv1f32_unmasked(<vscale x 1 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -326,9 +326,9 @@ define <vscale x 2 x float> @vp_ceil_vv_nxv2f32(<vscale x 2 x float> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -347,8 +347,8 @@ define <vscale x 2 x float> @vp_ceil_vv_nxv2f32_unmasked(<vscale x 2 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -371,9 +371,9 @@ define <vscale x 4 x float> @vp_ceil_vv_nxv4f32(<vscale x 4 x float> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -393,8 +393,8 @@ define <vscale x 4 x float> @vp_ceil_vv_nxv4f32_unmasked(<vscale x 4 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -417,9 +417,9 @@ define <vscale x 8 x float> @vp_ceil_vv_nxv8f32(<vscale x 8 x float> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -439,8 +439,8 @@ define <vscale x 8 x float> @vp_ceil_vv_nxv8f32_unmasked(<vscale x 8 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -463,9 +463,9 @@ define <vscale x 16 x float> @vp_ceil_vv_nxv16f32(<vscale x 16 x float> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -485,8 +485,8 @@ define <vscale x 16 x float> @vp_ceil_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -506,11 +506,11 @@ define <vscale x 1 x double> @vp_ceil_vv_nxv1f64(<vscale x 1 x double> %va, <vsc
 ; CHECK-LABEL: vp_ceil_vv_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -527,10 +527,10 @@ define <vscale x 1 x double> @vp_ceil_vv_nxv1f64_unmasked(<vscale x 1 x double>
 ; CHECK-LABEL: vp_ceil_vv_nxv1f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI23_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -551,11 +551,11 @@ define <vscale x 2 x double> @vp_ceil_vv_nxv2f64(<vscale x 2 x double> %va, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -573,10 +573,10 @@ define <vscale x 2 x double> @vp_ceil_vv_nxv2f64_unmasked(<vscale x 2 x double>
 ; CHECK-LABEL: vp_ceil_vv_nxv2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI25_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -597,11 +597,11 @@ define <vscale x 4 x double> @vp_ceil_vv_nxv4f64(<vscale x 4 x double> %va, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI26_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -619,10 +619,10 @@ define <vscale x 4 x double> @vp_ceil_vv_nxv4f64_unmasked(<vscale x 4 x double>
 ; CHECK-LABEL: vp_ceil_vv_nxv4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI27_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI27_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -643,11 +643,11 @@ define <vscale x 7 x double> @vp_ceil_vv_nxv7f64(<vscale x 7 x double> %va, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI28_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI28_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -665,10 +665,10 @@ define <vscale x 7 x double> @vp_ceil_vv_nxv7f64_unmasked(<vscale x 7 x double>
 ; CHECK-LABEL: vp_ceil_vv_nxv7f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI29_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI29_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI29_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -689,11 +689,11 @@ define <vscale x 8 x double> @vp_ceil_vv_nxv8f64(<vscale x 8 x double> %va, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI30_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI30_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -711,10 +711,10 @@ define <vscale x 8 x double> @vp_ceil_vv_nxv8f64_unmasked(<vscale x 8 x double>
 ; CHECK-LABEL: vp_ceil_vv_nxv8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI31_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI31_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI31_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -750,12 +750,12 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI32_0)(a3)
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a2, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
@@ -776,7 +776,7 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
@@ -802,13 +802,13 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64_unmasked(<vscale x 16 x doubl
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    sub a2, a0, a1
 ; CHECK-NEXT:    lui a3, %hi(.LCPI33_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI33_0)(a3)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI33_0)(a3)
 ; CHECK-NEXT:    sltu a3, a0, a2
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v16
-; CHECK-NEXT:    vmflt.vf v0, v24, ft0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    fsrmi a2, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; CHECK-NEXT:    fsrm a2
@@ -821,7 +821,7 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64_unmasked(<vscale x 16 x doubl
 ; CHECK-NEXT:  .LBB33_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8
-; CHECK-NEXT:    vmflt.vf v0, v24, ft0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT:    fsrm a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll b/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll
index b2eadd45ea5f..64e03e3fad27 100644
--- a/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll
@@ -14,10 +14,10 @@ define <vscale x 1 x i8> @trunc_nxv1f64_to_si8(<vscale x 1 x double> %x) {
 ; RV32-LABEL: trunc_nxv1f64_to_si8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI0_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI0_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI0_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -33,10 +33,10 @@ define <vscale x 1 x i8> @trunc_nxv1f64_to_si8(<vscale x 1 x double> %x) {
 ; RV64-LABEL: trunc_nxv1f64_to_si8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI0_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI0_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI0_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -57,10 +57,10 @@ define <vscale x 1 x i8> @trunc_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
 ; RV32-LABEL: trunc_nxv1f64_to_ui8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI1_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI1_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -76,10 +76,10 @@ define <vscale x 1 x i8> @trunc_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
 ; RV64-LABEL: trunc_nxv1f64_to_ui8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI1_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI1_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -100,10 +100,10 @@ define <vscale x 1 x i16> @trunc_nxv1f64_to_si16(<vscale x 1 x double> %x) {
 ; RV32-LABEL: trunc_nxv1f64_to_si16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI2_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI2_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI2_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -117,10 +117,10 @@ define <vscale x 1 x i16> @trunc_nxv1f64_to_si16(<vscale x 1 x double> %x) {
 ; RV64-LABEL: trunc_nxv1f64_to_si16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI2_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI2_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI2_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -139,10 +139,10 @@ define <vscale x 1 x i16> @trunc_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
 ; RV32-LABEL: trunc_nxv1f64_to_ui16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI3_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI3_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -156,10 +156,10 @@ define <vscale x 1 x i16> @trunc_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
 ; RV64-LABEL: trunc_nxv1f64_to_ui16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI3_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI3_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -256,10 +256,10 @@ define <vscale x 4 x i8> @trunc_nxv4f64_to_si8(<vscale x 4 x double> %x) {
 ; RV32-LABEL: trunc_nxv4f64_to_si8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI8_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI8_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI8_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32-NEXT:    vfabs.v v12, v8
-; RV32-NEXT:    vmflt.vf v0, v12, ft0
+; RV32-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -275,10 +275,10 @@ define <vscale x 4 x i8> @trunc_nxv4f64_to_si8(<vscale x 4 x double> %x) {
 ; RV64-LABEL: trunc_nxv4f64_to_si8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI8_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI8_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI8_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64-NEXT:    vfabs.v v12, v8
-; RV64-NEXT:    vmflt.vf v0, v12, ft0
+; RV64-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -299,10 +299,10 @@ define <vscale x 4 x i8> @trunc_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
 ; RV32-LABEL: trunc_nxv4f64_to_ui8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI9_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI9_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI9_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32-NEXT:    vfabs.v v12, v8
-; RV32-NEXT:    vmflt.vf v0, v12, ft0
+; RV32-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -318,10 +318,10 @@ define <vscale x 4 x i8> @trunc_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
 ; RV64-LABEL: trunc_nxv4f64_to_ui8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI9_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI9_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI9_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64-NEXT:    vfabs.v v12, v8
-; RV64-NEXT:    vmflt.vf v0, v12, ft0
+; RV64-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -342,10 +342,10 @@ define <vscale x 4 x i16> @trunc_nxv4f64_to_si16(<vscale x 4 x double> %x) {
 ; RV32-LABEL: trunc_nxv4f64_to_si16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI10_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI10_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI10_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32-NEXT:    vfabs.v v12, v8
-; RV32-NEXT:    vmflt.vf v0, v12, ft0
+; RV32-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -359,10 +359,10 @@ define <vscale x 4 x i16> @trunc_nxv4f64_to_si16(<vscale x 4 x double> %x) {
 ; RV64-LABEL: trunc_nxv4f64_to_si16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI10_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI10_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI10_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64-NEXT:    vfabs.v v12, v8
-; RV64-NEXT:    vmflt.vf v0, v12, ft0
+; RV64-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -381,10 +381,10 @@ define <vscale x 4 x i16> @trunc_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
 ; RV32-LABEL: trunc_nxv4f64_to_ui16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI11_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32-NEXT:    vfabs.v v12, v8
-; RV32-NEXT:    vmflt.vf v0, v12, ft0
+; RV32-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -398,10 +398,10 @@ define <vscale x 4 x i16> @trunc_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
 ; RV64-LABEL: trunc_nxv4f64_to_ui16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI11_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64-NEXT:    vfabs.v v12, v8
-; RV64-NEXT:    vmflt.vf v0, v12, ft0
+; RV64-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -498,10 +498,10 @@ define <vscale x 1 x i8> @ceil_nxv1f64_to_si8(<vscale x 1 x double> %x) {
 ; RV32-LABEL: ceil_nxv1f64_to_si8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI16_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI16_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -519,10 +519,10 @@ define <vscale x 1 x i8> @ceil_nxv1f64_to_si8(<vscale x 1 x double> %x) {
 ; RV64-LABEL: ceil_nxv1f64_to_si8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI16_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI16_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    fsrm a0
@@ -545,10 +545,10 @@ define <vscale x 1 x i8> @ceil_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
 ; RV32-LABEL: ceil_nxv1f64_to_ui8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI17_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI17_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -566,10 +566,10 @@ define <vscale x 1 x i8> @ceil_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
 ; RV64-LABEL: ceil_nxv1f64_to_ui8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI17_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI17_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    fsrm a0
@@ -592,10 +592,10 @@ define <vscale x 1 x i16> @ceil_nxv1f64_to_si16(<vscale x 1 x double> %x) {
 ; RV32-LABEL: ceil_nxv1f64_to_si16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI18_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI18_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -611,10 +611,10 @@ define <vscale x 1 x i16> @ceil_nxv1f64_to_si16(<vscale x 1 x double> %x) {
 ; RV64-LABEL: ceil_nxv1f64_to_si16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI18_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI18_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    fsrm a0
@@ -635,10 +635,10 @@ define <vscale x 1 x i16> @ceil_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
 ; RV32-LABEL: ceil_nxv1f64_to_ui16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI19_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI19_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -654,10 +654,10 @@ define <vscale x 1 x i16> @ceil_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
 ; RV64-LABEL: ceil_nxv1f64_to_ui16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI19_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI19_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    fsrm a0
@@ -780,10 +780,10 @@ define <vscale x 4 x i8> @ceil_nxv4f64_to_si8(<vscale x 4 x double> %x) {
 ; RV32-LABEL: ceil_nxv4f64_to_si8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI24_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI24_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32-NEXT:    vfabs.v v12, v8
-; RV32-NEXT:    vmflt.vf v0, v12, ft0
+; RV32-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -801,10 +801,10 @@ define <vscale x 4 x i8> @ceil_nxv4f64_to_si8(<vscale x 4 x double> %x) {
 ; RV64-LABEL: ceil_nxv4f64_to_si8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI24_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI24_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64-NEXT:    vfabs.v v12, v8
-; RV64-NEXT:    vmflt.vf v0, v12, ft0
+; RV64-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64-NEXT:    fsrm a0
@@ -827,10 +827,10 @@ define <vscale x 4 x i8> @ceil_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
 ; RV32-LABEL: ceil_nxv4f64_to_ui8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI25_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI25_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32-NEXT:    vfabs.v v12, v8
-; RV32-NEXT:    vmflt.vf v0, v12, ft0
+; RV32-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -848,10 +848,10 @@ define <vscale x 4 x i8> @ceil_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
 ; RV64-LABEL: ceil_nxv4f64_to_ui8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI25_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI25_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64-NEXT:    vfabs.v v12, v8
-; RV64-NEXT:    vmflt.vf v0, v12, ft0
+; RV64-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64-NEXT:    fsrm a0
@@ -874,10 +874,10 @@ define <vscale x 4 x i16> @ceil_nxv4f64_to_si16(<vscale x 4 x double> %x) {
 ; RV32-LABEL: ceil_nxv4f64_to_si16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI26_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI26_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32-NEXT:    vfabs.v v12, v8
-; RV32-NEXT:    vmflt.vf v0, v12, ft0
+; RV32-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -893,10 +893,10 @@ define <vscale x 4 x i16> @ceil_nxv4f64_to_si16(<vscale x 4 x double> %x) {
 ; RV64-LABEL: ceil_nxv4f64_to_si16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI26_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI26_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64-NEXT:    vfabs.v v12, v8
-; RV64-NEXT:    vmflt.vf v0, v12, ft0
+; RV64-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64-NEXT:    fsrm a0
@@ -917,10 +917,10 @@ define <vscale x 4 x i16> @ceil_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
 ; RV32-LABEL: ceil_nxv4f64_to_ui16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI27_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI27_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32-NEXT:    vfabs.v v12, v8
-; RV32-NEXT:    vmflt.vf v0, v12, ft0
+; RV32-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -936,10 +936,10 @@ define <vscale x 4 x i16> @ceil_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
 ; RV64-LABEL: ceil_nxv4f64_to_ui16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI27_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI27_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64-NEXT:    vfabs.v v12, v8
-; RV64-NEXT:    vmflt.vf v0, v12, ft0
+; RV64-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64-NEXT:    fsrm a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
index 61b63aef5182..bdf0a3d30bd3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -518,10 +518,10 @@ define float @extractelt_fadd_nxv4f32_splat(<vscale x 4 x float> %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    lui a0, 263168
-; CHECK-NEXT:    fmv.w.x ft1, a0
-; CHECK-NEXT:    fadd.s fa0, ft0, ft1
+; CHECK-NEXT:    fmv.w.x fa4, a0
+; CHECK-NEXT:    fadd.s fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
   %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
@@ -535,10 +535,10 @@ define float @extractelt_fsub_nxv4f32_splat(<vscale x 4 x float> %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    lui a0, 263168
-; CHECK-NEXT:    fmv.w.x ft1, a0
-; CHECK-NEXT:    fsub.s fa0, ft1, ft0
+; CHECK-NEXT:    fmv.w.x fa4, a0
+; CHECK-NEXT:    fsub.s fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
   %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
@@ -552,10 +552,10 @@ define float @extractelt_fmul_nxv4f32_splat(<vscale x 4 x float> %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    lui a0, 263168
-; CHECK-NEXT:    fmv.w.x ft1, a0
-; CHECK-NEXT:    fmul.s fa0, ft0, ft1
+; CHECK-NEXT:    fmv.w.x fa4, a0
+; CHECK-NEXT:    fmul.s fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
   %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
@@ -568,10 +568,10 @@ define float @extractelt_fdiv_nxv4f32_splat(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: extractelt_fdiv_nxv4f32_splat:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 0, e32, m2, ta, ma
-; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    lui a0, 263168
-; CHECK-NEXT:    fmv.w.x ft1, a0
-; CHECK-NEXT:    fdiv.s fa0, ft0, ft1
+; CHECK-NEXT:    fmv.w.x fa4, a0
+; CHECK-NEXT:    fdiv.s fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
   %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
index aba0682b8aff..800392e6de85 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
@@ -8,10 +8,10 @@ define <vscale x 1 x half> @ceil_nxv1f16(<vscale x 1 x half> %x) {
 ; CHECK-LABEL: ceil_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -28,10 +28,10 @@ define <vscale x 2 x half> @ceil_nxv2f16(<vscale x 2 x half> %x) {
 ; CHECK-LABEL: ceil_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -48,10 +48,10 @@ define <vscale x 4 x half> @ceil_nxv4f16(<vscale x 4 x half> %x) {
 ; CHECK-LABEL: ceil_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -68,10 +68,10 @@ define <vscale x 8 x half> @ceil_nxv8f16(<vscale x 8 x half> %x) {
 ; CHECK-LABEL: ceil_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -88,10 +88,10 @@ define <vscale x 16 x half> @ceil_nxv16f16(<vscale x 16 x half> %x) {
 ; CHECK-LABEL: ceil_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -108,10 +108,10 @@ define <vscale x 32 x half> @ceil_nxv32f16(<vscale x 32 x half> %x) {
 ; CHECK-LABEL: ceil_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -130,8 +130,8 @@ define <vscale x 1 x float> @ceil_nxv1f32(<vscale x 1 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -150,8 +150,8 @@ define <vscale x 2 x float> @ceil_nxv2f32(<vscale x 2 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -170,8 +170,8 @@ define <vscale x 4 x float> @ceil_nxv4f32(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -190,8 +190,8 @@ define <vscale x 8 x float> @ceil_nxv8f32(<vscale x 8 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -210,8 +210,8 @@ define <vscale x 16 x float> @ceil_nxv16f32(<vscale x 16 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -228,10 +228,10 @@ define <vscale x 1 x double> @ceil_nxv1f64(<vscale x 1 x double> %x) {
 ; CHECK-LABEL: ceil_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -248,10 +248,10 @@ define <vscale x 2 x double> @ceil_nxv2f64(<vscale x 2 x double> %x) {
 ; CHECK-LABEL: ceil_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -268,10 +268,10 @@ define <vscale x 4 x double> @ceil_nxv4f64(<vscale x 4 x double> %x) {
 ; CHECK-LABEL: ceil_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -288,10 +288,10 @@ define <vscale x 8 x double> @ceil_nxv8f64(<vscale x 8 x double> %x) {
 ; CHECK-LABEL: ceil_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI14_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI14_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
index 467ee93c8626..1e87abb347a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
@@ -8,10 +8,10 @@ define <vscale x 1 x half> @floor_nxv1f16(<vscale x 1 x half> %x) {
 ; CHECK-LABEL: floor_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -28,10 +28,10 @@ define <vscale x 2 x half> @floor_nxv2f16(<vscale x 2 x half> %x) {
 ; CHECK-LABEL: floor_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -48,10 +48,10 @@ define <vscale x 4 x half> @floor_nxv4f16(<vscale x 4 x half> %x) {
 ; CHECK-LABEL: floor_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -68,10 +68,10 @@ define <vscale x 8 x half> @floor_nxv8f16(<vscale x 8 x half> %x) {
 ; CHECK-LABEL: floor_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -88,10 +88,10 @@ define <vscale x 16 x half> @floor_nxv16f16(<vscale x 16 x half> %x) {
 ; CHECK-LABEL: floor_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -108,10 +108,10 @@ define <vscale x 32 x half> @floor_nxv32f16(<vscale x 32 x half> %x) {
 ; CHECK-LABEL: floor_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -130,8 +130,8 @@ define <vscale x 1 x float> @floor_nxv1f32(<vscale x 1 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -150,8 +150,8 @@ define <vscale x 2 x float> @floor_nxv2f32(<vscale x 2 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -170,8 +170,8 @@ define <vscale x 4 x float> @floor_nxv4f32(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -190,8 +190,8 @@ define <vscale x 8 x float> @floor_nxv8f32(<vscale x 8 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -210,8 +210,8 @@ define <vscale x 16 x float> @floor_nxv16f32(<vscale x 16 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -228,10 +228,10 @@ define <vscale x 1 x double> @floor_nxv1f64(<vscale x 1 x double> %x) {
 ; CHECK-LABEL: floor_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -248,10 +248,10 @@ define <vscale x 2 x double> @floor_nxv2f64(<vscale x 2 x double> %x) {
 ; CHECK-LABEL: floor_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -268,10 +268,10 @@ define <vscale x 4 x double> @floor_nxv4f64(<vscale x 4 x double> %x) {
 ; CHECK-LABEL: floor_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -288,10 +288,10 @@ define <vscale x 8 x double> @floor_nxv8f64(<vscale x 8 x double> %x) {
 ; CHECK-LABEL: floor_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI14_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI14_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
index ae80def91fe0..3baed1601a29 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -10,11 +10,11 @@ define <2 x half> @vp_ceil_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-LABEL: vp_ceil_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -31,13 +31,13 @@ define <2 x half> @vp_ceil_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -58,11 +58,11 @@ define <4 x half> @vp_ceil_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-LABEL: vp_ceil_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -79,13 +79,13 @@ define <4 x half> @vp_ceil_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -106,11 +106,11 @@ define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-LABEL: vp_ceil_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -127,13 +127,13 @@ define <8 x half> @vp_ceil_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -155,11 +155,11 @@ define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -177,14 +177,14 @@ define <16 x half> @vp_ceil_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI7_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v10
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -208,9 +208,9 @@ define <2 x float> @vp_ceil_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -231,9 +231,9 @@ define <2 x float> @vp_ceil_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -256,9 +256,9 @@ define <4 x float> @vp_ceil_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -279,9 +279,9 @@ define <4 x float> @vp_ceil_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -305,9 +305,9 @@ define <8 x float> @vp_ceil_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -330,9 +330,9 @@ define <8 x float> @vp_ceil_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -357,9 +357,9 @@ define <16 x float> @vp_ceil_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -382,9 +382,9 @@ define <16 x float> @vp_ceil_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -406,11 +406,11 @@ define <2 x double> @vp_ceil_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %e
 ; CHECK-LABEL: vp_ceil_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI16_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -427,13 +427,13 @@ define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_ceil_v2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI17_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -455,11 +455,11 @@ define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %e
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI18_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -477,14 +477,14 @@ define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_ceil_v4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI19_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v10
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -507,11 +507,11 @@ define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %e
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI20_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -529,14 +529,14 @@ define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_ceil_v8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI21_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v12
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -559,11 +559,11 @@ define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroex
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -581,14 +581,14 @@ define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
 ; CHECK-LABEL: vp_ceil_v15f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI23_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -611,11 +611,11 @@ define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroex
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -633,14 +633,14 @@ define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
 ; CHECK-LABEL: vp_ceil_v16f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI25_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -682,12 +682,12 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    li a1, 16
 ; CHECK-NEXT:  .LBB26_2:
 ; CHECK-NEXT:    lui a2, %hi(.LCPI26_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a2)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v25, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v25, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a1, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -713,7 +713,7 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
@@ -749,13 +749,13 @@ define <32 x double> @vp_ceil_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    sub sp, sp, a2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
 ; CHECK-NEXT:    lui a2, %hi(.LCPI27_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI27_0)(a2)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a1, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
@@ -775,7 +775,7 @@ define <32 x double> @vp_ceil_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll
index 8cad9c26c488..e39d66e61ea1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll
@@ -134,14 +134,14 @@ define void @fadd_v4f32(ptr %x, ptr %y) {
 define void @fadd_v2f64(ptr %x, ptr %y) {
 ; CHECK-LABEL: fadd_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fld ft0, 8(a0)
-; CHECK-NEXT:    fld ft1, 0(a0)
-; CHECK-NEXT:    fld ft2, 0(a1)
-; CHECK-NEXT:    fld ft3, 8(a1)
-; CHECK-NEXT:    fadd.d ft1, ft1, ft2
-; CHECK-NEXT:    fadd.d ft0, ft0, ft3
-; CHECK-NEXT:    fsd ft0, 8(a0)
-; CHECK-NEXT:    fsd ft1, 0(a0)
+; CHECK-NEXT:    fld fa5, 8(a0)
+; CHECK-NEXT:    fld fa4, 0(a0)
+; CHECK-NEXT:    fld fa3, 0(a1)
+; CHECK-NEXT:    fld fa2, 8(a1)
+; CHECK-NEXT:    fadd.d fa4, fa4, fa3
+; CHECK-NEXT:    fadd.d fa5, fa5, fa2
+; CHECK-NEXT:    fsd fa5, 8(a0)
+; CHECK-NEXT:    fsd fa4, 0(a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, ptr %x
   %b = load <2 x double>, ptr %y
@@ -171,10 +171,10 @@ define void @fadd_v2f32(ptr %x, ptr %y) {
 define void @fadd_v1f64(ptr %x, ptr %y) {
 ; CHECK-LABEL: fadd_v1f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fld ft0, 0(a0)
-; CHECK-NEXT:    fld ft1, 0(a1)
-; CHECK-NEXT:    fadd.d ft0, ft0, ft1
-; CHECK-NEXT:    fsd ft0, 0(a0)
+; CHECK-NEXT:    fld fa5, 0(a0)
+; CHECK-NEXT:    fld fa4, 0(a1)
+; CHECK-NEXT:    fadd.d fa5, fa5, fa4
+; CHECK-NEXT:    fsd fa5, 0(a0)
 ; CHECK-NEXT:    ret
   %a = load <1 x double>, ptr %x
   %b = load <1 x double>, ptr %y

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
index 54bc24c73636..b860916a3adf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -804,10 +804,10 @@ define float @extractelt_fadd_v4f32(<4 x float> %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    lui a0, 267520
-; CHECK-NEXT:    fmv.w.x ft1, a0
-; CHECK-NEXT:    fadd.s fa0, ft0, ft1
+; CHECK-NEXT:    fmv.w.x fa4, a0
+; CHECK-NEXT:    fadd.s fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %bo = fadd <4 x float> %x, <float 11.0, float 12.0, float 13.0, float 14.0>
   %ext = extractelement <4 x float> %bo, i32 2
@@ -819,10 +819,10 @@ define float @extractelt_fsub_v4f32(<4 x float> %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    lui a0, 267520
-; CHECK-NEXT:    fmv.w.x ft1, a0
-; CHECK-NEXT:    fsub.s fa0, ft1, ft0
+; CHECK-NEXT:    fmv.w.x fa4, a0
+; CHECK-NEXT:    fsub.s fa0, fa4, fa5
 ; CHECK-NEXT:    ret
   %bo = fsub <4 x float> <float 11.0, float 12.0, float 13.0, float 14.0>, %x
   %ext = extractelement <4 x float> %bo, i32 2
@@ -834,10 +834,10 @@ define float @extractelt_fmul_v4f32(<4 x float> %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    lui a0, 267520
-; CHECK-NEXT:    fmv.w.x ft1, a0
-; CHECK-NEXT:    fmul.s fa0, ft0, ft1
+; CHECK-NEXT:    fmv.w.x fa4, a0
+; CHECK-NEXT:    fmul.s fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %bo = fmul <4 x float> %x, <float 11.0, float 12.0, float 13.0, float 14.0>
   %ext = extractelement <4 x float> %bo, i32 2
@@ -849,10 +849,10 @@ define float @extractelt_fdiv_v4f32(<4 x float> %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    lui a0, 267520
-; CHECK-NEXT:    fmv.w.x ft1, a0
-; CHECK-NEXT:    fdiv.s fa0, ft0, ft1
+; CHECK-NEXT:    fmv.w.x fa4, a0
+; CHECK-NEXT:    fdiv.s fa0, fa5, fa4
 ; CHECK-NEXT:    ret
   %bo = fdiv <4 x float> %x, <float 11.0, float 12.0, float 13.0, float 14.0>
   %ext = extractelement <4 x float> %bo, i32 2

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
index c4a7a7e14980..6657184f54e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -10,11 +10,11 @@ define <2 x half> @vp_floor_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-LABEL: vp_floor_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -31,13 +31,13 @@ define <2 x half> @vp_floor_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -58,11 +58,11 @@ define <4 x half> @vp_floor_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-LABEL: vp_floor_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -79,13 +79,13 @@ define <4 x half> @vp_floor_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -106,11 +106,11 @@ define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-LABEL: vp_floor_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -127,13 +127,13 @@ define <8 x half> @vp_floor_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -155,11 +155,11 @@ define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -177,14 +177,14 @@ define <16 x half> @vp_floor_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_floor_v16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI7_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v10
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -208,9 +208,9 @@ define <2 x float> @vp_floor_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -231,9 +231,9 @@ define <2 x float> @vp_floor_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -256,9 +256,9 @@ define <4 x float> @vp_floor_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -279,9 +279,9 @@ define <4 x float> @vp_floor_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -305,9 +305,9 @@ define <8 x float> @vp_floor_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -330,9 +330,9 @@ define <8 x float> @vp_floor_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -357,9 +357,9 @@ define <16 x float> @vp_floor_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -382,9 +382,9 @@ define <16 x float> @vp_floor_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -406,11 +406,11 @@ define <2 x double> @vp_floor_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %
 ; CHECK-LABEL: vp_floor_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI16_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -427,13 +427,13 @@ define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_floor_v2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI17_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -455,11 +455,11 @@ define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI18_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -477,14 +477,14 @@ define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_floor_v4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI19_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v10
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -507,11 +507,11 @@ define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI20_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -529,14 +529,14 @@ define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_floor_v8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI21_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v12
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -559,11 +559,11 @@ define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -581,14 +581,14 @@ define <15 x double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ; CHECK-LABEL: vp_floor_v15f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI23_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -611,11 +611,11 @@ define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -633,14 +633,14 @@ define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ; CHECK-LABEL: vp_floor_v16f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI25_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -682,12 +682,12 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    li a1, 16
 ; CHECK-NEXT:  .LBB26_2:
 ; CHECK-NEXT:    lui a2, %hi(.LCPI26_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a2)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v25, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v25, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a1, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -713,7 +713,7 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
@@ -749,13 +749,13 @@ define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    sub sp, sp, a2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
 ; CHECK-NEXT:    lui a2, %hi(.LCPI27_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI27_0)(a2)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a1, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
@@ -775,7 +775,7 @@ define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
index 1efe3be59c5e..1c685aaa55b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -33,22 +33,22 @@ define <4 x double> @shuffle_fv_v4f64(<4 x double> %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    li a0, 9
 ; RV32-NEXT:    lui a1, %hi(.LCPI2_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI2_0)(a1)
+; RV32-NEXT:    fld fa5, %lo(.LCPI2_0)(a1)
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT:    vfmerge.vfm v8, v8, ft0, v0
+; RV32-NEXT:    vfmerge.vfm v8, v8, fa5, v0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: shuffle_fv_v4f64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI2_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI2_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI2_0)(a0)
 ; RV64-NEXT:    li a0, 9
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; RV64-NEXT:    vmv.s.x v0, a0
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vfmerge.vfm v8, v8, ft0, v0
+; RV64-NEXT:    vfmerge.vfm v8, v8, fa5, v0
 ; RV64-NEXT:    ret
   %s = shufflevector <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x double> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
   ret <4 x double> %s
@@ -59,22 +59,22 @@ define <4 x double> @shuffle_vf_v4f64(<4 x double> %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    li a0, 6
 ; RV32-NEXT:    lui a1, %hi(.LCPI3_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI3_0)(a1)
+; RV32-NEXT:    fld fa5, %lo(.LCPI3_0)(a1)
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT:    vfmerge.vfm v8, v8, ft0, v0
+; RV32-NEXT:    vfmerge.vfm v8, v8, fa5, v0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: shuffle_vf_v4f64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI3_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI3_0)(a0)
 ; RV64-NEXT:    li a0, 6
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; RV64-NEXT:    vmv.s.x v0, a0
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vfmerge.vfm v8, v8, ft0, v0
+; RV64-NEXT:    vfmerge.vfm v8, v8, fa5, v0
 ; RV64-NEXT:    ret
   %s = shufflevector <4 x double> %x, <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
   ret <4 x double> %s

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
index d6925d4a7006..9761196f6e42 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
@@ -245,10 +245,10 @@ define void @splat_negzero_v4f32(ptr %x) {
 define void @splat_negzero_v2f64(ptr %x) {
 ; CHECK-RV32-LABEL: splat_negzero_v2f64:
 ; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    fcvt.d.w ft0, zero
-; CHECK-RV32-NEXT:    fneg.d ft0, ft0
+; CHECK-RV32-NEXT:    fcvt.d.w fa5, zero
+; CHECK-RV32-NEXT:    fneg.d fa5, fa5
 ; CHECK-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-RV32-NEXT:    vfmv.v.f v8, ft0
+; CHECK-RV32-NEXT:    vfmv.v.f v8, fa5
 ; CHECK-RV32-NEXT:    vse64.v v8, (a0)
 ; CHECK-RV32-NEXT:    ret
 ;
@@ -317,10 +317,10 @@ define void @splat_negzero_v8f32(ptr %x) {
 define void @splat_negzero_v4f64(ptr %x) {
 ; RV32-LMULMAX2-LABEL: splat_negzero_v4f64:
 ; RV32-LMULMAX2:       # %bb.0:
-; RV32-LMULMAX2-NEXT:    fcvt.d.w ft0, zero
-; RV32-LMULMAX2-NEXT:    fneg.d ft0, ft0
+; RV32-LMULMAX2-NEXT:    fcvt.d.w fa5, zero
+; RV32-LMULMAX2-NEXT:    fneg.d fa5, fa5
 ; RV32-LMULMAX2-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV32-LMULMAX2-NEXT:    vfmv.v.f v8, ft0
+; RV32-LMULMAX2-NEXT:    vfmv.v.f v8, fa5
 ; RV32-LMULMAX2-NEXT:    vse64.v v8, (a0)
 ; RV32-LMULMAX2-NEXT:    ret
 ;
@@ -335,10 +335,10 @@ define void @splat_negzero_v4f64(ptr %x) {
 ;
 ; RV32-LMULMAX1-LABEL: splat_negzero_v4f64:
 ; RV32-LMULMAX1:       # %bb.0:
-; RV32-LMULMAX1-NEXT:    fcvt.d.w ft0, zero
-; RV32-LMULMAX1-NEXT:    fneg.d ft0, ft0
+; RV32-LMULMAX1-NEXT:    fcvt.d.w fa5, zero
+; RV32-LMULMAX1-NEXT:    fneg.d fa5, fa5
 ; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV32-LMULMAX1-NEXT:    vfmv.v.f v8, ft0
+; RV32-LMULMAX1-NEXT:    vfmv.v.f v8, fa5
 ; RV32-LMULMAX1-NEXT:    addi a1, a0, 16
 ; RV32-LMULMAX1-NEXT:    vse64.v v8, (a1)
 ; RV32-LMULMAX1-NEXT:    vse64.v v8, (a0)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index e675cb3bfb84..aaf5820ca147 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -1973,9 +1973,9 @@ define void @trunc_v8f16(ptr %x) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI91_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI91_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI91_0)(a1)
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
@@ -1996,8 +1996,8 @@ define void @trunc_v4f32(ptr %x) {
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a1, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a1
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
@@ -2017,9 +2017,9 @@ define void @trunc_v2f64(ptr %x) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI93_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI93_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI93_0)(a1)
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -2039,9 +2039,9 @@ define void @ceil_v8f16(ptr %x) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI94_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI94_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI94_0)(a1)
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a1, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a1
@@ -2064,8 +2064,8 @@ define void @ceil_v4f32(ptr %x) {
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a1, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a1
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a1, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a1
@@ -2087,9 +2087,9 @@ define void @ceil_v2f64(ptr %x) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI96_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI96_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI96_0)(a1)
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a1, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a1
@@ -2111,9 +2111,9 @@ define void @floor_v8f16(ptr %x) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI97_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI97_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI97_0)(a1)
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a1, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a1
@@ -2136,8 +2136,8 @@ define void @floor_v4f32(ptr %x) {
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a1, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a1
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a1, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a1
@@ -2159,9 +2159,9 @@ define void @floor_v2f64(ptr %x) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI99_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI99_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI99_0)(a1)
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a1, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a1
@@ -2183,9 +2183,9 @@ define void @round_v8f16(ptr %x) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI100_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI100_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI100_0)(a1)
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a1, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a1
@@ -2208,8 +2208,8 @@ define void @round_v4f32(ptr %x) {
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a1, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a1
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a1, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a1
@@ -2231,9 +2231,9 @@ define void @round_v2f64(ptr %x) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI102_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI102_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI102_0)(a1)
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a1, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a1

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
index 03a89c7f2e39..ab3e01e2ff61 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
@@ -193,26 +193,26 @@ define void @fp2si_v2f64_v2i8(ptr %x, ptr %y) {
 ; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    lui a0, %hi(.LCPI10_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI10_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI10_0)(a0)
 ; RV32-NEXT:    lui a0, %hi(.LCPI10_1)
-; RV32-NEXT:    fld ft1, %lo(.LCPI10_1)(a0)
+; RV32-NEXT:    fld fa4, %lo(.LCPI10_1)(a0)
 ; RV32-NEXT:    vslidedown.vi v9, v8, 1
-; RV32-NEXT:    vfmv.f.s ft2, v9
-; RV32-NEXT:    fmax.d ft3, ft2, ft0
-; RV32-NEXT:    fmin.d ft3, ft3, ft1
-; RV32-NEXT:    fcvt.w.d a0, ft3, rtz
-; RV32-NEXT:    feq.d a2, ft2, ft2
+; RV32-NEXT:    vfmv.f.s fa3, v9
+; RV32-NEXT:    fmax.d fa2, fa3, fa5
+; RV32-NEXT:    fmin.d fa2, fa2, fa4
+; RV32-NEXT:    fcvt.w.d a0, fa2, rtz
+; RV32-NEXT:    feq.d a2, fa3, fa3
 ; RV32-NEXT:    seqz a2, a2
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a2, a0
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; RV32-NEXT:    vmv.v.x v9, a0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s ft2, v8
-; RV32-NEXT:    fmax.d ft0, ft2, ft0
-; RV32-NEXT:    fmin.d ft0, ft0, ft1
-; RV32-NEXT:    fcvt.w.d a0, ft0, rtz
-; RV32-NEXT:    feq.d a2, ft2, ft2
+; RV32-NEXT:    vfmv.f.s fa3, v8
+; RV32-NEXT:    fmax.d fa5, fa3, fa5
+; RV32-NEXT:    fmin.d fa5, fa5, fa4
+; RV32-NEXT:    fcvt.w.d a0, fa5, rtz
+; RV32-NEXT:    feq.d a2, fa3, fa3
 ; RV32-NEXT:    seqz a2, a2
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a2, a0
@@ -227,26 +227,26 @@ define void @fp2si_v2f64_v2i8(ptr %x, ptr %y) {
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    lui a0, %hi(.LCPI10_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI10_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI10_0)(a0)
 ; RV64-NEXT:    lui a0, %hi(.LCPI10_1)
-; RV64-NEXT:    fld ft1, %lo(.LCPI10_1)(a0)
+; RV64-NEXT:    fld fa4, %lo(.LCPI10_1)(a0)
 ; RV64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-NEXT:    vfmv.f.s ft2, v9
-; RV64-NEXT:    fmax.d ft3, ft2, ft0
-; RV64-NEXT:    fmin.d ft3, ft3, ft1
-; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
-; RV64-NEXT:    feq.d a2, ft2, ft2
+; RV64-NEXT:    vfmv.f.s fa3, v9
+; RV64-NEXT:    fmax.d fa2, fa3, fa5
+; RV64-NEXT:    fmin.d fa2, fa2, fa4
+; RV64-NEXT:    fcvt.l.d a0, fa2, rtz
+; RV64-NEXT:    feq.d a2, fa3, fa3
 ; RV64-NEXT:    seqz a2, a2
 ; RV64-NEXT:    addi a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV64-NEXT:    vfmv.f.s ft2, v8
-; RV64-NEXT:    fmax.d ft0, ft2, ft0
-; RV64-NEXT:    fmin.d ft0, ft0, ft1
-; RV64-NEXT:    fcvt.l.d a0, ft0, rtz
-; RV64-NEXT:    feq.d a2, ft2, ft2
+; RV64-NEXT:    vfmv.f.s fa3, v8
+; RV64-NEXT:    fmax.d fa5, fa3, fa5
+; RV64-NEXT:    fmin.d fa5, fa5, fa4
+; RV64-NEXT:    fcvt.l.d a0, fa5, rtz
+; RV64-NEXT:    feq.d a2, fa3, fa3
 ; RV64-NEXT:    seqz a2, a2
 ; RV64-NEXT:    addi a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
@@ -267,18 +267,18 @@ define void @fp2ui_v2f64_v2i8(ptr %x, ptr %y) {
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    lui a0, %hi(.LCPI11_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
-; RV32-NEXT:    vfmv.f.s ft1, v8
-; RV32-NEXT:    fcvt.d.w ft2, zero
-; RV32-NEXT:    fmax.d ft1, ft1, ft2
-; RV32-NEXT:    fmin.d ft1, ft1, ft0
-; RV32-NEXT:    fcvt.wu.d a0, ft1, rtz
+; RV32-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
+; RV32-NEXT:    vfmv.f.s fa4, v8
+; RV32-NEXT:    fcvt.d.w fa3, zero
+; RV32-NEXT:    fmax.d fa4, fa4, fa3
+; RV32-NEXT:    fmin.d fa4, fa4, fa5
+; RV32-NEXT:    fcvt.wu.d a0, fa4, rtz
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s ft1, v8
-; RV32-NEXT:    fmax.d ft1, ft1, ft2
-; RV32-NEXT:    fmin.d ft0, ft1, ft0
-; RV32-NEXT:    fcvt.wu.d a2, ft0, rtz
+; RV32-NEXT:    vfmv.f.s fa4, v8
+; RV32-NEXT:    fmax.d fa4, fa4, fa3
+; RV32-NEXT:    fmin.d fa5, fa4, fa5
+; RV32-NEXT:    fcvt.wu.d a2, fa5, rtz
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; RV32-NEXT:    vmv.v.x v8, a2
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf8, tu, ma
@@ -291,18 +291,18 @@ define void @fp2ui_v2f64_v2i8(ptr %x, ptr %y) {
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    lui a0, %hi(.LCPI11_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
-; RV64-NEXT:    vfmv.f.s ft1, v8
-; RV64-NEXT:    fmv.d.x ft2, zero
-; RV64-NEXT:    fmax.d ft1, ft1, ft2
-; RV64-NEXT:    fmin.d ft1, ft1, ft0
-; RV64-NEXT:    fcvt.lu.d a0, ft1, rtz
+; RV64-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
+; RV64-NEXT:    vfmv.f.s fa4, v8
+; RV64-NEXT:    fmv.d.x fa3, zero
+; RV64-NEXT:    fmax.d fa4, fa4, fa3
+; RV64-NEXT:    fmin.d fa4, fa4, fa5
+; RV64-NEXT:    fcvt.lu.d a0, fa4, rtz
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-NEXT:    vfmv.f.s ft1, v8
-; RV64-NEXT:    fmax.d ft1, ft1, ft2
-; RV64-NEXT:    fmin.d ft0, ft1, ft0
-; RV64-NEXT:    fcvt.lu.d a2, ft0, rtz
+; RV64-NEXT:    vfmv.f.s fa4, v8
+; RV64-NEXT:    fmax.d fa4, fa4, fa3
+; RV64-NEXT:    fmin.d fa5, fa4, fa5
+; RV64-NEXT:    fcvt.lu.d a2, fa5, rtz
 ; RV64-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; RV64-NEXT:    vmv.v.x v8, a2
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf8, tu, ma
@@ -325,85 +325,85 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    lui a0, %hi(.LCPI12_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
 ; RV32-NEXT:    lui a0, %hi(.LCPI12_1)
-; RV32-NEXT:    fld ft1, %lo(.LCPI12_1)(a0)
-; RV32-NEXT:    vfmv.f.s ft2, v8
-; RV32-NEXT:    fmax.d ft3, ft2, ft0
-; RV32-NEXT:    fmin.d ft3, ft3, ft1
-; RV32-NEXT:    fcvt.w.d a0, ft3, rtz
-; RV32-NEXT:    feq.d a2, ft2, ft2
+; RV32-NEXT:    fld fa4, %lo(.LCPI12_1)(a0)
+; RV32-NEXT:    vfmv.f.s fa3, v8
+; RV32-NEXT:    fmax.d fa2, fa3, fa5
+; RV32-NEXT:    fmin.d fa2, fa2, fa4
+; RV32-NEXT:    fcvt.w.d a0, fa2, rtz
+; RV32-NEXT:    feq.d a2, fa3, fa3
 ; RV32-NEXT:    seqz a2, a2
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a2, a0
 ; RV32-NEXT:    sb a0, 8(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
 ; RV32-NEXT:    vslidedown.vi v12, v8, 7
-; RV32-NEXT:    vfmv.f.s ft2, v12
-; RV32-NEXT:    fmax.d ft3, ft2, ft0
-; RV32-NEXT:    fmin.d ft3, ft3, ft1
-; RV32-NEXT:    fcvt.w.d a0, ft3, rtz
-; RV32-NEXT:    feq.d a2, ft2, ft2
+; RV32-NEXT:    vfmv.f.s fa3, v12
+; RV32-NEXT:    fmax.d fa2, fa3, fa5
+; RV32-NEXT:    fmin.d fa2, fa2, fa4
+; RV32-NEXT:    fcvt.w.d a0, fa2, rtz
+; RV32-NEXT:    feq.d a2, fa3, fa3
 ; RV32-NEXT:    seqz a2, a2
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a2, a0
 ; RV32-NEXT:    sb a0, 15(sp)
 ; RV32-NEXT:    vslidedown.vi v12, v8, 6
-; RV32-NEXT:    vfmv.f.s ft2, v12
-; RV32-NEXT:    fmax.d ft3, ft2, ft0
-; RV32-NEXT:    fmin.d ft3, ft3, ft1
-; RV32-NEXT:    fcvt.w.d a0, ft3, rtz
-; RV32-NEXT:    feq.d a2, ft2, ft2
+; RV32-NEXT:    vfmv.f.s fa3, v12
+; RV32-NEXT:    fmax.d fa2, fa3, fa5
+; RV32-NEXT:    fmin.d fa2, fa2, fa4
+; RV32-NEXT:    fcvt.w.d a0, fa2, rtz
+; RV32-NEXT:    feq.d a2, fa3, fa3
 ; RV32-NEXT:    seqz a2, a2
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a2, a0
 ; RV32-NEXT:    sb a0, 14(sp)
 ; RV32-NEXT:    vslidedown.vi v12, v8, 5
-; RV32-NEXT:    vfmv.f.s ft2, v12
-; RV32-NEXT:    fmax.d ft3, ft2, ft0
-; RV32-NEXT:    fmin.d ft3, ft3, ft1
-; RV32-NEXT:    fcvt.w.d a0, ft3, rtz
-; RV32-NEXT:    feq.d a2, ft2, ft2
+; RV32-NEXT:    vfmv.f.s fa3, v12
+; RV32-NEXT:    fmax.d fa2, fa3, fa5
+; RV32-NEXT:    fmin.d fa2, fa2, fa4
+; RV32-NEXT:    fcvt.w.d a0, fa2, rtz
+; RV32-NEXT:    feq.d a2, fa3, fa3
 ; RV32-NEXT:    seqz a2, a2
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a2, a0
 ; RV32-NEXT:    sb a0, 13(sp)
 ; RV32-NEXT:    vslidedown.vi v12, v8, 4
-; RV32-NEXT:    vfmv.f.s ft2, v12
-; RV32-NEXT:    fmax.d ft3, ft2, ft0
-; RV32-NEXT:    fmin.d ft3, ft3, ft1
-; RV32-NEXT:    fcvt.w.d a0, ft3, rtz
-; RV32-NEXT:    feq.d a2, ft2, ft2
+; RV32-NEXT:    vfmv.f.s fa3, v12
+; RV32-NEXT:    fmax.d fa2, fa3, fa5
+; RV32-NEXT:    fmin.d fa2, fa2, fa4
+; RV32-NEXT:    fcvt.w.d a0, fa2, rtz
+; RV32-NEXT:    feq.d a2, fa3, fa3
 ; RV32-NEXT:    seqz a2, a2
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a2, a0
 ; RV32-NEXT:    sb a0, 12(sp)
 ; RV32-NEXT:    vslidedown.vi v12, v8, 3
-; RV32-NEXT:    vfmv.f.s ft2, v12
-; RV32-NEXT:    fmax.d ft3, ft2, ft0
-; RV32-NEXT:    fmin.d ft3, ft3, ft1
-; RV32-NEXT:    fcvt.w.d a0, ft3, rtz
-; RV32-NEXT:    feq.d a2, ft2, ft2
+; RV32-NEXT:    vfmv.f.s fa3, v12
+; RV32-NEXT:    fmax.d fa2, fa3, fa5
+; RV32-NEXT:    fmin.d fa2, fa2, fa4
+; RV32-NEXT:    fcvt.w.d a0, fa2, rtz
+; RV32-NEXT:    feq.d a2, fa3, fa3
 ; RV32-NEXT:    seqz a2, a2
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a2, a0
 ; RV32-NEXT:    sb a0, 11(sp)
 ; RV32-NEXT:    vslidedown.vi v12, v8, 2
-; RV32-NEXT:    vfmv.f.s ft2, v12
-; RV32-NEXT:    fmax.d ft3, ft2, ft0
-; RV32-NEXT:    fmin.d ft3, ft3, ft1
-; RV32-NEXT:    fcvt.w.d a0, ft3, rtz
-; RV32-NEXT:    feq.d a2, ft2, ft2
+; RV32-NEXT:    vfmv.f.s fa3, v12
+; RV32-NEXT:    fmax.d fa2, fa3, fa5
+; RV32-NEXT:    fmin.d fa2, fa2, fa4
+; RV32-NEXT:    fcvt.w.d a0, fa2, rtz
+; RV32-NEXT:    feq.d a2, fa3, fa3
 ; RV32-NEXT:    seqz a2, a2
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a2, a0
 ; RV32-NEXT:    sb a0, 10(sp)
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s ft2, v8
-; RV32-NEXT:    fmax.d ft0, ft2, ft0
-; RV32-NEXT:    fmin.d ft0, ft0, ft1
-; RV32-NEXT:    fcvt.w.d a0, ft0, rtz
-; RV32-NEXT:    feq.d a2, ft2, ft2
+; RV32-NEXT:    vfmv.f.s fa3, v8
+; RV32-NEXT:    fmax.d fa5, fa3, fa5
+; RV32-NEXT:    fmin.d fa5, fa5, fa4
+; RV32-NEXT:    fcvt.w.d a0, fa5, rtz
+; RV32-NEXT:    feq.d a2, fa3, fa3
 ; RV32-NEXT:    seqz a2, a2
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a2, a0
@@ -422,85 +422,85 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    lui a0, %hi(.LCPI12_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
+; RV64-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
 ; RV64-NEXT:    lui a0, %hi(.LCPI12_1)
-; RV64-NEXT:    fld ft1, %lo(.LCPI12_1)(a0)
-; RV64-NEXT:    vfmv.f.s ft2, v8
-; RV64-NEXT:    fmax.d ft3, ft2, ft0
-; RV64-NEXT:    fmin.d ft3, ft3, ft1
-; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
-; RV64-NEXT:    feq.d a2, ft2, ft2
+; RV64-NEXT:    fld fa4, %lo(.LCPI12_1)(a0)
+; RV64-NEXT:    vfmv.f.s fa3, v8
+; RV64-NEXT:    fmax.d fa2, fa3, fa5
+; RV64-NEXT:    fmin.d fa2, fa2, fa4
+; RV64-NEXT:    fcvt.l.d a0, fa2, rtz
+; RV64-NEXT:    feq.d a2, fa3, fa3
 ; RV64-NEXT:    seqz a2, a2
 ; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 8(sp)
 ; RV64-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
 ; RV64-NEXT:    vslidedown.vi v12, v8, 7
-; RV64-NEXT:    vfmv.f.s ft2, v12
-; RV64-NEXT:    fmax.d ft3, ft2, ft0
-; RV64-NEXT:    fmin.d ft3, ft3, ft1
-; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
-; RV64-NEXT:    feq.d a2, ft2, ft2
+; RV64-NEXT:    vfmv.f.s fa3, v12
+; RV64-NEXT:    fmax.d fa2, fa3, fa5
+; RV64-NEXT:    fmin.d fa2, fa2, fa4
+; RV64-NEXT:    fcvt.l.d a0, fa2, rtz
+; RV64-NEXT:    feq.d a2, fa3, fa3
 ; RV64-NEXT:    seqz a2, a2
 ; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 15(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 6
-; RV64-NEXT:    vfmv.f.s ft2, v12
-; RV64-NEXT:    fmax.d ft3, ft2, ft0
-; RV64-NEXT:    fmin.d ft3, ft3, ft1
-; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
-; RV64-NEXT:    feq.d a2, ft2, ft2
+; RV64-NEXT:    vfmv.f.s fa3, v12
+; RV64-NEXT:    fmax.d fa2, fa3, fa5
+; RV64-NEXT:    fmin.d fa2, fa2, fa4
+; RV64-NEXT:    fcvt.l.d a0, fa2, rtz
+; RV64-NEXT:    feq.d a2, fa3, fa3
 ; RV64-NEXT:    seqz a2, a2
 ; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 14(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 5
-; RV64-NEXT:    vfmv.f.s ft2, v12
-; RV64-NEXT:    fmax.d ft3, ft2, ft0
-; RV64-NEXT:    fmin.d ft3, ft3, ft1
-; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
-; RV64-NEXT:    feq.d a2, ft2, ft2
+; RV64-NEXT:    vfmv.f.s fa3, v12
+; RV64-NEXT:    fmax.d fa2, fa3, fa5
+; RV64-NEXT:    fmin.d fa2, fa2, fa4
+; RV64-NEXT:    fcvt.l.d a0, fa2, rtz
+; RV64-NEXT:    feq.d a2, fa3, fa3
 ; RV64-NEXT:    seqz a2, a2
 ; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 13(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 4
-; RV64-NEXT:    vfmv.f.s ft2, v12
-; RV64-NEXT:    fmax.d ft3, ft2, ft0
-; RV64-NEXT:    fmin.d ft3, ft3, ft1
-; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
-; RV64-NEXT:    feq.d a2, ft2, ft2
+; RV64-NEXT:    vfmv.f.s fa3, v12
+; RV64-NEXT:    fmax.d fa2, fa3, fa5
+; RV64-NEXT:    fmin.d fa2, fa2, fa4
+; RV64-NEXT:    fcvt.l.d a0, fa2, rtz
+; RV64-NEXT:    feq.d a2, fa3, fa3
 ; RV64-NEXT:    seqz a2, a2
 ; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 12(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 3
-; RV64-NEXT:    vfmv.f.s ft2, v12
-; RV64-NEXT:    fmax.d ft3, ft2, ft0
-; RV64-NEXT:    fmin.d ft3, ft3, ft1
-; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
-; RV64-NEXT:    feq.d a2, ft2, ft2
+; RV64-NEXT:    vfmv.f.s fa3, v12
+; RV64-NEXT:    fmax.d fa2, fa3, fa5
+; RV64-NEXT:    fmin.d fa2, fa2, fa4
+; RV64-NEXT:    fcvt.l.d a0, fa2, rtz
+; RV64-NEXT:    feq.d a2, fa3, fa3
 ; RV64-NEXT:    seqz a2, a2
 ; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 11(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 2
-; RV64-NEXT:    vfmv.f.s ft2, v12
-; RV64-NEXT:    fmax.d ft3, ft2, ft0
-; RV64-NEXT:    fmin.d ft3, ft3, ft1
-; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
-; RV64-NEXT:    feq.d a2, ft2, ft2
+; RV64-NEXT:    vfmv.f.s fa3, v12
+; RV64-NEXT:    fmax.d fa2, fa3, fa5
+; RV64-NEXT:    fmin.d fa2, fa2, fa4
+; RV64-NEXT:    fcvt.l.d a0, fa2, rtz
+; RV64-NEXT:    feq.d a2, fa3, fa3
 ; RV64-NEXT:    seqz a2, a2
 ; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 10(sp)
 ; RV64-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-NEXT:    vfmv.f.s ft2, v8
-; RV64-NEXT:    fmax.d ft0, ft2, ft0
-; RV64-NEXT:    fmin.d ft0, ft0, ft1
-; RV64-NEXT:    fcvt.l.d a0, ft0, rtz
-; RV64-NEXT:    feq.d a2, ft2, ft2
+; RV64-NEXT:    vfmv.f.s fa3, v8
+; RV64-NEXT:    fmax.d fa5, fa3, fa5
+; RV64-NEXT:    fmin.d fa5, fa5, fa4
+; RV64-NEXT:    fcvt.l.d a0, fa5, rtz
+; RV64-NEXT:    feq.d a2, fa3, fa3
 ; RV64-NEXT:    seqz a2, a2
 ; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
@@ -527,55 +527,55 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    lui a0, %hi(.LCPI13_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
-; RV32-NEXT:    vfmv.f.s ft1, v8
-; RV32-NEXT:    fcvt.d.w ft2, zero
-; RV32-NEXT:    fmax.d ft1, ft1, ft2
-; RV32-NEXT:    fmin.d ft1, ft1, ft0
-; RV32-NEXT:    fcvt.wu.d a0, ft1, rtz
+; RV32-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
+; RV32-NEXT:    vfmv.f.s fa4, v8
+; RV32-NEXT:    fcvt.d.w fa3, zero
+; RV32-NEXT:    fmax.d fa4, fa4, fa3
+; RV32-NEXT:    fmin.d fa4, fa4, fa5
+; RV32-NEXT:    fcvt.wu.d a0, fa4, rtz
 ; RV32-NEXT:    sb a0, 8(sp)
 ; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
 ; RV32-NEXT:    vslidedown.vi v12, v8, 7
-; RV32-NEXT:    vfmv.f.s ft1, v12
-; RV32-NEXT:    fmax.d ft1, ft1, ft2
-; RV32-NEXT:    fmin.d ft1, ft1, ft0
-; RV32-NEXT:    fcvt.wu.d a0, ft1, rtz
+; RV32-NEXT:    vfmv.f.s fa4, v12
+; RV32-NEXT:    fmax.d fa4, fa4, fa3
+; RV32-NEXT:    fmin.d fa4, fa4, fa5
+; RV32-NEXT:    fcvt.wu.d a0, fa4, rtz
 ; RV32-NEXT:    sb a0, 15(sp)
 ; RV32-NEXT:    vslidedown.vi v12, v8, 6
-; RV32-NEXT:    vfmv.f.s ft1, v12
-; RV32-NEXT:    fmax.d ft1, ft1, ft2
-; RV32-NEXT:    fmin.d ft1, ft1, ft0
-; RV32-NEXT:    fcvt.wu.d a0, ft1, rtz
+; RV32-NEXT:    vfmv.f.s fa4, v12
+; RV32-NEXT:    fmax.d fa4, fa4, fa3
+; RV32-NEXT:    fmin.d fa4, fa4, fa5
+; RV32-NEXT:    fcvt.wu.d a0, fa4, rtz
 ; RV32-NEXT:    sb a0, 14(sp)
 ; RV32-NEXT:    vslidedown.vi v12, v8, 5
-; RV32-NEXT:    vfmv.f.s ft1, v12
-; RV32-NEXT:    fmax.d ft1, ft1, ft2
-; RV32-NEXT:    fmin.d ft1, ft1, ft0
-; RV32-NEXT:    fcvt.wu.d a0, ft1, rtz
+; RV32-NEXT:    vfmv.f.s fa4, v12
+; RV32-NEXT:    fmax.d fa4, fa4, fa3
+; RV32-NEXT:    fmin.d fa4, fa4, fa5
+; RV32-NEXT:    fcvt.wu.d a0, fa4, rtz
 ; RV32-NEXT:    sb a0, 13(sp)
 ; RV32-NEXT:    vslidedown.vi v12, v8, 4
-; RV32-NEXT:    vfmv.f.s ft1, v12
-; RV32-NEXT:    fmax.d ft1, ft1, ft2
-; RV32-NEXT:    fmin.d ft1, ft1, ft0
-; RV32-NEXT:    fcvt.wu.d a0, ft1, rtz
+; RV32-NEXT:    vfmv.f.s fa4, v12
+; RV32-NEXT:    fmax.d fa4, fa4, fa3
+; RV32-NEXT:    fmin.d fa4, fa4, fa5
+; RV32-NEXT:    fcvt.wu.d a0, fa4, rtz
 ; RV32-NEXT:    sb a0, 12(sp)
 ; RV32-NEXT:    vslidedown.vi v12, v8, 3
-; RV32-NEXT:    vfmv.f.s ft1, v12
-; RV32-NEXT:    fmax.d ft1, ft1, ft2
-; RV32-NEXT:    fmin.d ft1, ft1, ft0
-; RV32-NEXT:    fcvt.wu.d a0, ft1, rtz
+; RV32-NEXT:    vfmv.f.s fa4, v12
+; RV32-NEXT:    fmax.d fa4, fa4, fa3
+; RV32-NEXT:    fmin.d fa4, fa4, fa5
+; RV32-NEXT:    fcvt.wu.d a0, fa4, rtz
 ; RV32-NEXT:    sb a0, 11(sp)
 ; RV32-NEXT:    vslidedown.vi v12, v8, 2
-; RV32-NEXT:    vfmv.f.s ft1, v12
-; RV32-NEXT:    fmax.d ft1, ft1, ft2
-; RV32-NEXT:    fmin.d ft1, ft1, ft0
-; RV32-NEXT:    fcvt.wu.d a0, ft1, rtz
+; RV32-NEXT:    vfmv.f.s fa4, v12
+; RV32-NEXT:    fmax.d fa4, fa4, fa3
+; RV32-NEXT:    fmin.d fa4, fa4, fa5
+; RV32-NEXT:    fcvt.wu.d a0, fa4, rtz
 ; RV32-NEXT:    sb a0, 10(sp)
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s ft1, v8
-; RV32-NEXT:    fmax.d ft1, ft1, ft2
-; RV32-NEXT:    fmin.d ft0, ft1, ft0
-; RV32-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-NEXT:    vfmv.f.s fa4, v8
+; RV32-NEXT:    fmax.d fa4, fa4, fa3
+; RV32-NEXT:    fmin.d fa5, fa4, fa5
+; RV32-NEXT:    fcvt.wu.d a0, fa5, rtz
 ; RV32-NEXT:    sb a0, 9(sp)
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
@@ -591,55 +591,55 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    lui a0, %hi(.LCPI13_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
-; RV64-NEXT:    vfmv.f.s ft1, v8
-; RV64-NEXT:    fmv.d.x ft2, zero
-; RV64-NEXT:    fmax.d ft1, ft1, ft2
-; RV64-NEXT:    fmin.d ft1, ft1, ft0
-; RV64-NEXT:    fcvt.lu.d a0, ft1, rtz
+; RV64-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
+; RV64-NEXT:    vfmv.f.s fa4, v8
+; RV64-NEXT:    fmv.d.x fa3, zero
+; RV64-NEXT:    fmax.d fa4, fa4, fa3
+; RV64-NEXT:    fmin.d fa4, fa4, fa5
+; RV64-NEXT:    fcvt.lu.d a0, fa4, rtz
 ; RV64-NEXT:    sb a0, 8(sp)
 ; RV64-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
 ; RV64-NEXT:    vslidedown.vi v12, v8, 7
-; RV64-NEXT:    vfmv.f.s ft1, v12
-; RV64-NEXT:    fmax.d ft1, ft1, ft2
-; RV64-NEXT:    fmin.d ft1, ft1, ft0
-; RV64-NEXT:    fcvt.lu.d a0, ft1, rtz
+; RV64-NEXT:    vfmv.f.s fa4, v12
+; RV64-NEXT:    fmax.d fa4, fa4, fa3
+; RV64-NEXT:    fmin.d fa4, fa4, fa5
+; RV64-NEXT:    fcvt.lu.d a0, fa4, rtz
 ; RV64-NEXT:    sb a0, 15(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 6
-; RV64-NEXT:    vfmv.f.s ft1, v12
-; RV64-NEXT:    fmax.d ft1, ft1, ft2
-; RV64-NEXT:    fmin.d ft1, ft1, ft0
-; RV64-NEXT:    fcvt.lu.d a0, ft1, rtz
+; RV64-NEXT:    vfmv.f.s fa4, v12
+; RV64-NEXT:    fmax.d fa4, fa4, fa3
+; RV64-NEXT:    fmin.d fa4, fa4, fa5
+; RV64-NEXT:    fcvt.lu.d a0, fa4, rtz
 ; RV64-NEXT:    sb a0, 14(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 5
-; RV64-NEXT:    vfmv.f.s ft1, v12
-; RV64-NEXT:    fmax.d ft1, ft1, ft2
-; RV64-NEXT:    fmin.d ft1, ft1, ft0
-; RV64-NEXT:    fcvt.lu.d a0, ft1, rtz
+; RV64-NEXT:    vfmv.f.s fa4, v12
+; RV64-NEXT:    fmax.d fa4, fa4, fa3
+; RV64-NEXT:    fmin.d fa4, fa4, fa5
+; RV64-NEXT:    fcvt.lu.d a0, fa4, rtz
 ; RV64-NEXT:    sb a0, 13(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 4
-; RV64-NEXT:    vfmv.f.s ft1, v12
-; RV64-NEXT:    fmax.d ft1, ft1, ft2
-; RV64-NEXT:    fmin.d ft1, ft1, ft0
-; RV64-NEXT:    fcvt.lu.d a0, ft1, rtz
+; RV64-NEXT:    vfmv.f.s fa4, v12
+; RV64-NEXT:    fmax.d fa4, fa4, fa3
+; RV64-NEXT:    fmin.d fa4, fa4, fa5
+; RV64-NEXT:    fcvt.lu.d a0, fa4, rtz
 ; RV64-NEXT:    sb a0, 12(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 3
-; RV64-NEXT:    vfmv.f.s ft1, v12
-; RV64-NEXT:    fmax.d ft1, ft1, ft2
-; RV64-NEXT:    fmin.d ft1, ft1, ft0
-; RV64-NEXT:    fcvt.lu.d a0, ft1, rtz
+; RV64-NEXT:    vfmv.f.s fa4, v12
+; RV64-NEXT:    fmax.d fa4, fa4, fa3
+; RV64-NEXT:    fmin.d fa4, fa4, fa5
+; RV64-NEXT:    fcvt.lu.d a0, fa4, rtz
 ; RV64-NEXT:    sb a0, 11(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 2
-; RV64-NEXT:    vfmv.f.s ft1, v12
-; RV64-NEXT:    fmax.d ft1, ft1, ft2
-; RV64-NEXT:    fmin.d ft1, ft1, ft0
-; RV64-NEXT:    fcvt.lu.d a0, ft1, rtz
+; RV64-NEXT:    vfmv.f.s fa4, v12
+; RV64-NEXT:    fmax.d fa4, fa4, fa3
+; RV64-NEXT:    fmin.d fa4, fa4, fa5
+; RV64-NEXT:    fcvt.lu.d a0, fa4, rtz
 ; RV64-NEXT:    sb a0, 10(sp)
 ; RV64-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-NEXT:    vfmv.f.s ft1, v8
-; RV64-NEXT:    fmax.d ft1, ft1, ft2
-; RV64-NEXT:    fmin.d ft0, ft1, ft0
-; RV64-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-NEXT:    vfmv.f.s fa4, v8
+; RV64-NEXT:    fmax.d fa4, fa4, fa3
+; RV64-NEXT:    fmin.d fa5, fa4, fa5
+; RV64-NEXT:    fcvt.lu.d a0, fa5, rtz
 ; RV64-NEXT:    sb a0, 9(sp)
 ; RV64-NEXT:    addi a0, sp, 8
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
index aa460a6620ea..90a6e6e51b6b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
@@ -10,10 +10,10 @@ define <1 x half> @round_v1f16(<1 x half> %x) {
 ; CHECK-LABEL: round_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -30,10 +30,10 @@ define <2 x half> @round_v2f16(<2 x half> %x) {
 ; CHECK-LABEL: round_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -50,10 +50,10 @@ define <4 x half> @round_v4f16(<4 x half> %x) {
 ; CHECK-LABEL: round_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -70,10 +70,10 @@ define <8 x half> @round_v8f16(<8 x half> %x) {
 ; CHECK-LABEL: round_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -90,10 +90,10 @@ define <16 x half> @round_v16f16(<16 x half> %x) {
 ; CHECK-LABEL: round_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -110,11 +110,11 @@ define <32 x half> @round_v32f16(<32 x half> %x) {
 ; CHECK-LABEL: round_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
 ; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -133,8 +133,8 @@ define <1 x float> @round_v1f32(<1 x float> %x) {
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -153,8 +153,8 @@ define <2 x float> @round_v2f32(<2 x float> %x) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -173,8 +173,8 @@ define <4 x float> @round_v4f32(<4 x float> %x) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -193,8 +193,8 @@ define <8 x float> @round_v8f32(<8 x float> %x) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -213,8 +213,8 @@ define <16 x float> @round_v16f32(<16 x float> %x) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -231,10 +231,10 @@ define <1 x double> @round_v1f64(<1 x double> %x) {
 ; CHECK-LABEL: round_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -251,10 +251,10 @@ define <2 x double> @round_v2f64(<2 x double> %x) {
 ; CHECK-LABEL: round_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -271,10 +271,10 @@ define <4 x double> @round_v4f64(<4 x double> %x) {
 ; CHECK-LABEL: round_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -291,10 +291,10 @@ define <8 x double> @round_v8f64(<8 x double> %x) {
 ; CHECK-LABEL: round_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI14_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI14_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
index 3739fb677b3f..00efbc75c066 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
@@ -10,10 +10,10 @@ define <1 x half> @roundeven_v1f16(<1 x half> %x) {
 ; CHECK-LABEL: roundeven_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -30,10 +30,10 @@ define <2 x half> @roundeven_v2f16(<2 x half> %x) {
 ; CHECK-LABEL: roundeven_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -50,10 +50,10 @@ define <4 x half> @roundeven_v4f16(<4 x half> %x) {
 ; CHECK-LABEL: roundeven_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -70,10 +70,10 @@ define <8 x half> @roundeven_v8f16(<8 x half> %x) {
 ; CHECK-LABEL: roundeven_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -90,10 +90,10 @@ define <16 x half> @roundeven_v16f16(<16 x half> %x) {
 ; CHECK-LABEL: roundeven_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -110,11 +110,11 @@ define <32 x half> @roundeven_v32f16(<32 x half> %x) {
 ; CHECK-LABEL: roundeven_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
 ; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -133,8 +133,8 @@ define <1 x float> @roundeven_v1f32(<1 x float> %x) {
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -153,8 +153,8 @@ define <2 x float> @roundeven_v2f32(<2 x float> %x) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -173,8 +173,8 @@ define <4 x float> @roundeven_v4f32(<4 x float> %x) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -193,8 +193,8 @@ define <8 x float> @roundeven_v8f32(<8 x float> %x) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -213,8 +213,8 @@ define <16 x float> @roundeven_v16f32(<16 x float> %x) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -231,10 +231,10 @@ define <1 x double> @roundeven_v1f64(<1 x double> %x) {
 ; CHECK-LABEL: roundeven_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -251,10 +251,10 @@ define <2 x double> @roundeven_v2f64(<2 x double> %x) {
 ; CHECK-LABEL: roundeven_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -271,10 +271,10 @@ define <4 x double> @roundeven_v4f64(<4 x double> %x) {
 ; CHECK-LABEL: roundeven_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -291,10 +291,10 @@ define <8 x double> @roundeven_v8f64(<8 x double> %x) {
 ; CHECK-LABEL: roundeven_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI14_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI14_0)(a0)
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 5941e662fec2..26411ecaa4b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -7195,15 +7195,15 @@ define <2 x half> @mgather_v2f16(<2 x ptr> %ptrs, <2 x i1> %m, <2 x half> %passt
 ; RV64ZVE32F-NEXT:  .LBB59_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB59_3: # %cond.load
-; RV64ZVE32F-NEXT:    flh ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    andi a2, a2, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB59_2
 ; RV64ZVE32F-NEXT:  .LBB59_4: # %cond.load1
-; RV64ZVE32F-NEXT:    flh ft0, 0(a1)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
 ; RV64ZVE32F-NEXT:    ret
   %v = call <2 x half> @llvm.masked.gather.v2f16.v2p0(<2 x ptr> %ptrs, i32 2, <2 x i1> %m, <2 x half> %passthru)
@@ -7246,34 +7246,34 @@ define <4 x half> @mgather_v4f16(<4 x ptr> %ptrs, <4 x i1> %m, <4 x half> %passt
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB60_5: # %cond.load
 ; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, mf2, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB60_2
 ; RV64ZVE32F-NEXT:  .LBB60_6: # %cond.load1
 ; RV64ZVE32F-NEXT:    ld a2, 8(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
 ; RV64ZVE32F-NEXT:    andi a2, a1, 4
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB60_3
 ; RV64ZVE32F-NEXT:  .LBB60_7: # %cond.load4
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
 ; RV64ZVE32F-NEXT:    andi a1, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB60_4
 ; RV64ZVE32F-NEXT:  .LBB60_8: # %cond.load7
 ; RV64ZVE32F-NEXT:    ld a0, 24(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
 ; RV64ZVE32F-NEXT:    ret
   %v = call <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> %m, <4 x half> %passthru)
@@ -7314,34 +7314,34 @@ define <4 x half> @mgather_truemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru)
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB61_5: # %cond.load
 ; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, mf2, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB61_2
 ; RV64ZVE32F-NEXT:  .LBB61_6: # %cond.load1
 ; RV64ZVE32F-NEXT:    ld a2, 8(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
 ; RV64ZVE32F-NEXT:    andi a2, a1, 4
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB61_3
 ; RV64ZVE32F-NEXT:  .LBB61_7: # %cond.load4
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
 ; RV64ZVE32F-NEXT:    andi a1, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB61_4
 ; RV64ZVE32F-NEXT:  .LBB61_8: # %cond.load7
 ; RV64ZVE32F-NEXT:    ld a0, 24(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
 ; RV64ZVE32F-NEXT:    ret
   %mhead = insertelement <4 x i1> poison, i1 1, i32 0
@@ -7416,70 +7416,70 @@ define <8 x half> @mgather_v8f16(<8 x ptr> %ptrs, <8 x i1> %m, <8 x half> %passt
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB63_9: # %cond.load
 ; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB63_2
 ; RV64ZVE32F-NEXT:  .LBB63_10: # %cond.load1
 ; RV64ZVE32F-NEXT:    ld a2, 8(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
 ; RV64ZVE32F-NEXT:    andi a2, a1, 4
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB63_3
 ; RV64ZVE32F-NEXT:  .LBB63_11: # %cond.load4
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB63_4
 ; RV64ZVE32F-NEXT:  .LBB63_12: # %cond.load7
 ; RV64ZVE32F-NEXT:    ld a2, 24(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB63_5
 ; RV64ZVE32F-NEXT:  .LBB63_13: # %cond.load10
 ; RV64ZVE32F-NEXT:    ld a2, 32(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB63_6
 ; RV64ZVE32F-NEXT:  .LBB63_14: # %cond.load13
 ; RV64ZVE32F-NEXT:    ld a2, 40(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
 ; RV64ZVE32F-NEXT:    andi a2, a1, 64
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB63_7
 ; RV64ZVE32F-NEXT:  .LBB63_15: # %cond.load16
 ; RV64ZVE32F-NEXT:    ld a2, 48(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB63_8
 ; RV64ZVE32F-NEXT:  .LBB63_16: # %cond.load19
 ; RV64ZVE32F-NEXT:    ld a0, 56(a0)
-; RV64ZVE32F-NEXT:    flh ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
 ; RV64ZVE32F-NEXT:    ret
   %v = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru)
@@ -7517,9 +7517,9 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:  .LBB64_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB64_4
@@ -7529,9 +7529,9 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 1
 ; RV64ZVE32F-NEXT:  .LBB64_4: # %else2
@@ -7543,9 +7543,9 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v11, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v11, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v11, 2
 ; RV64ZVE32F-NEXT:  .LBB64_6: # %else5
@@ -7565,9 +7565,9 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 5
 ; RV64ZVE32F-NEXT:  .LBB64_10: # %else14
@@ -7587,9 +7587,9 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
@@ -7599,9 +7599,9 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 4
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
@@ -7611,9 +7611,9 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 6
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
@@ -7624,9 +7624,9 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
 ; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
 ; RV64ZVE32F-NEXT:    slli a1, a1, 1
 ; RV64ZVE32F-NEXT:    add a0, a0, a1
-; RV64ZVE32F-NEXT:    flh ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v8, 7
 ; RV64ZVE32F-NEXT:    vmv1r.v v8, v9
 ; RV64ZVE32F-NEXT:    ret
@@ -7666,9 +7666,9 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:  .LBB65_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB65_4
@@ -7678,9 +7678,9 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 1
 ; RV64ZVE32F-NEXT:  .LBB65_4: # %else2
@@ -7692,9 +7692,9 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v11, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v11, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v11, 2
 ; RV64ZVE32F-NEXT:  .LBB65_6: # %else5
@@ -7714,9 +7714,9 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 5
 ; RV64ZVE32F-NEXT:  .LBB65_10: # %else14
@@ -7736,9 +7736,9 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
@@ -7748,9 +7748,9 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 4
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
@@ -7760,9 +7760,9 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 6
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
@@ -7773,9 +7773,9 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
 ; RV64ZVE32F-NEXT:    slli a1, a1, 1
 ; RV64ZVE32F-NEXT:    add a0, a0, a1
-; RV64ZVE32F-NEXT:    flh ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v8, 7
 ; RV64ZVE32F-NEXT:    vmv1r.v v8, v9
 ; RV64ZVE32F-NEXT:    ret
@@ -7817,9 +7817,9 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    andi a2, a2, 255
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:  .LBB66_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB66_4
@@ -7830,9 +7830,9 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    andi a2, a2, 255
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 1
 ; RV64ZVE32F-NEXT:  .LBB66_4: # %else2
@@ -7845,9 +7845,9 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    andi a2, a2, 255
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v11, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v11, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v11, 2
 ; RV64ZVE32F-NEXT:  .LBB66_6: # %else5
@@ -7868,9 +7868,9 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    andi a2, a2, 255
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 5
 ; RV64ZVE32F-NEXT:  .LBB66_10: # %else14
@@ -7891,9 +7891,9 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    andi a2, a2, 255
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
@@ -7904,9 +7904,9 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    andi a2, a2, 255
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 4
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
@@ -7917,9 +7917,9 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    andi a2, a2, 255
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 6
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
@@ -7931,9 +7931,9 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    andi a1, a1, 255
 ; RV64ZVE32F-NEXT:    slli a1, a1, 1
 ; RV64ZVE32F-NEXT:    add a0, a0, a1
-; RV64ZVE32F-NEXT:    flh ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v8, 7
 ; RV64ZVE32F-NEXT:    vmv1r.v v8, v9
 ; RV64ZVE32F-NEXT:    ret
@@ -7975,9 +7975,9 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:  .LBB67_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB67_4
@@ -7987,8 +7987,8 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 1
 ; RV64ZVE32F-NEXT:  .LBB67_4: # %else2
@@ -8000,8 +8000,8 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
-; RV64ZVE32F-NEXT:    vfmv.s.f v11, ft0
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vfmv.s.f v11, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v11, 2
 ; RV64ZVE32F-NEXT:  .LBB67_6: # %else5
@@ -8021,8 +8021,8 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 5
 ; RV64ZVE32F-NEXT:  .LBB67_10: # %else14
@@ -8042,8 +8042,8 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
@@ -8053,9 +8053,9 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 4
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
@@ -8065,8 +8065,8 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flh ft0, 0(a2)
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v10, 6
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
@@ -8077,8 +8077,8 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
 ; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
 ; RV64ZVE32F-NEXT:    slli a1, a1, 1
 ; RV64ZVE32F-NEXT:    add a0, a0, a1
-; RV64ZVE32F-NEXT:    flh ft0, 0(a0)
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    flh fa5, 0(a0)
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v9, v8, 7
 ; RV64ZVE32F-NEXT:    vmv1r.v v8, v9
@@ -8162,15 +8162,15 @@ define <2 x float> @mgather_v2f32(<2 x ptr> %ptrs, <2 x i1> %m, <2 x float> %pas
 ; RV64ZVE32F-NEXT:  .LBB69_2: # %else2
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB69_3: # %cond.load
-; RV64ZVE32F-NEXT:    flw ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    andi a2, a2, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB69_2
 ; RV64ZVE32F-NEXT:  .LBB69_4: # %cond.load1
-; RV64ZVE32F-NEXT:    flw ft0, 0(a1)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a1)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
 ; RV64ZVE32F-NEXT:    ret
   %v = call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> %ptrs, i32 4, <2 x i1> %m, <2 x float> %passthru)
@@ -8213,34 +8213,34 @@ define <4 x float> @mgather_v4f32(<4 x ptr> %ptrs, <4 x i1> %m, <4 x float> %pas
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB70_5: # %cond.load
 ; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB70_2
 ; RV64ZVE32F-NEXT:  .LBB70_6: # %cond.load1
 ; RV64ZVE32F-NEXT:    ld a2, 8(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
 ; RV64ZVE32F-NEXT:    andi a2, a1, 4
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB70_3
 ; RV64ZVE32F-NEXT:  .LBB70_7: # %cond.load4
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
 ; RV64ZVE32F-NEXT:    andi a1, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB70_4
 ; RV64ZVE32F-NEXT:  .LBB70_8: # %cond.load7
 ; RV64ZVE32F-NEXT:    ld a0, 24(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
 ; RV64ZVE32F-NEXT:    ret
   %v = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %m, <4 x float> %passthru)
@@ -8280,34 +8280,34 @@ define <4 x float> @mgather_truemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passthr
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB71_5: # %cond.load
 ; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB71_2
 ; RV64ZVE32F-NEXT:  .LBB71_6: # %cond.load1
 ; RV64ZVE32F-NEXT:    ld a2, 8(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
 ; RV64ZVE32F-NEXT:    andi a2, a1, 4
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB71_3
 ; RV64ZVE32F-NEXT:  .LBB71_7: # %cond.load4
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
 ; RV64ZVE32F-NEXT:    andi a1, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB71_4
 ; RV64ZVE32F-NEXT:  .LBB71_8: # %cond.load7
 ; RV64ZVE32F-NEXT:    ld a0, 24(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
 ; RV64ZVE32F-NEXT:    ret
   %mhead = insertelement <4 x i1> poison, i1 1, i32 0
@@ -8382,70 +8382,70 @@ define <8 x float> @mgather_v8f32(<8 x ptr> %ptrs, <8 x i1> %m, <8 x float> %pas
 ; RV64ZVE32F-NEXT:    ret
 ; RV64ZVE32F-NEXT:  .LBB73_9: # %cond.load
 ; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB73_2
 ; RV64ZVE32F-NEXT:  .LBB73_10: # %cond.load1
 ; RV64ZVE32F-NEXT:    ld a2, 8(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 1
 ; RV64ZVE32F-NEXT:    andi a2, a1, 4
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB73_3
 ; RV64ZVE32F-NEXT:  .LBB73_11: # %cond.load4
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB73_4
 ; RV64ZVE32F-NEXT:  .LBB73_12: # %cond.load7
 ; RV64ZVE32F-NEXT:    ld a2, 24(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB73_5
 ; RV64ZVE32F-NEXT:  .LBB73_13: # %cond.load10
 ; RV64ZVE32F-NEXT:    ld a2, 32(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 4
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB73_6
 ; RV64ZVE32F-NEXT:  .LBB73_14: # %cond.load13
 ; RV64ZVE32F-NEXT:    ld a2, 40(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 5
 ; RV64ZVE32F-NEXT:    andi a2, a1, 64
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB73_7
 ; RV64ZVE32F-NEXT:  .LBB73_15: # %cond.load16
 ; RV64ZVE32F-NEXT:    ld a2, 48(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 6
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
 ; RV64ZVE32F-NEXT:    beqz a1, .LBB73_8
 ; RV64ZVE32F-NEXT:  .LBB73_16: # %cond.load19
 ; RV64ZVE32F-NEXT:    ld a0, 56(a0)
-; RV64ZVE32F-NEXT:    flw ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 7
 ; RV64ZVE32F-NEXT:    ret
   %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru)
@@ -8482,9 +8482,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:  .LBB74_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB74_4
@@ -8494,9 +8494,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
 ; RV64ZVE32F-NEXT:  .LBB74_4: # %else2
@@ -8508,9 +8508,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:  .LBB74_6: # %else5
@@ -8530,9 +8530,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT:  .LBB74_10: # %else14
@@ -8552,9 +8552,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
@@ -8564,9 +8564,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
@@ -8576,9 +8576,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 6
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
@@ -8589,9 +8589,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
 ; RV64ZVE32F-NEXT:    slli a1, a1, 2
 ; RV64ZVE32F-NEXT:    add a0, a0, a1
-; RV64ZVE32F-NEXT:    flw ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 7
 ; RV64ZVE32F-NEXT:    vmv2r.v v8, v10
@@ -8631,9 +8631,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:  .LBB75_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB75_4
@@ -8643,9 +8643,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
 ; RV64ZVE32F-NEXT:  .LBB75_4: # %else2
@@ -8657,9 +8657,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:  .LBB75_6: # %else5
@@ -8679,9 +8679,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT:  .LBB75_10: # %else14
@@ -8701,9 +8701,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
@@ -8713,9 +8713,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
@@ -8725,9 +8725,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 6
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
@@ -8738,9 +8738,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
 ; RV64ZVE32F-NEXT:    slli a1, a1, 2
 ; RV64ZVE32F-NEXT:    add a0, a0, a1
-; RV64ZVE32F-NEXT:    flw ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 7
 ; RV64ZVE32F-NEXT:    vmv2r.v v8, v10
@@ -8782,9 +8782,9 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    andi a2, a2, 255
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:  .LBB76_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB76_4
@@ -8795,9 +8795,9 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    andi a2, a2, 255
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
 ; RV64ZVE32F-NEXT:  .LBB76_4: # %else2
@@ -8810,9 +8810,9 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    andi a2, a2, 255
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:  .LBB76_6: # %else5
@@ -8833,9 +8833,9 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    andi a2, a2, 255
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT:  .LBB76_10: # %else14
@@ -8856,9 +8856,9 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    andi a2, a2, 255
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
@@ -8869,9 +8869,9 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    andi a2, a2, 255
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
@@ -8882,9 +8882,9 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    andi a2, a2, 255
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 6
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
@@ -8896,9 +8896,9 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    andi a1, a1, 255
 ; RV64ZVE32F-NEXT:    slli a1, a1, 2
 ; RV64ZVE32F-NEXT:    add a0, a0, a1
-; RV64ZVE32F-NEXT:    flw ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 7
 ; RV64ZVE32F-NEXT:    vmv2r.v v8, v10
@@ -8940,9 +8940,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:  .LBB77_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB77_4
@@ -8952,9 +8952,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
 ; RV64ZVE32F-NEXT:  .LBB77_4: # %else2
@@ -8966,9 +8966,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:  .LBB77_6: # %else5
@@ -8988,9 +8988,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT:  .LBB77_10: # %else14
@@ -9010,9 +9010,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
@@ -9022,9 +9022,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
@@ -9034,9 +9034,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 6
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
@@ -9047,9 +9047,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
 ; RV64ZVE32F-NEXT:    slli a1, a1, 2
 ; RV64ZVE32F-NEXT:    add a0, a0, a1
-; RV64ZVE32F-NEXT:    flw ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 7
 ; RV64ZVE32F-NEXT:    vmv2r.v v8, v10
@@ -9090,9 +9090,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:  .LBB78_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB78_4
@@ -9102,9 +9102,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
 ; RV64ZVE32F-NEXT:  .LBB78_4: # %else2
@@ -9116,9 +9116,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:  .LBB78_6: # %else5
@@ -9138,9 +9138,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT:  .LBB78_10: # %else14
@@ -9160,9 +9160,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
@@ -9172,9 +9172,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
@@ -9184,9 +9184,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 6
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
@@ -9197,9 +9197,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
 ; RV64ZVE32F-NEXT:    slli a1, a1, 2
 ; RV64ZVE32F-NEXT:    add a0, a0, a1
-; RV64ZVE32F-NEXT:    flw ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 7
 ; RV64ZVE32F-NEXT:    vmv2r.v v8, v10
@@ -9244,9 +9244,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    and a3, a3, a1
 ; RV64ZVE32F-NEXT:    slli a3, a3, 2
 ; RV64ZVE32F-NEXT:    add a3, a0, a3
-; RV64ZVE32F-NEXT:    flw ft0, 0(a3)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:  .LBB79_2: # %else
 ; RV64ZVE32F-NEXT:    andi a3, a2, 2
 ; RV64ZVE32F-NEXT:    beqz a3, .LBB79_4
@@ -9257,9 +9257,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    and a3, a3, a1
 ; RV64ZVE32F-NEXT:    slli a3, a3, 2
 ; RV64ZVE32F-NEXT:    add a3, a0, a3
-; RV64ZVE32F-NEXT:    flw ft0, 0(a3)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
 ; RV64ZVE32F-NEXT:  .LBB79_4: # %else2
@@ -9272,9 +9272,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    and a3, a3, a1
 ; RV64ZVE32F-NEXT:    slli a3, a3, 2
 ; RV64ZVE32F-NEXT:    add a3, a0, a3
-; RV64ZVE32F-NEXT:    flw ft0, 0(a3)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:  .LBB79_6: # %else5
@@ -9295,9 +9295,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    and a3, a3, a1
 ; RV64ZVE32F-NEXT:    slli a3, a3, 2
 ; RV64ZVE32F-NEXT:    add a3, a0, a3
-; RV64ZVE32F-NEXT:    flw ft0, 0(a3)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT:  .LBB79_10: # %else14
@@ -9318,9 +9318,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    and a3, a3, a1
 ; RV64ZVE32F-NEXT:    slli a3, a3, 2
 ; RV64ZVE32F-NEXT:    add a3, a0, a3
-; RV64ZVE32F-NEXT:    flw ft0, 0(a3)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
 ; RV64ZVE32F-NEXT:    andi a3, a2, 16
@@ -9331,9 +9331,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    and a3, a3, a1
 ; RV64ZVE32F-NEXT:    slli a3, a3, 2
 ; RV64ZVE32F-NEXT:    add a3, a0, a3
-; RV64ZVE32F-NEXT:    flw ft0, 0(a3)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT:    andi a3, a2, 32
@@ -9344,9 +9344,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    and a3, a3, a1
 ; RV64ZVE32F-NEXT:    slli a3, a3, 2
 ; RV64ZVE32F-NEXT:    add a3, a0, a3
-; RV64ZVE32F-NEXT:    flw ft0, 0(a3)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 6
 ; RV64ZVE32F-NEXT:    andi a2, a2, -128
@@ -9358,9 +9358,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    and a1, a2, a1
 ; RV64ZVE32F-NEXT:    slli a1, a1, 2
 ; RV64ZVE32F-NEXT:    add a0, a0, a1
-; RV64ZVE32F-NEXT:    flw ft0, 0(a0)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 7
 ; RV64ZVE32F-NEXT:    vmv2r.v v8, v10
@@ -9401,9 +9401,9 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v10, ft0
+; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:  .LBB80_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB80_4
@@ -9413,8 +9413,8 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
 ; RV64ZVE32F-NEXT:  .LBB80_4: # %else2
@@ -9439,8 +9439,8 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 5
 ; RV64ZVE32F-NEXT:  .LBB80_9: # %else14
@@ -9458,8 +9458,8 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
-; RV64ZVE32F-NEXT:    vfmv.s.f v14, ft0
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vfmv.s.f v14, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
@@ -9470,8 +9470,8 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
@@ -9481,8 +9481,8 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 4
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
@@ -9492,8 +9492,8 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    flw ft0, 0(a2)
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, ft0
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 6
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
@@ -9504,8 +9504,8 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
 ; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
 ; RV64ZVE32F-NEXT:    slli a1, a1, 2
 ; RV64ZVE32F-NEXT:    add a0, a0, a1
-; RV64ZVE32F-NEXT:    flw ft0, 0(a0)
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, ft0
+; RV64ZVE32F-NEXT:    flw fa5, 0(a0)
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 7
 ; RV64ZVE32F-NEXT:    vmv2r.v v8, v10

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll
index 01b7b4600361..d71b170850eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll
@@ -7,8 +7,8 @@ define void @masked_load_v1f16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    vse16.v v8, (a2)
 ; CHECK-NEXT:    ret
@@ -25,8 +25,8 @@ define void @masked_load_v1f32(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a1)
-; CHECK-NEXT:    fmv.w.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.w.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    vse32.v v8, (a2)
 ; CHECK-NEXT:    ret
@@ -43,8 +43,8 @@ define void @masked_load_v1f64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a1)
-; RV32-NEXT:    fcvt.d.w ft0, zero
-; RV32-NEXT:    vmfeq.vf v0, v8, ft0
+; RV32-NEXT:    fcvt.d.w fa5, zero
+; RV32-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV32-NEXT:    vle64.v v8, (a0), v0.t
 ; RV32-NEXT:    vse64.v v8, (a2)
 ; RV32-NEXT:    ret
@@ -53,8 +53,8 @@ define void @masked_load_v1f64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a1)
-; RV64-NEXT:    fmv.d.x ft0, zero
-; RV64-NEXT:    vmfeq.vf v0, v8, ft0
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV64-NEXT:    vle64.v v8, (a0), v0.t
 ; RV64-NEXT:    vse64.v v8, (a2)
 ; RV64-NEXT:    ret
@@ -71,8 +71,8 @@ define void @masked_load_v2f16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    vse16.v v8, (a2)
 ; CHECK-NEXT:    ret
@@ -89,8 +89,8 @@ define void @masked_load_v2f32(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a1)
-; CHECK-NEXT:    fmv.w.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.w.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    vse32.v v8, (a2)
 ; CHECK-NEXT:    ret
@@ -107,8 +107,8 @@ define void @masked_load_v2f64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a1)
-; RV32-NEXT:    fcvt.d.w ft0, zero
-; RV32-NEXT:    vmfeq.vf v0, v8, ft0
+; RV32-NEXT:    fcvt.d.w fa5, zero
+; RV32-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV32-NEXT:    vle64.v v8, (a0), v0.t
 ; RV32-NEXT:    vse64.v v8, (a2)
 ; RV32-NEXT:    ret
@@ -117,8 +117,8 @@ define void @masked_load_v2f64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a1)
-; RV64-NEXT:    fmv.d.x ft0, zero
-; RV64-NEXT:    vmfeq.vf v0, v8, ft0
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV64-NEXT:    vle64.v v8, (a0), v0.t
 ; RV64-NEXT:    vse64.v v8, (a2)
 ; RV64-NEXT:    ret
@@ -135,8 +135,8 @@ define void @masked_load_v4f16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    vse16.v v8, (a2)
 ; CHECK-NEXT:    ret
@@ -153,8 +153,8 @@ define void @masked_load_v4f32(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a1)
-; CHECK-NEXT:    fmv.w.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.w.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    vse32.v v8, (a2)
 ; CHECK-NEXT:    ret
@@ -171,8 +171,8 @@ define void @masked_load_v4f64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a1)
-; RV32-NEXT:    fcvt.d.w ft0, zero
-; RV32-NEXT:    vmfeq.vf v0, v8, ft0
+; RV32-NEXT:    fcvt.d.w fa5, zero
+; RV32-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV32-NEXT:    vle64.v v8, (a0), v0.t
 ; RV32-NEXT:    vse64.v v8, (a2)
 ; RV32-NEXT:    ret
@@ -181,8 +181,8 @@ define void @masked_load_v4f64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a1)
-; RV64-NEXT:    fmv.d.x ft0, zero
-; RV64-NEXT:    vmfeq.vf v0, v8, ft0
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV64-NEXT:    vle64.v v8, (a0), v0.t
 ; RV64-NEXT:    vse64.v v8, (a2)
 ; RV64-NEXT:    ret
@@ -199,8 +199,8 @@ define void @masked_load_v8f16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    vse16.v v8, (a2)
 ; CHECK-NEXT:    ret
@@ -217,8 +217,8 @@ define void @masked_load_v8f32(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a1)
-; CHECK-NEXT:    fmv.w.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.w.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    vse32.v v8, (a2)
 ; CHECK-NEXT:    ret
@@ -235,8 +235,8 @@ define void @masked_load_v8f64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a1)
-; RV32-NEXT:    fcvt.d.w ft0, zero
-; RV32-NEXT:    vmfeq.vf v0, v8, ft0
+; RV32-NEXT:    fcvt.d.w fa5, zero
+; RV32-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV32-NEXT:    vle64.v v8, (a0), v0.t
 ; RV32-NEXT:    vse64.v v8, (a2)
 ; RV32-NEXT:    ret
@@ -245,8 +245,8 @@ define void @masked_load_v8f64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a1)
-; RV64-NEXT:    fmv.d.x ft0, zero
-; RV64-NEXT:    vmfeq.vf v0, v8, ft0
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV64-NEXT:    vle64.v v8, (a0), v0.t
 ; RV64-NEXT:    vse64.v v8, (a2)
 ; RV64-NEXT:    ret
@@ -263,8 +263,8 @@ define void @masked_load_v16f16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    vse16.v v8, (a2)
 ; CHECK-NEXT:    ret
@@ -281,8 +281,8 @@ define void @masked_load_v16f32(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a1)
-; CHECK-NEXT:    fmv.w.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.w.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    vse32.v v8, (a2)
 ; CHECK-NEXT:    ret
@@ -299,8 +299,8 @@ define void @masked_load_v16f64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a1)
-; RV32-NEXT:    fcvt.d.w ft0, zero
-; RV32-NEXT:    vmfeq.vf v0, v8, ft0
+; RV32-NEXT:    fcvt.d.w fa5, zero
+; RV32-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV32-NEXT:    vle64.v v8, (a0), v0.t
 ; RV32-NEXT:    vse64.v v8, (a2)
 ; RV32-NEXT:    ret
@@ -309,8 +309,8 @@ define void @masked_load_v16f64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a1)
-; RV64-NEXT:    fmv.d.x ft0, zero
-; RV64-NEXT:    vmfeq.vf v0, v8, ft0
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV64-NEXT:    vle64.v v8, (a0), v0.t
 ; RV64-NEXT:    vse64.v v8, (a2)
 ; RV64-NEXT:    ret
@@ -328,8 +328,8 @@ define void @masked_load_v32f16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    vse16.v v8, (a2)
 ; CHECK-NEXT:    ret
@@ -347,8 +347,8 @@ define void @masked_load_v32f32(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a1)
-; CHECK-NEXT:    fmv.w.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.w.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    vse32.v v8, (a2)
 ; CHECK-NEXT:    ret
@@ -367,9 +367,9 @@ define void @masked_load_v32f64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vle64.v v16, (a1)
 ; RV32-NEXT:    vle64.v v24, (a3)
-; RV32-NEXT:    fcvt.d.w ft0, zero
-; RV32-NEXT:    vmfeq.vf v8, v16, ft0
-; RV32-NEXT:    vmfeq.vf v0, v24, ft0
+; RV32-NEXT:    fcvt.d.w fa5, zero
+; RV32-NEXT:    vmfeq.vf v8, v16, fa5
+; RV32-NEXT:    vmfeq.vf v0, v24, fa5
 ; RV32-NEXT:    addi a1, a0, 128
 ; RV32-NEXT:    vle64.v v16, (a1), v0.t
 ; RV32-NEXT:    vmv1r.v v0, v8
@@ -385,9 +385,9 @@ define void @masked_load_v32f64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vle64.v v16, (a1)
 ; RV64-NEXT:    vle64.v v24, (a3)
-; RV64-NEXT:    fmv.d.x ft0, zero
-; RV64-NEXT:    vmfeq.vf v8, v16, ft0
-; RV64-NEXT:    vmfeq.vf v0, v24, ft0
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vmfeq.vf v8, v16, fa5
+; RV64-NEXT:    vmfeq.vf v0, v24, fa5
 ; RV64-NEXT:    addi a1, a0, 128
 ; RV64-NEXT:    vle64.v v16, (a1), v0.t
 ; RV64-NEXT:    vmv1r.v v0, v8
@@ -410,8 +410,8 @@ define void @masked_load_v64f16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m8, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    vse16.v v8, (a2)
 ; CHECK-NEXT:    ret
@@ -431,9 +431,9 @@ define void @masked_load_v64f32(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK-NEXT:    vsetvli zero, a4, e32, m8, ta, ma
 ; CHECK-NEXT:    vle32.v v16, (a1)
 ; CHECK-NEXT:    vle32.v v24, (a3)
-; CHECK-NEXT:    fmv.w.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v8, v16, ft0
-; CHECK-NEXT:    vmfeq.vf v0, v24, ft0
+; CHECK-NEXT:    fmv.w.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v8, v16, fa5
+; CHECK-NEXT:    vmfeq.vf v0, v24, fa5
 ; CHECK-NEXT:    addi a1, a0, 128
 ; CHECK-NEXT:    vle32.v v16, (a1), v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v8
@@ -458,9 +458,9 @@ define void @masked_load_v128f16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
 ; CHECK-NEXT:    vsetvli zero, a4, e16, m8, ta, ma
 ; CHECK-NEXT:    vle16.v v16, (a1)
 ; CHECK-NEXT:    vle16.v v24, (a3)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v8, v16, ft0
-; CHECK-NEXT:    vmfeq.vf v0, v24, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v8, v16, fa5
+; CHECK-NEXT:    vmfeq.vf v0, v24, fa5
 ; CHECK-NEXT:    addi a1, a0, 128
 ; CHECK-NEXT:    vle16.v v16, (a1), v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
index ad57b686dd16..dbdd86909f4a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
@@ -8,8 +8,8 @@ define void @masked_store_v1f16(<1 x half>* %val_ptr, <1 x half>* %a, <1 x half>
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a2)
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vse16.v v9, (a1), v0.t
 ; CHECK-NEXT:    ret
   %m = load <1 x half>, <1 x half>* %m_ptr
@@ -26,8 +26,8 @@ define void @masked_store_v1f32(<1 x float>* %val_ptr, <1 x float>* %a, <1 x flo
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a2)
 ; CHECK-NEXT:    vle32.v v9, (a0)
-; CHECK-NEXT:    fmv.w.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.w.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vse32.v v9, (a1), v0.t
 ; CHECK-NEXT:    ret
   %m = load <1 x float>, <1 x float>* %m_ptr
@@ -44,8 +44,8 @@ define void @masked_store_v1f64(<1 x double>* %val_ptr, <1 x double>* %a, <1 x d
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a2)
 ; RV32-NEXT:    vle64.v v9, (a0)
-; RV32-NEXT:    fcvt.d.w ft0, zero
-; RV32-NEXT:    vmfeq.vf v0, v8, ft0
+; RV32-NEXT:    fcvt.d.w fa5, zero
+; RV32-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV32-NEXT:    vse64.v v9, (a1), v0.t
 ; RV32-NEXT:    ret
 ;
@@ -54,8 +54,8 @@ define void @masked_store_v1f64(<1 x double>* %val_ptr, <1 x double>* %a, <1 x d
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a2)
 ; RV64-NEXT:    vle64.v v9, (a0)
-; RV64-NEXT:    fmv.d.x ft0, zero
-; RV64-NEXT:    vmfeq.vf v0, v8, ft0
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV64-NEXT:    vse64.v v9, (a1), v0.t
 ; RV64-NEXT:    ret
   %m = load <1 x double>, <1 x double>* %m_ptr
@@ -72,8 +72,8 @@ define void @masked_store_v2f16(<2 x half>* %val_ptr, <2 x half>* %a, <2 x half>
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a2)
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vse16.v v9, (a1), v0.t
 ; CHECK-NEXT:    ret
   %m = load <2 x half>, <2 x half>* %m_ptr
@@ -90,8 +90,8 @@ define void @masked_store_v2f32(<2 x float>* %val_ptr, <2 x float>* %a, <2 x flo
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a2)
 ; CHECK-NEXT:    vle32.v v9, (a0)
-; CHECK-NEXT:    fmv.w.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.w.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vse32.v v9, (a1), v0.t
 ; CHECK-NEXT:    ret
   %m = load <2 x float>, <2 x float>* %m_ptr
@@ -108,8 +108,8 @@ define void @masked_store_v2f64(<2 x double>* %val_ptr, <2 x double>* %a, <2 x d
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a2)
 ; RV32-NEXT:    vle64.v v9, (a0)
-; RV32-NEXT:    fcvt.d.w ft0, zero
-; RV32-NEXT:    vmfeq.vf v0, v8, ft0
+; RV32-NEXT:    fcvt.d.w fa5, zero
+; RV32-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV32-NEXT:    vse64.v v9, (a1), v0.t
 ; RV32-NEXT:    ret
 ;
@@ -118,8 +118,8 @@ define void @masked_store_v2f64(<2 x double>* %val_ptr, <2 x double>* %a, <2 x d
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a2)
 ; RV64-NEXT:    vle64.v v9, (a0)
-; RV64-NEXT:    fmv.d.x ft0, zero
-; RV64-NEXT:    vmfeq.vf v0, v8, ft0
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV64-NEXT:    vse64.v v9, (a1), v0.t
 ; RV64-NEXT:    ret
   %m = load <2 x double>, <2 x double>* %m_ptr
@@ -136,8 +136,8 @@ define void @masked_store_v4f16(<4 x half>* %val_ptr, <4 x half>* %a, <4 x half>
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a2)
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vse16.v v9, (a1), v0.t
 ; CHECK-NEXT:    ret
   %m = load <4 x half>, <4 x half>* %m_ptr
@@ -154,8 +154,8 @@ define void @masked_store_v4f32(<4 x float>* %val_ptr, <4 x float>* %a, <4 x flo
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a2)
 ; CHECK-NEXT:    vle32.v v9, (a0)
-; CHECK-NEXT:    fmv.w.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.w.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vse32.v v9, (a1), v0.t
 ; CHECK-NEXT:    ret
   %m = load <4 x float>, <4 x float>* %m_ptr
@@ -172,8 +172,8 @@ define void @masked_store_v4f64(<4 x double>* %val_ptr, <4 x double>* %a, <4 x d
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a2)
 ; RV32-NEXT:    vle64.v v10, (a0)
-; RV32-NEXT:    fcvt.d.w ft0, zero
-; RV32-NEXT:    vmfeq.vf v0, v8, ft0
+; RV32-NEXT:    fcvt.d.w fa5, zero
+; RV32-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV32-NEXT:    vse64.v v10, (a1), v0.t
 ; RV32-NEXT:    ret
 ;
@@ -182,8 +182,8 @@ define void @masked_store_v4f64(<4 x double>* %val_ptr, <4 x double>* %a, <4 x d
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a2)
 ; RV64-NEXT:    vle64.v v10, (a0)
-; RV64-NEXT:    fmv.d.x ft0, zero
-; RV64-NEXT:    vmfeq.vf v0, v8, ft0
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV64-NEXT:    vse64.v v10, (a1), v0.t
 ; RV64-NEXT:    ret
   %m = load <4 x double>, <4 x double>* %m_ptr
@@ -200,8 +200,8 @@ define void @masked_store_v8f16(<8 x half>* %val_ptr, <8 x half>* %a, <8 x half>
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a2)
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vse16.v v9, (a1), v0.t
 ; CHECK-NEXT:    ret
   %m = load <8 x half>, <8 x half>* %m_ptr
@@ -218,8 +218,8 @@ define void @masked_store_v8f32(<8 x float>* %val_ptr, <8 x float>* %a, <8 x flo
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a2)
 ; CHECK-NEXT:    vle32.v v10, (a0)
-; CHECK-NEXT:    fmv.w.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.w.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vse32.v v10, (a1), v0.t
 ; CHECK-NEXT:    ret
   %m = load <8 x float>, <8 x float>* %m_ptr
@@ -236,8 +236,8 @@ define void @masked_store_v8f64(<8 x double>* %val_ptr, <8 x double>* %a, <8 x d
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a2)
 ; RV32-NEXT:    vle64.v v12, (a0)
-; RV32-NEXT:    fcvt.d.w ft0, zero
-; RV32-NEXT:    vmfeq.vf v0, v8, ft0
+; RV32-NEXT:    fcvt.d.w fa5, zero
+; RV32-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV32-NEXT:    vse64.v v12, (a1), v0.t
 ; RV32-NEXT:    ret
 ;
@@ -246,8 +246,8 @@ define void @masked_store_v8f64(<8 x double>* %val_ptr, <8 x double>* %a, <8 x d
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a2)
 ; RV64-NEXT:    vle64.v v12, (a0)
-; RV64-NEXT:    fmv.d.x ft0, zero
-; RV64-NEXT:    vmfeq.vf v0, v8, ft0
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV64-NEXT:    vse64.v v12, (a1), v0.t
 ; RV64-NEXT:    ret
   %m = load <8 x double>, <8 x double>* %m_ptr
@@ -264,8 +264,8 @@ define void @masked_store_v16f16(<16 x half>* %val_ptr, <16 x half>* %a, <16 x h
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a2)
 ; CHECK-NEXT:    vle16.v v10, (a0)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vse16.v v10, (a1), v0.t
 ; CHECK-NEXT:    ret
   %m = load <16 x half>, <16 x half>* %m_ptr
@@ -282,8 +282,8 @@ define void @masked_store_v16f32(<16 x float>* %val_ptr, <16 x float>* %a, <16 x
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a2)
 ; CHECK-NEXT:    vle32.v v12, (a0)
-; CHECK-NEXT:    fmv.w.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.w.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vse32.v v12, (a1), v0.t
 ; CHECK-NEXT:    ret
   %m = load <16 x float>, <16 x float>* %m_ptr
@@ -300,8 +300,8 @@ define void @masked_store_v16f64(<16 x double>* %val_ptr, <16 x double>* %a, <16
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vle64.v v8, (a2)
 ; RV32-NEXT:    vle64.v v16, (a0)
-; RV32-NEXT:    fcvt.d.w ft0, zero
-; RV32-NEXT:    vmfeq.vf v0, v8, ft0
+; RV32-NEXT:    fcvt.d.w fa5, zero
+; RV32-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV32-NEXT:    vse64.v v16, (a1), v0.t
 ; RV32-NEXT:    ret
 ;
@@ -310,8 +310,8 @@ define void @masked_store_v16f64(<16 x double>* %val_ptr, <16 x double>* %a, <16
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a2)
 ; RV64-NEXT:    vle64.v v16, (a0)
-; RV64-NEXT:    fmv.d.x ft0, zero
-; RV64-NEXT:    vmfeq.vf v0, v8, ft0
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV64-NEXT:    vse64.v v16, (a1), v0.t
 ; RV64-NEXT:    ret
   %m = load <16 x double>, <16 x double>* %m_ptr
@@ -329,8 +329,8 @@ define void @masked_store_v32f16(<32 x half>* %val_ptr, <32 x half>* %a, <32 x h
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a2)
 ; CHECK-NEXT:    vle16.v v12, (a0)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vse16.v v12, (a1), v0.t
 ; CHECK-NEXT:    ret
   %m = load <32 x half>, <32 x half>* %m_ptr
@@ -348,8 +348,8 @@ define void @masked_store_v32f32(<32 x float>* %val_ptr, <32 x float>* %a, <32 x
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a2)
 ; CHECK-NEXT:    vle32.v v16, (a0)
-; CHECK-NEXT:    fmv.w.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.w.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vse32.v v16, (a1), v0.t
 ; CHECK-NEXT:    ret
   %m = load <32 x float>, <32 x float>* %m_ptr
@@ -376,8 +376,8 @@ define void @masked_store_v32f64(<32 x double>* %val_ptr, <32 x double>* %a, <32
 ; RV32-NEXT:    add a2, sp, a2
 ; RV32-NEXT:    addi a2, a2, 16
 ; RV32-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
-; RV32-NEXT:    fcvt.d.w ft0, zero
-; RV32-NEXT:    vmfeq.vf v0, v8, ft0
+; RV32-NEXT:    fcvt.d.w fa5, zero
+; RV32-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV32-NEXT:    vle64.v v24, (a0)
 ; RV32-NEXT:    addi a0, a0, 128
 ; RV32-NEXT:    vle64.v v8, (a0)
@@ -388,7 +388,7 @@ define void @masked_store_v32f64(<32 x double>* %val_ptr, <32 x double>* %a, <32
 ; RV32-NEXT:    add a0, sp, a0
 ; RV32-NEXT:    addi a0, a0, 16
 ; RV32-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT:    vmfeq.vf v8, v16, ft0
+; RV32-NEXT:    vmfeq.vf v8, v16, fa5
 ; RV32-NEXT:    vse64.v v24, (a1), v0.t
 ; RV32-NEXT:    addi a0, a1, 128
 ; RV32-NEXT:    vmv1r.v v0, v8
@@ -416,8 +416,8 @@ define void @masked_store_v32f64(<32 x double>* %val_ptr, <32 x double>* %a, <32
 ; RV64-NEXT:    add a2, sp, a2
 ; RV64-NEXT:    addi a2, a2, 16
 ; RV64-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
-; RV64-NEXT:    fmv.d.x ft0, zero
-; RV64-NEXT:    vmfeq.vf v0, v8, ft0
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV64-NEXT:    vle64.v v24, (a0)
 ; RV64-NEXT:    addi a0, a0, 128
 ; RV64-NEXT:    vle64.v v8, (a0)
@@ -428,7 +428,7 @@ define void @masked_store_v32f64(<32 x double>* %val_ptr, <32 x double>* %a, <32
 ; RV64-NEXT:    add a0, sp, a0
 ; RV64-NEXT:    addi a0, a0, 16
 ; RV64-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV64-NEXT:    vmfeq.vf v8, v16, ft0
+; RV64-NEXT:    vmfeq.vf v8, v16, fa5
 ; RV64-NEXT:    vse64.v v24, (a1), v0.t
 ; RV64-NEXT:    addi a0, a1, 128
 ; RV64-NEXT:    vmv1r.v v0, v8
@@ -455,8 +455,8 @@ define void @masked_store_v64f16(<64 x half>* %val_ptr, <64 x half>* %a, <64 x h
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m8, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a2)
 ; CHECK-NEXT:    vle16.v v16, (a0)
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vse16.v v16, (a1), v0.t
 ; CHECK-NEXT:    ret
   %m = load <64 x half>, <64 x half>* %m_ptr
@@ -484,8 +484,8 @@ define void @masked_store_v64f32(<64 x float>* %val_ptr, <64 x float>* %a, <64 x
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 16
 ; CHECK-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT:    fmv.w.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.w.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle32.v v24, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle32.v v8, (a0)
@@ -496,7 +496,7 @@ define void @masked_store_v64f32(<64 x float>* %val_ptr, <64 x float>* %a, <64 x
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 16
 ; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vmfeq.vf v8, v16, ft0
+; CHECK-NEXT:    vmfeq.vf v8, v16, fa5
 ; CHECK-NEXT:    vse32.v v24, (a1), v0.t
 ; CHECK-NEXT:    addi a0, a1, 128
 ; CHECK-NEXT:    vmv1r.v v0, v8
@@ -533,8 +533,8 @@ define void @masked_store_v128f16(<128 x half>* %val_ptr, <128 x half>* %a, <128
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 16
 ; CHECK-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    fmv.h.x fa5, zero
+; CHECK-NEXT:    vmfeq.vf v0, v8, fa5
 ; CHECK-NEXT:    vle16.v v24, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle16.v v8, (a0)
@@ -545,7 +545,7 @@ define void @masked_store_v128f16(<128 x half>* %val_ptr, <128 x half>* %a, <128
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 16
 ; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vmfeq.vf v8, v16, ft0
+; CHECK-NEXT:    vmfeq.vf v8, v16, fa5
 ; CHECK-NEXT:    vse16.v v24, (a1), v0.t
 ; CHECK-NEXT:    addi a0, a1, 128
 ; CHECK-NEXT:    vmv1r.v v0, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
index b13db3c84828..643be941f9be 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
@@ -10,11 +10,11 @@ define <2 x half> @vp_nearbyint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %
 ; CHECK-LABEL: vp_nearbyint_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -31,13 +31,13 @@ define <2 x half> @vp_nearbyint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_nearbyint_v2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -58,11 +58,11 @@ define <4 x half> @vp_nearbyint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %
 ; CHECK-LABEL: vp_nearbyint_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -79,13 +79,13 @@ define <4 x half> @vp_nearbyint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_nearbyint_v4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -106,11 +106,11 @@ define <8 x half> @vp_nearbyint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %
 ; CHECK-LABEL: vp_nearbyint_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -127,13 +127,13 @@ define <8 x half> @vp_nearbyint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_nearbyint_v8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -155,11 +155,11 @@ define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -177,14 +177,14 @@ define <16 x half> @vp_nearbyint_v16f16_unmasked(<16 x half> %va, i32 zeroext %e
 ; CHECK-LABEL: vp_nearbyint_v16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI7_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v10
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -208,9 +208,9 @@ define <2 x float> @vp_nearbyint_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -231,9 +231,9 @@ define <2 x float> @vp_nearbyint_v2f32_unmasked(<2 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -256,9 +256,9 @@ define <4 x float> @vp_nearbyint_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -279,9 +279,9 @@ define <4 x float> @vp_nearbyint_v4f32_unmasked(<4 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -305,9 +305,9 @@ define <8 x float> @vp_nearbyint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -330,9 +330,9 @@ define <8 x float> @vp_nearbyint_v8f32_unmasked(<8 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -357,9 +357,9 @@ define <16 x float> @vp_nearbyint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -382,9 +382,9 @@ define <16 x float> @vp_nearbyint_v16f32_unmasked(<16 x float> %va, i32 zeroext
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -406,11 +406,11 @@ define <2 x double> @vp_nearbyint_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroe
 ; CHECK-LABEL: vp_nearbyint_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI16_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -427,13 +427,13 @@ define <2 x double> @vp_nearbyint_v2f64_unmasked(<2 x double> %va, i32 zeroext %
 ; CHECK-LABEL: vp_nearbyint_v2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI17_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -455,11 +455,11 @@ define <4 x double> @vp_nearbyint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI18_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -477,14 +477,14 @@ define <4 x double> @vp_nearbyint_v4f64_unmasked(<4 x double> %va, i32 zeroext %
 ; CHECK-LABEL: vp_nearbyint_v4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI19_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v10
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -507,11 +507,11 @@ define <8 x double> @vp_nearbyint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI20_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -529,14 +529,14 @@ define <8 x double> @vp_nearbyint_v8f64_unmasked(<8 x double> %va, i32 zeroext %
 ; CHECK-LABEL: vp_nearbyint_v8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI21_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v12
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -559,11 +559,11 @@ define <15 x double> @vp_nearbyint_v15f64(<15 x double> %va, <15 x i1> %m, i32 z
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -581,14 +581,14 @@ define <15 x double> @vp_nearbyint_v15f64_unmasked(<15 x double> %va, i32 zeroex
 ; CHECK-LABEL: vp_nearbyint_v15f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI23_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -611,11 +611,11 @@ define <16 x double> @vp_nearbyint_v16f64(<16 x double> %va, <16 x i1> %m, i32 z
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -633,14 +633,14 @@ define <16 x double> @vp_nearbyint_v16f64_unmasked(<16 x double> %va, i32 zeroex
 ; CHECK-LABEL: vp_nearbyint_v16f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI25_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -671,12 +671,12 @@ define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
 ; CHECK-NEXT:    li a1, 16
 ; CHECK-NEXT:  .LBB26_2:
 ; CHECK-NEXT:    lui a2, %hi(.LCPI26_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a2)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    frflags a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
@@ -693,7 +693,7 @@ define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
@@ -719,13 +719,13 @@ define <32 x double> @vp_nearbyint_v32f64_unmasked(<32 x double> %va, i32 zeroex
 ; CHECK-NEXT:    li a1, 16
 ; CHECK-NEXT:  .LBB27_2:
 ; CHECK-NEXT:    lui a2, %hi(.LCPI27_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI27_0)(a2)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    frflags a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
@@ -742,7 +742,7 @@ define <32 x double> @vp_nearbyint_v32f64_unmasked(<32 x double> %va, i32 zeroex
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index 3feb976fc0e1..084124fcf13b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -9,8 +9,8 @@ define half @vreduce_fadd_v1f16(ptr %x, half %s) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.h fa0, fa0, ft0
+; CHECK-NEXT:    vfmv.f.s fa5, v8
+; CHECK-NEXT:    fadd.h fa0, fa0, fa5
 ; CHECK-NEXT:    ret
   %v = load <1 x half>, ptr %x
   %red = call reassoc half @llvm.vector.reduce.fadd.v1f16(half %s, <1 x half> %v)
@@ -260,8 +260,8 @@ define float @vreduce_fadd_v1f32(ptr %x, float %s) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.s fa0, fa0, ft0
+; CHECK-NEXT:    vfmv.f.s fa5, v8
+; CHECK-NEXT:    fadd.s fa0, fa0, fa5
 ; CHECK-NEXT:    ret
   %v = load <1 x float>, ptr %x
   %red = call reassoc float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %v)
@@ -289,8 +289,8 @@ define float @vreduce_fwadd_v1f32(ptr %x, float %s) {
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfmv.f.s ft0, v9
-; CHECK-NEXT:    fadd.s fa0, fa0, ft0
+; CHECK-NEXT:    vfmv.f.s fa5, v9
+; CHECK-NEXT:    fadd.s fa0, fa0, fa5
 ; CHECK-NEXT:    ret
   %v = load <1 x half>, ptr %x
   %e = fpext <1 x half> %v to <1 x float>
@@ -736,8 +736,8 @@ define double @vreduce_fadd_v1f64(ptr %x, double %s) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
+; CHECK-NEXT:    vfmv.f.s fa5, v8
+; CHECK-NEXT:    fadd.d fa0, fa0, fa5
 ; CHECK-NEXT:    ret
   %v = load <1 x double>, ptr %x
   %red = call reassoc double @llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %v)
@@ -765,8 +765,8 @@ define double @vreduce_fwadd_v1f64(ptr %x, double %s) {
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vfmv.f.s ft0, v9
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
+; CHECK-NEXT:    vfmv.f.s fa5, v9
+; CHECK-NEXT:    fadd.d fa0, fa0, fa5
 ; CHECK-NEXT:    ret
   %v = load <1 x float>, ptr %x
   %e = fpext <1 x float> %v to <1 x double>
@@ -1139,9 +1139,9 @@ define half @vreduce_fmin_v2f16(ptr %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; RV32-NEXT:    lui a1, %hi(.LCPI68_0)
-; RV32-NEXT:    flh ft0, %lo(.LCPI68_0)(a1)
+; RV32-NEXT:    flh fa5, %lo(.LCPI68_0)(a1)
 ; RV32-NEXT:    vle16.v v8, (a0)
-; RV32-NEXT:    vfmv.s.f v9, ft0
+; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vfredmin.vs v8, v8, v9
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    ret
@@ -1149,10 +1149,10 @@ define half @vreduce_fmin_v2f16(ptr %x) {
 ; RV64-LABEL: vreduce_fmin_v2f16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI68_0)
-; RV64-NEXT:    flh ft0, %lo(.LCPI68_0)(a1)
+; RV64-NEXT:    flh fa5, %lo(.LCPI68_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; RV64-NEXT:    vle16.v v8, (a0)
-; RV64-NEXT:    vfmv.s.f v9, ft0
+; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vfredmin.vs v8, v8, v9
 ; RV64-NEXT:    vfmv.f.s fa0, v8
 ; RV64-NEXT:    ret
@@ -1168,9 +1168,9 @@ define half @vreduce_fmin_v4f16(ptr %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV32-NEXT:    lui a1, %hi(.LCPI69_0)
-; RV32-NEXT:    flh ft0, %lo(.LCPI69_0)(a1)
+; RV32-NEXT:    flh fa5, %lo(.LCPI69_0)(a1)
 ; RV32-NEXT:    vle16.v v8, (a0)
-; RV32-NEXT:    vfmv.s.f v9, ft0
+; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vfredmin.vs v8, v8, v9
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    ret
@@ -1178,10 +1178,10 @@ define half @vreduce_fmin_v4f16(ptr %x) {
 ; RV64-LABEL: vreduce_fmin_v4f16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI69_0)
-; RV64-NEXT:    flh ft0, %lo(.LCPI69_0)(a1)
+; RV64-NEXT:    flh fa5, %lo(.LCPI69_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV64-NEXT:    vle16.v v8, (a0)
-; RV64-NEXT:    vfmv.s.f v9, ft0
+; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vfredmin.vs v8, v8, v9
 ; RV64-NEXT:    vfmv.f.s fa0, v8
 ; RV64-NEXT:    ret
@@ -1195,9 +1195,9 @@ define half @vreduce_fmin_v4f16_nonans(ptr %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV32-NEXT:    lui a1, %hi(.LCPI70_0)
-; RV32-NEXT:    flh ft0, %lo(.LCPI70_0)(a1)
+; RV32-NEXT:    flh fa5, %lo(.LCPI70_0)(a1)
 ; RV32-NEXT:    vle16.v v8, (a0)
-; RV32-NEXT:    vfmv.s.f v9, ft0
+; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vfredmin.vs v8, v8, v9
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    ret
@@ -1205,10 +1205,10 @@ define half @vreduce_fmin_v4f16_nonans(ptr %x) {
 ; RV64-LABEL: vreduce_fmin_v4f16_nonans:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI70_0)
-; RV64-NEXT:    flh ft0, %lo(.LCPI70_0)(a1)
+; RV64-NEXT:    flh fa5, %lo(.LCPI70_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV64-NEXT:    vle16.v v8, (a0)
-; RV64-NEXT:    vfmv.s.f v9, ft0
+; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vfredmin.vs v8, v8, v9
 ; RV64-NEXT:    vfmv.f.s fa0, v8
 ; RV64-NEXT:    ret
@@ -1222,9 +1222,9 @@ define half @vreduce_fmin_v4f16_nonans_noinfs(ptr %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV32-NEXT:    lui a1, %hi(.LCPI71_0)
-; RV32-NEXT:    flh ft0, %lo(.LCPI71_0)(a1)
+; RV32-NEXT:    flh fa5, %lo(.LCPI71_0)(a1)
 ; RV32-NEXT:    vle16.v v8, (a0)
-; RV32-NEXT:    vfmv.s.f v9, ft0
+; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vfredmin.vs v8, v8, v9
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    ret
@@ -1232,10 +1232,10 @@ define half @vreduce_fmin_v4f16_nonans_noinfs(ptr %x) {
 ; RV64-LABEL: vreduce_fmin_v4f16_nonans_noinfs:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI71_0)
-; RV64-NEXT:    flh ft0, %lo(.LCPI71_0)(a1)
+; RV64-NEXT:    flh fa5, %lo(.LCPI71_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV64-NEXT:    vle16.v v8, (a0)
-; RV64-NEXT:    vfmv.s.f v9, ft0
+; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vfredmin.vs v8, v8, v9
 ; RV64-NEXT:    vfmv.f.s fa0, v8
 ; RV64-NEXT:    ret
@@ -1254,9 +1254,9 @@ define half @vreduce_fmin_v128f16(ptr %x) {
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    lui a1, %hi(.LCPI72_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI72_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI72_0)(a1)
 ; CHECK-NEXT:    vle16.v v16, (a0)
-; CHECK-NEXT:    vfmv.s.f v24, ft0
+; CHECK-NEXT:    vfmv.s.f v24, fa5
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v24
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1320,9 +1320,9 @@ define float @vreduce_fmin_v4f32_nonans_noinfs(ptr %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV32-NEXT:    lui a1, %hi(.LCPI76_0)
-; RV32-NEXT:    flw ft0, %lo(.LCPI76_0)(a1)
+; RV32-NEXT:    flw fa5, %lo(.LCPI76_0)(a1)
 ; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    vfmv.s.f v9, ft0
+; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vfredmin.vs v8, v8, v9
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    ret
@@ -1330,10 +1330,10 @@ define float @vreduce_fmin_v4f32_nonans_noinfs(ptr %x) {
 ; RV64-LABEL: vreduce_fmin_v4f32_nonans_noinfs:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI76_0)
-; RV64-NEXT:    flw ft0, %lo(.LCPI76_0)(a1)
+; RV64-NEXT:    flw fa5, %lo(.LCPI76_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV64-NEXT:    vle32.v v8, (a0)
-; RV64-NEXT:    vfmv.s.f v9, ft0
+; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vfredmin.vs v8, v8, v9
 ; RV64-NEXT:    vfmv.f.s fa0, v8
 ; RV64-NEXT:    ret
@@ -1376,9 +1376,9 @@ define double @vreduce_fmin_v2f64(ptr %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    lui a1, %hi(.LCPI78_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI78_0)(a1)
+; RV32-NEXT:    fld fa5, %lo(.LCPI78_0)(a1)
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    vfmv.s.f v9, ft0
+; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vfredmin.vs v8, v8, v9
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    ret
@@ -1386,10 +1386,10 @@ define double @vreduce_fmin_v2f64(ptr %x) {
 ; RV64-LABEL: vreduce_fmin_v2f64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI78_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI78_0)(a1)
+; RV64-NEXT:    fld fa5, %lo(.LCPI78_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    vfmv.s.f v9, ft0
+; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vfredmin.vs v8, v8, v9
 ; RV64-NEXT:    vfmv.f.s fa0, v8
 ; RV64-NEXT:    ret
@@ -1405,9 +1405,9 @@ define double @vreduce_fmin_v4f64(ptr %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT:    lui a1, %hi(.LCPI79_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI79_0)(a1)
+; RV32-NEXT:    fld fa5, %lo(.LCPI79_0)(a1)
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    vfmv.s.f v10, ft0
+; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vfredmin.vs v8, v8, v10
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    ret
@@ -1415,10 +1415,10 @@ define double @vreduce_fmin_v4f64(ptr %x) {
 ; RV64-LABEL: vreduce_fmin_v4f64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI79_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI79_0)(a1)
+; RV64-NEXT:    fld fa5, %lo(.LCPI79_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    vfmv.s.f v10, ft0
+; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vfredmin.vs v8, v8, v10
 ; RV64-NEXT:    vfmv.f.s fa0, v8
 ; RV64-NEXT:    ret
@@ -1432,9 +1432,9 @@ define double @vreduce_fmin_v4f64_nonans(ptr %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT:    lui a1, %hi(.LCPI80_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI80_0)(a1)
+; RV32-NEXT:    fld fa5, %lo(.LCPI80_0)(a1)
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    vfmv.s.f v10, ft0
+; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vfredmin.vs v8, v8, v10
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    ret
@@ -1442,10 +1442,10 @@ define double @vreduce_fmin_v4f64_nonans(ptr %x) {
 ; RV64-LABEL: vreduce_fmin_v4f64_nonans:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI80_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI80_0)(a1)
+; RV64-NEXT:    fld fa5, %lo(.LCPI80_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    vfmv.s.f v10, ft0
+; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vfredmin.vs v8, v8, v10
 ; RV64-NEXT:    vfmv.f.s fa0, v8
 ; RV64-NEXT:    ret
@@ -1459,9 +1459,9 @@ define double @vreduce_fmin_v4f64_nonans_noinfs(ptr %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT:    lui a1, %hi(.LCPI81_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI81_0)(a1)
+; RV32-NEXT:    fld fa5, %lo(.LCPI81_0)(a1)
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    vfmv.s.f v10, ft0
+; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vfredmin.vs v8, v8, v10
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    ret
@@ -1469,10 +1469,10 @@ define double @vreduce_fmin_v4f64_nonans_noinfs(ptr %x) {
 ; RV64-LABEL: vreduce_fmin_v4f64_nonans_noinfs:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI81_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI81_0)(a1)
+; RV64-NEXT:    fld fa5, %lo(.LCPI81_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    vfmv.s.f v10, ft0
+; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vfredmin.vs v8, v8, v10
 ; RV64-NEXT:    vfmv.f.s fa0, v8
 ; RV64-NEXT:    ret
@@ -1490,9 +1490,9 @@ define double @vreduce_fmin_v32f64(ptr %x) {
 ; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    addi a0, a0, 128
 ; RV32-NEXT:    lui a1, %hi(.LCPI82_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI82_0)(a1)
+; RV32-NEXT:    fld fa5, %lo(.LCPI82_0)(a1)
 ; RV32-NEXT:    vle64.v v16, (a0)
-; RV32-NEXT:    vfmv.s.f v24, ft0
+; RV32-NEXT:    vfmv.s.f v24, fa5
 ; RV32-NEXT:    vfmin.vv v8, v8, v16
 ; RV32-NEXT:    vfredmin.vs v8, v8, v24
 ; RV32-NEXT:    vfmv.f.s fa0, v8
@@ -1501,12 +1501,12 @@ define double @vreduce_fmin_v32f64(ptr %x) {
 ; RV64-LABEL: vreduce_fmin_v32f64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI82_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI82_0)(a1)
+; RV64-NEXT:    fld fa5, %lo(.LCPI82_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 128
 ; RV64-NEXT:    vle64.v v16, (a0)
-; RV64-NEXT:    vfmv.s.f v24, ft0
+; RV64-NEXT:    vfmv.s.f v24, fa5
 ; RV64-NEXT:    vfmin.vv v8, v8, v16
 ; RV64-NEXT:    vfredmin.vs v8, v8, v24
 ; RV64-NEXT:    vfmv.f.s fa0, v8
@@ -1655,9 +1655,9 @@ define float @vreduce_fmax_v4f32_nonans_noinfs(ptr %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV32-NEXT:    lui a1, %hi(.LCPI91_0)
-; RV32-NEXT:    flw ft0, %lo(.LCPI91_0)(a1)
+; RV32-NEXT:    flw fa5, %lo(.LCPI91_0)(a1)
 ; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    vfmv.s.f v9, ft0
+; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vfredmax.vs v8, v8, v9
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    ret
@@ -1665,10 +1665,10 @@ define float @vreduce_fmax_v4f32_nonans_noinfs(ptr %x) {
 ; RV64-LABEL: vreduce_fmax_v4f32_nonans_noinfs:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI91_0)
-; RV64-NEXT:    flw ft0, %lo(.LCPI91_0)(a1)
+; RV64-NEXT:    flw fa5, %lo(.LCPI91_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; RV64-NEXT:    vle32.v v8, (a0)
-; RV64-NEXT:    vfmv.s.f v9, ft0
+; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vfredmax.vs v8, v8, v9
 ; RV64-NEXT:    vfmv.f.s fa0, v8
 ; RV64-NEXT:    ret
@@ -1711,9 +1711,9 @@ define double @vreduce_fmax_v2f64(ptr %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    lui a1, %hi(.LCPI93_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI93_0)(a1)
+; RV32-NEXT:    fld fa5, %lo(.LCPI93_0)(a1)
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    vfmv.s.f v9, ft0
+; RV32-NEXT:    vfmv.s.f v9, fa5
 ; RV32-NEXT:    vfredmax.vs v8, v8, v9
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    ret
@@ -1721,10 +1721,10 @@ define double @vreduce_fmax_v2f64(ptr %x) {
 ; RV64-LABEL: vreduce_fmax_v2f64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI93_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI93_0)(a1)
+; RV64-NEXT:    fld fa5, %lo(.LCPI93_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    vfmv.s.f v9, ft0
+; RV64-NEXT:    vfmv.s.f v9, fa5
 ; RV64-NEXT:    vfredmax.vs v8, v8, v9
 ; RV64-NEXT:    vfmv.f.s fa0, v8
 ; RV64-NEXT:    ret
@@ -1740,9 +1740,9 @@ define double @vreduce_fmax_v4f64(ptr %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT:    lui a1, %hi(.LCPI94_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI94_0)(a1)
+; RV32-NEXT:    fld fa5, %lo(.LCPI94_0)(a1)
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    vfmv.s.f v10, ft0
+; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vfredmax.vs v8, v8, v10
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    ret
@@ -1750,10 +1750,10 @@ define double @vreduce_fmax_v4f64(ptr %x) {
 ; RV64-LABEL: vreduce_fmax_v4f64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI94_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI94_0)(a1)
+; RV64-NEXT:    fld fa5, %lo(.LCPI94_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    vfmv.s.f v10, ft0
+; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vfredmax.vs v8, v8, v10
 ; RV64-NEXT:    vfmv.f.s fa0, v8
 ; RV64-NEXT:    ret
@@ -1767,9 +1767,9 @@ define double @vreduce_fmax_v4f64_nonans(ptr %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT:    lui a1, %hi(.LCPI95_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI95_0)(a1)
+; RV32-NEXT:    fld fa5, %lo(.LCPI95_0)(a1)
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    vfmv.s.f v10, ft0
+; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vfredmax.vs v8, v8, v10
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    ret
@@ -1777,10 +1777,10 @@ define double @vreduce_fmax_v4f64_nonans(ptr %x) {
 ; RV64-LABEL: vreduce_fmax_v4f64_nonans:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI95_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI95_0)(a1)
+; RV64-NEXT:    fld fa5, %lo(.LCPI95_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    vfmv.s.f v10, ft0
+; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vfredmax.vs v8, v8, v10
 ; RV64-NEXT:    vfmv.f.s fa0, v8
 ; RV64-NEXT:    ret
@@ -1794,9 +1794,9 @@ define double @vreduce_fmax_v4f64_nonans_noinfs(ptr %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT:    lui a1, %hi(.LCPI96_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI96_0)(a1)
+; RV32-NEXT:    fld fa5, %lo(.LCPI96_0)(a1)
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    vfmv.s.f v10, ft0
+; RV32-NEXT:    vfmv.s.f v10, fa5
 ; RV32-NEXT:    vfredmax.vs v8, v8, v10
 ; RV32-NEXT:    vfmv.f.s fa0, v8
 ; RV32-NEXT:    ret
@@ -1804,10 +1804,10 @@ define double @vreduce_fmax_v4f64_nonans_noinfs(ptr %x) {
 ; RV64-LABEL: vreduce_fmax_v4f64_nonans_noinfs:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI96_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI96_0)(a1)
+; RV64-NEXT:    fld fa5, %lo(.LCPI96_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    vfmv.s.f v10, ft0
+; RV64-NEXT:    vfmv.s.f v10, fa5
 ; RV64-NEXT:    vfredmax.vs v8, v8, v10
 ; RV64-NEXT:    vfmv.f.s fa0, v8
 ; RV64-NEXT:    ret
@@ -1825,9 +1825,9 @@ define double @vreduce_fmax_v32f64(ptr %x) {
 ; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    addi a0, a0, 128
 ; RV32-NEXT:    lui a1, %hi(.LCPI97_0)
-; RV32-NEXT:    fld ft0, %lo(.LCPI97_0)(a1)
+; RV32-NEXT:    fld fa5, %lo(.LCPI97_0)(a1)
 ; RV32-NEXT:    vle64.v v16, (a0)
-; RV32-NEXT:    vfmv.s.f v24, ft0
+; RV32-NEXT:    vfmv.s.f v24, fa5
 ; RV32-NEXT:    vfmax.vv v8, v8, v16
 ; RV32-NEXT:    vfredmax.vs v8, v8, v24
 ; RV32-NEXT:    vfmv.f.s fa0, v8
@@ -1836,12 +1836,12 @@ define double @vreduce_fmax_v32f64(ptr %x) {
 ; RV64-LABEL: vreduce_fmax_v32f64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI97_0)
-; RV64-NEXT:    fld ft0, %lo(.LCPI97_0)(a1)
+; RV64-NEXT:    fld fa5, %lo(.LCPI97_0)(a1)
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 128
 ; RV64-NEXT:    vle64.v v16, (a0)
-; RV64-NEXT:    vfmv.s.f v24, ft0
+; RV64-NEXT:    vfmv.s.f v24, fa5
 ; RV64-NEXT:    vfmax.vv v8, v8, v16
 ; RV64-NEXT:    vfredmax.vs v8, v8, v24
 ; RV64-NEXT:    vfmv.f.s fa0, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
index 156f2aa974d4..074062203532 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -10,11 +10,11 @@ define <2 x half> @vp_rint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-LABEL: vp_rint_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -29,13 +29,13 @@ define <2 x half> @vp_rint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -54,11 +54,11 @@ define <4 x half> @vp_rint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-LABEL: vp_rint_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -73,13 +73,13 @@ define <4 x half> @vp_rint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -98,11 +98,11 @@ define <8 x half> @vp_rint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-LABEL: vp_rint_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -117,13 +117,13 @@ define <8 x half> @vp_rint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -143,11 +143,11 @@ define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -163,14 +163,14 @@ define <16 x half> @vp_rint_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI7_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v10
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -192,9 +192,9 @@ define <2 x float> @vp_rint_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -213,9 +213,9 @@ define <2 x float> @vp_rint_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -236,9 +236,9 @@ define <4 x float> @vp_rint_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -257,9 +257,9 @@ define <4 x float> @vp_rint_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -281,9 +281,9 @@ define <8 x float> @vp_rint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -304,9 +304,9 @@ define <8 x float> @vp_rint_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -329,9 +329,9 @@ define <16 x float> @vp_rint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
@@ -352,9 +352,9 @@ define <16 x float> @vp_rint_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
@@ -374,11 +374,11 @@ define <2 x double> @vp_rint_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %e
 ; CHECK-LABEL: vp_rint_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI16_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -393,13 +393,13 @@ define <2 x double> @vp_rint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_rint_v2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI17_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -419,11 +419,11 @@ define <4 x double> @vp_rint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %e
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI18_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -439,14 +439,14 @@ define <4 x double> @vp_rint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_rint_v4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI19_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v10
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -467,11 +467,11 @@ define <8 x double> @vp_rint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %e
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI20_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
@@ -487,14 +487,14 @@ define <8 x double> @vp_rint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_rint_v8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI21_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v12
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
@@ -515,11 +515,11 @@ define <15 x double> @vp_rint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroex
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
@@ -535,14 +535,14 @@ define <15 x double> @vp_rint_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
 ; CHECK-LABEL: vp_rint_v15f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI23_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
@@ -563,11 +563,11 @@ define <16 x double> @vp_rint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroex
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
@@ -583,14 +583,14 @@ define <16 x double> @vp_rint_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
 ; CHECK-LABEL: vp_rint_v16f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI25_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
@@ -625,12 +625,12 @@ define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    sub sp, sp, a2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
 ; CHECK-NEXT:    lui a2, %hi(.LCPI26_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a2)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
@@ -648,7 +648,7 @@ define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
@@ -682,13 +682,13 @@ define <32 x double> @vp_rint_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    sub sp, sp, a2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
 ; CHECK-NEXT:    lui a2, %hi(.LCPI27_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI27_0)(a2)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
@@ -706,7 +706,7 @@ define <32 x double> @vp_rint_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
index 205f7db94576..fb859506e061 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -10,11 +10,11 @@ define <2 x half> @vp_round_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-LABEL: vp_round_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -31,13 +31,13 @@ define <2 x half> @vp_round_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_v2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -58,11 +58,11 @@ define <4 x half> @vp_round_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-LABEL: vp_round_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -79,13 +79,13 @@ define <4 x half> @vp_round_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_v4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -106,11 +106,11 @@ define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-LABEL: vp_round_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -127,13 +127,13 @@ define <8 x half> @vp_round_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_v8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -155,11 +155,11 @@ define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -177,14 +177,14 @@ define <16 x half> @vp_round_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_round_v16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI7_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v10
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -208,9 +208,9 @@ define <2 x float> @vp_round_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -231,9 +231,9 @@ define <2 x float> @vp_round_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -256,9 +256,9 @@ define <4 x float> @vp_round_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -279,9 +279,9 @@ define <4 x float> @vp_round_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -305,9 +305,9 @@ define <8 x float> @vp_round_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -330,9 +330,9 @@ define <8 x float> @vp_round_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -357,9 +357,9 @@ define <16 x float> @vp_round_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -382,9 +382,9 @@ define <16 x float> @vp_round_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -406,11 +406,11 @@ define <2 x double> @vp_round_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %
 ; CHECK-LABEL: vp_round_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI16_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -427,13 +427,13 @@ define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_round_v2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI17_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -455,11 +455,11 @@ define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI18_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -477,14 +477,14 @@ define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_round_v4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI19_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v10
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -507,11 +507,11 @@ define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI20_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -529,14 +529,14 @@ define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_round_v8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI21_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v12
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -559,11 +559,11 @@ define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -581,14 +581,14 @@ define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ; CHECK-LABEL: vp_round_v15f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI23_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -611,11 +611,11 @@ define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -633,14 +633,14 @@ define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ; CHECK-LABEL: vp_round_v16f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI25_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -682,12 +682,12 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    li a1, 16
 ; CHECK-NEXT:  .LBB26_2:
 ; CHECK-NEXT:    lui a2, %hi(.LCPI26_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a2)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v25, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v25, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a1, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -713,7 +713,7 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
@@ -749,13 +749,13 @@ define <32 x double> @vp_round_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    sub sp, sp, a2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
 ; CHECK-NEXT:    lui a2, %hi(.LCPI27_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI27_0)(a2)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a1, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
@@ -775,7 +775,7 @@ define <32 x double> @vp_round_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
index 4084f7450a80..9f7029e8b039 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -10,11 +10,11 @@ define <2 x half> @vp_roundeven_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %
 ; CHECK-LABEL: vp_roundeven_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -31,13 +31,13 @@ define <2 x half> @vp_roundeven_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_roundeven_v2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -58,11 +58,11 @@ define <4 x half> @vp_roundeven_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %
 ; CHECK-LABEL: vp_roundeven_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -79,13 +79,13 @@ define <4 x half> @vp_roundeven_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_roundeven_v4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -106,11 +106,11 @@ define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %
 ; CHECK-LABEL: vp_roundeven_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -127,13 +127,13 @@ define <8 x half> @vp_roundeven_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vp_roundeven_v8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -155,11 +155,11 @@ define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -177,14 +177,14 @@ define <16 x half> @vp_roundeven_v16f16_unmasked(<16 x half> %va, i32 zeroext %e
 ; CHECK-LABEL: vp_roundeven_v16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI7_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v10
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -208,9 +208,9 @@ define <2 x float> @vp_roundeven_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -231,9 +231,9 @@ define <2 x float> @vp_roundeven_v2f32_unmasked(<2 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -256,9 +256,9 @@ define <4 x float> @vp_roundeven_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -279,9 +279,9 @@ define <4 x float> @vp_roundeven_v4f32_unmasked(<4 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -305,9 +305,9 @@ define <8 x float> @vp_roundeven_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -330,9 +330,9 @@ define <8 x float> @vp_roundeven_v8f32_unmasked(<8 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -357,9 +357,9 @@ define <16 x float> @vp_roundeven_v16f32(<16 x float> %va, <16 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -382,9 +382,9 @@ define <16 x float> @vp_roundeven_v16f32_unmasked(<16 x float> %va, i32 zeroext
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -406,11 +406,11 @@ define <2 x double> @vp_roundeven_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroe
 ; CHECK-LABEL: vp_roundeven_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI16_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -427,13 +427,13 @@ define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext %
 ; CHECK-LABEL: vp_roundeven_v2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI17_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -455,11 +455,11 @@ define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI18_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -477,14 +477,14 @@ define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext %
 ; CHECK-LABEL: vp_roundeven_v4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI19_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v10
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -507,11 +507,11 @@ define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroe
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI20_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -529,14 +529,14 @@ define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext %
 ; CHECK-LABEL: vp_roundeven_v8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI21_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v12
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -559,11 +559,11 @@ define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 z
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -581,14 +581,14 @@ define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroex
 ; CHECK-LABEL: vp_roundeven_v15f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI23_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -611,11 +611,11 @@ define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 z
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -633,14 +633,14 @@ define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroex
 ; CHECK-LABEL: vp_roundeven_v16f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI25_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -682,12 +682,12 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
 ; CHECK-NEXT:    li a1, 16
 ; CHECK-NEXT:  .LBB26_2:
 ; CHECK-NEXT:    lui a2, %hi(.LCPI26_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a2)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v25, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v25, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a1, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -713,7 +713,7 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
@@ -749,13 +749,13 @@ define <32 x double> @vp_roundeven_v32f64_unmasked(<32 x double> %va, i32 zeroex
 ; CHECK-NEXT:    sub sp, sp, a2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
 ; CHECK-NEXT:    lui a2, %hi(.LCPI27_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI27_0)(a2)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a1, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
@@ -775,7 +775,7 @@ define <32 x double> @vp_roundeven_v32f64_unmasked(<32 x double> %va, i32 zeroex
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
index af829b968531..b9ecf8eca910 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -10,11 +10,11 @@ define <2 x half> @vp_roundtozero_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext
 ; CHECK-LABEL: vp_roundtozero_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -31,13 +31,13 @@ define <2 x half> @vp_roundtozero_v2f16_unmasked(<2 x half> %va, i32 zeroext %ev
 ; CHECK-LABEL: vp_roundtozero_v2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -58,11 +58,11 @@ define <4 x half> @vp_roundtozero_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext
 ; CHECK-LABEL: vp_roundtozero_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -79,13 +79,13 @@ define <4 x half> @vp_roundtozero_v4f16_unmasked(<4 x half> %va, i32 zeroext %ev
 ; CHECK-LABEL: vp_roundtozero_v4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -106,11 +106,11 @@ define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-LABEL: vp_roundtozero_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -127,13 +127,13 @@ define <8 x half> @vp_roundtozero_v8f16_unmasked(<8 x half> %va, i32 zeroext %ev
 ; CHECK-LABEL: vp_roundtozero_v8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -155,11 +155,11 @@ define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zer
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -177,14 +177,14 @@ define <16 x half> @vp_roundtozero_v16f16_unmasked(<16 x half> %va, i32 zeroext
 ; CHECK-LABEL: vp_roundtozero_v16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI7_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v10
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -208,9 +208,9 @@ define <2 x float> @vp_roundtozero_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -231,9 +231,9 @@ define <2 x float> @vp_roundtozero_v2f32_unmasked(<2 x float> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -256,9 +256,9 @@ define <4 x float> @vp_roundtozero_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -279,9 +279,9 @@ define <4 x float> @vp_roundtozero_v4f32_unmasked(<4 x float> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -305,9 +305,9 @@ define <8 x float> @vp_roundtozero_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -330,9 +330,9 @@ define <8 x float> @vp_roundtozero_v8f32_unmasked(<8 x float> %va, i32 zeroext %
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -357,9 +357,9 @@ define <16 x float> @vp_roundtozero_v16f32(<16 x float> %va, <16 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -382,9 +382,9 @@ define <16 x float> @vp_roundtozero_v16f32_unmasked(<16 x float> %va, i32 zeroex
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -406,11 +406,11 @@ define <2 x double> @vp_roundtozero_v2f64(<2 x double> %va, <2 x i1> %m, i32 zer
 ; CHECK-LABEL: vp_roundtozero_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI16_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -427,13 +427,13 @@ define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 zeroext
 ; CHECK-LABEL: vp_roundtozero_v2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI17_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -455,11 +455,11 @@ define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zer
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI18_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -477,14 +477,14 @@ define <4 x double> @vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext
 ; CHECK-LABEL: vp_roundtozero_v4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI19_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v10
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -507,11 +507,11 @@ define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zer
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI20_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -529,14 +529,14 @@ define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext
 ; CHECK-LABEL: vp_roundtozero_v8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI21_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v12
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -559,11 +559,11 @@ define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -581,14 +581,14 @@ define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zero
 ; CHECK-LABEL: vp_roundtozero_v15f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI23_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -611,11 +611,11 @@ define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -633,14 +633,14 @@ define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zero
 ; CHECK-LABEL: vp_roundtozero_v16f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI25_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -682,12 +682,12 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
 ; CHECK-NEXT:    li a1, 16
 ; CHECK-NEXT:  .LBB26_2:
 ; CHECK-NEXT:    lui a2, %hi(.LCPI26_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a2)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v25, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v25, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a1, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -713,7 +713,7 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
@@ -749,13 +749,13 @@ define <32 x double> @vp_roundtozero_v32f64_unmasked(<32 x double> %va, i32 zero
 ; CHECK-NEXT:    sub sp, sp, a2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
 ; CHECK-NEXT:    lui a2, %hi(.LCPI27_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI27_0)(a2)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a1, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
@@ -775,7 +775,7 @@ define <32 x double> @vp_roundtozero_v32f64_unmasked(<32 x double> %va, i32 zero
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1

diff --git a/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll b/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll
index 37eed0918bda..af1a20ce6f97 100644
--- a/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll
@@ -16,8 +16,8 @@ define <vscale x 1 x i8> @trunc_nxv1f32_to_si8(<vscale x 1 x float> %x) {
 ; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
 ; RV32-NEXT:    lui a0, 307200
-; RV32-NEXT:    fmv.w.x ft0, a0
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
@@ -33,8 +33,8 @@ define <vscale x 1 x i8> @trunc_nxv1f32_to_si8(<vscale x 1 x float> %x) {
 ; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
 ; RV64-NEXT:    lui a0, 307200
-; RV64-NEXT:    fmv.w.x ft0, a0
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    fmv.w.x fa5, a0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
@@ -55,8 +55,8 @@ define <vscale x 1 x i8> @trunc_nxv1f32_to_ui8(<vscale x 1 x float> %x) {
 ; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
 ; RV32-NEXT:    lui a0, 307200
-; RV32-NEXT:    fmv.w.x ft0, a0
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
@@ -72,8 +72,8 @@ define <vscale x 1 x i8> @trunc_nxv1f32_to_ui8(<vscale x 1 x float> %x) {
 ; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
 ; RV64-NEXT:    lui a0, 307200
-; RV64-NEXT:    fmv.w.x ft0, a0
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    fmv.w.x fa5, a0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
@@ -210,8 +210,8 @@ define <vscale x 4 x i8> @trunc_nxv4f32_to_si8(<vscale x 4 x float> %x) {
 ; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; RV32-NEXT:    vfabs.v v10, v8
 ; RV32-NEXT:    lui a0, 307200
-; RV32-NEXT:    fmv.w.x ft0, a0
-; RV32-NEXT:    vmflt.vf v0, v10, ft0
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
@@ -227,8 +227,8 @@ define <vscale x 4 x i8> @trunc_nxv4f32_to_si8(<vscale x 4 x float> %x) {
 ; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; RV64-NEXT:    vfabs.v v10, v8
 ; RV64-NEXT:    lui a0, 307200
-; RV64-NEXT:    fmv.w.x ft0, a0
-; RV64-NEXT:    vmflt.vf v0, v10, ft0
+; RV64-NEXT:    fmv.w.x fa5, a0
+; RV64-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
@@ -249,8 +249,8 @@ define <vscale x 4 x i8> @trunc_nxv4f32_to_ui8(<vscale x 4 x float> %x) {
 ; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; RV32-NEXT:    vfabs.v v10, v8
 ; RV32-NEXT:    lui a0, 307200
-; RV32-NEXT:    fmv.w.x ft0, a0
-; RV32-NEXT:    vmflt.vf v0, v10, ft0
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
@@ -266,8 +266,8 @@ define <vscale x 4 x i8> @trunc_nxv4f32_to_ui8(<vscale x 4 x float> %x) {
 ; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; RV64-NEXT:    vfabs.v v10, v8
 ; RV64-NEXT:    lui a0, 307200
-; RV64-NEXT:    fmv.w.x ft0, a0
-; RV64-NEXT:    vmflt.vf v0, v10, ft0
+; RV64-NEXT:    fmv.w.x fa5, a0
+; RV64-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
@@ -404,8 +404,8 @@ define <vscale x 1 x i8> @ceil_nxv1f32_to_si8(<vscale x 1 x float> %x) {
 ; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
 ; RV32-NEXT:    lui a0, 307200
-; RV32-NEXT:    fmv.w.x ft0, a0
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -423,8 +423,8 @@ define <vscale x 1 x i8> @ceil_nxv1f32_to_si8(<vscale x 1 x float> %x) {
 ; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
 ; RV64-NEXT:    lui a0, 307200
-; RV64-NEXT:    fmv.w.x ft0, a0
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    fmv.w.x fa5, a0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    fsrm a0
@@ -447,8 +447,8 @@ define <vscale x 1 x i8> @ceil_nxv1f32_to_ui8(<vscale x 1 x float> %x) {
 ; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
 ; RV32-NEXT:    lui a0, 307200
-; RV32-NEXT:    fmv.w.x ft0, a0
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -466,8 +466,8 @@ define <vscale x 1 x i8> @ceil_nxv1f32_to_ui8(<vscale x 1 x float> %x) {
 ; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
 ; RV64-NEXT:    lui a0, 307200
-; RV64-NEXT:    fmv.w.x ft0, a0
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    fmv.w.x fa5, a0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    fsrm a0
@@ -642,8 +642,8 @@ define <vscale x 4 x i8> @ceil_nxv4f32_to_si8(<vscale x 4 x float> %x) {
 ; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; RV32-NEXT:    vfabs.v v10, v8
 ; RV32-NEXT:    lui a0, 307200
-; RV32-NEXT:    fmv.w.x ft0, a0
-; RV32-NEXT:    vmflt.vf v0, v10, ft0
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -661,8 +661,8 @@ define <vscale x 4 x i8> @ceil_nxv4f32_to_si8(<vscale x 4 x float> %x) {
 ; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; RV64-NEXT:    vfabs.v v10, v8
 ; RV64-NEXT:    lui a0, 307200
-; RV64-NEXT:    fmv.w.x ft0, a0
-; RV64-NEXT:    vmflt.vf v0, v10, ft0
+; RV64-NEXT:    fmv.w.x fa5, a0
+; RV64-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64-NEXT:    fsrm a0
@@ -685,8 +685,8 @@ define <vscale x 4 x i8> @ceil_nxv4f32_to_ui8(<vscale x 4 x float> %x) {
 ; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; RV32-NEXT:    vfabs.v v10, v8
 ; RV32-NEXT:    lui a0, 307200
-; RV32-NEXT:    fmv.w.x ft0, a0
-; RV32-NEXT:    vmflt.vf v0, v10, ft0
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -704,8 +704,8 @@ define <vscale x 4 x i8> @ceil_nxv4f32_to_ui8(<vscale x 4 x float> %x) {
 ; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; RV64-NEXT:    vfabs.v v10, v8
 ; RV64-NEXT:    lui a0, 307200
-; RV64-NEXT:    fmv.w.x ft0, a0
-; RV64-NEXT:    vmflt.vf v0, v10, ft0
+; RV64-NEXT:    fmv.w.x fa5, a0
+; RV64-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64-NEXT:    fsrm a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
index d6450aaba45d..c4aae5c84876 100644
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -10,11 +10,11 @@ define <vscale x 1 x half> @vp_floor_nxv1f16(<vscale x 1 x half> %va, <vscale x
 ; CHECK-LABEL: vp_floor_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -31,10 +31,10 @@ define <vscale x 1 x half> @vp_floor_nxv1f16_unmasked(<vscale x 1 x half> %va, i
 ; CHECK-LABEL: vp_floor_nxv1f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -54,11 +54,11 @@ define <vscale x 2 x half> @vp_floor_nxv2f16(<vscale x 2 x half> %va, <vscale x
 ; CHECK-LABEL: vp_floor_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -75,10 +75,10 @@ define <vscale x 2 x half> @vp_floor_nxv2f16_unmasked(<vscale x 2 x half> %va, i
 ; CHECK-LABEL: vp_floor_nxv2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -98,11 +98,11 @@ define <vscale x 4 x half> @vp_floor_nxv4f16(<vscale x 4 x half> %va, <vscale x
 ; CHECK-LABEL: vp_floor_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -119,10 +119,10 @@ define <vscale x 4 x half> @vp_floor_nxv4f16_unmasked(<vscale x 4 x half> %va, i
 ; CHECK-LABEL: vp_floor_nxv4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -143,11 +143,11 @@ define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -165,10 +165,10 @@ define <vscale x 8 x half> @vp_floor_nxv8f16_unmasked(<vscale x 8 x half> %va, i
 ; CHECK-LABEL: vp_floor_nxv8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI7_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -189,11 +189,11 @@ define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI8_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI8_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -211,10 +211,10 @@ define <vscale x 16 x half> @vp_floor_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; CHECK-LABEL: vp_floor_nxv16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI9_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI9_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI9_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -235,11 +235,11 @@ define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI10_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI10_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI10_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -257,10 +257,10 @@ define <vscale x 32 x half> @vp_floor_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; CHECK-LABEL: vp_floor_nxv32f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI11_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI11_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI11_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -282,9 +282,9 @@ define <vscale x 1 x float> @vp_floor_nxv1f32(<vscale x 1 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -303,8 +303,8 @@ define <vscale x 1 x float> @vp_floor_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -326,9 +326,9 @@ define <vscale x 2 x float> @vp_floor_nxv2f32(<vscale x 2 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -347,8 +347,8 @@ define <vscale x 2 x float> @vp_floor_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -371,9 +371,9 @@ define <vscale x 4 x float> @vp_floor_nxv4f32(<vscale x 4 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -393,8 +393,8 @@ define <vscale x 4 x float> @vp_floor_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -417,9 +417,9 @@ define <vscale x 8 x float> @vp_floor_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -439,8 +439,8 @@ define <vscale x 8 x float> @vp_floor_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -463,9 +463,9 @@ define <vscale x 16 x float> @vp_floor_nxv16f32(<vscale x 16 x float> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -485,8 +485,8 @@ define <vscale x 16 x float> @vp_floor_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -506,11 +506,11 @@ define <vscale x 1 x double> @vp_floor_nxv1f64(<vscale x 1 x double> %va, <vscal
 ; CHECK-LABEL: vp_floor_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -527,10 +527,10 @@ define <vscale x 1 x double> @vp_floor_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-LABEL: vp_floor_nxv1f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI23_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -551,11 +551,11 @@ define <vscale x 2 x double> @vp_floor_nxv2f64(<vscale x 2 x double> %va, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -573,10 +573,10 @@ define <vscale x 2 x double> @vp_floor_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-LABEL: vp_floor_nxv2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI25_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -597,11 +597,11 @@ define <vscale x 4 x double> @vp_floor_nxv4f64(<vscale x 4 x double> %va, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI26_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -619,10 +619,10 @@ define <vscale x 4 x double> @vp_floor_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-LABEL: vp_floor_nxv4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI27_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI27_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -643,11 +643,11 @@ define <vscale x 7 x double> @vp_floor_nxv7f64(<vscale x 7 x double> %va, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI28_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI28_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -665,10 +665,10 @@ define <vscale x 7 x double> @vp_floor_nxv7f64_unmasked(<vscale x 7 x double> %v
 ; CHECK-LABEL: vp_floor_nxv7f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI29_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI29_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI29_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -689,11 +689,11 @@ define <vscale x 8 x double> @vp_floor_nxv8f64(<vscale x 8 x double> %va, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI30_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI30_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -711,10 +711,10 @@ define <vscale x 8 x double> @vp_floor_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-LABEL: vp_floor_nxv8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI31_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI31_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI31_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -750,12 +750,12 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI32_0)(a3)
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a2, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
@@ -776,7 +776,7 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
@@ -802,13 +802,13 @@ define <vscale x 16 x double> @vp_floor_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    sub a2, a0, a1
 ; CHECK-NEXT:    lui a3, %hi(.LCPI33_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI33_0)(a3)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI33_0)(a3)
 ; CHECK-NEXT:    sltu a3, a0, a2
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v16
-; CHECK-NEXT:    vmflt.vf v0, v24, ft0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    fsrmi a2, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; CHECK-NEXT:    fsrm a2
@@ -821,7 +821,7 @@ define <vscale x 16 x double> @vp_floor_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:  .LBB33_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8
-; CHECK-NEXT:    vmflt.vf v0, v24, ft0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT:    fsrm a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll
index b7cc6f50fc01..15d4e707ee5d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll
@@ -155,12 +155,12 @@ define <vscale x 4 x i16> @test_signed_v4f64_v4i16(<vscale x 4 x double> %f) {
 ; CHECK-LABEL: test_signed_v4f64_v4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI10_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI10_0)(a0)
 ; CHECK-NEXT:    lui a0, %hi(.LCPI10_1)
-; CHECK-NEXT:    fld ft1, %lo(.LCPI10_1)(a0)
+; CHECK-NEXT:    fld fa4, %lo(.LCPI10_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vfmax.vf v12, v8, ft0
-; CHECK-NEXT:    vfmin.vf v12, v12, ft1
+; CHECK-NEXT:    vfmax.vf v12, v8, fa5
+; CHECK-NEXT:    vfmin.vf v12, v12, fa4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
@@ -178,12 +178,12 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
 ; CHECK-LABEL: test_signed_v8f64_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
 ; CHECK-NEXT:    lui a0, %hi(.LCPI11_1)
-; CHECK-NEXT:    fld ft1, %lo(.LCPI11_1)(a0)
+; CHECK-NEXT:    fld fa4, %lo(.LCPI11_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vfmax.vf v16, v8, ft0
-; CHECK-NEXT:    vfmin.vf v16, v16, ft1
+; CHECK-NEXT:    vfmax.vf v16, v8, fa5
+; CHECK-NEXT:    vfmin.vf v16, v16, fa4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v24, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll
index a645ce8660e7..dc068ef7a63a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll
@@ -155,11 +155,11 @@ define <vscale x 4 x i16> @test_signed_v4f64_v4i16(<vscale x 4 x double> %f) {
 ; CHECK32-LABEL: test_signed_v4f64_v4i16:
 ; CHECK32:       # %bb.0:
 ; CHECK32-NEXT:    lui a0, %hi(.LCPI10_0)
-; CHECK32-NEXT:    fld ft0, %lo(.LCPI10_0)(a0)
-; CHECK32-NEXT:    fcvt.d.w ft1, zero
+; CHECK32-NEXT:    fld fa5, %lo(.LCPI10_0)(a0)
+; CHECK32-NEXT:    fcvt.d.w fa4, zero
 ; CHECK32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK32-NEXT:    vfmax.vf v8, v8, ft1
-; CHECK32-NEXT:    vfmin.vf v8, v8, ft0
+; CHECK32-NEXT:    vfmax.vf v8, v8, fa4
+; CHECK32-NEXT:    vfmin.vf v8, v8, fa5
 ; CHECK32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
 ; CHECK32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
@@ -169,11 +169,11 @@ define <vscale x 4 x i16> @test_signed_v4f64_v4i16(<vscale x 4 x double> %f) {
 ; CHECK64-LABEL: test_signed_v4f64_v4i16:
 ; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    lui a0, %hi(.LCPI10_0)
-; CHECK64-NEXT:    fld ft0, %lo(.LCPI10_0)(a0)
-; CHECK64-NEXT:    fmv.d.x ft1, zero
+; CHECK64-NEXT:    fld fa5, %lo(.LCPI10_0)(a0)
+; CHECK64-NEXT:    fmv.d.x fa4, zero
 ; CHECK64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK64-NEXT:    vfmax.vf v8, v8, ft1
-; CHECK64-NEXT:    vfmin.vf v8, v8, ft0
+; CHECK64-NEXT:    vfmax.vf v8, v8, fa4
+; CHECK64-NEXT:    vfmin.vf v8, v8, fa5
 ; CHECK64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
 ; CHECK64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
@@ -187,11 +187,11 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
 ; CHECK32-LABEL: test_signed_v8f64_v8i16:
 ; CHECK32:       # %bb.0:
 ; CHECK32-NEXT:    lui a0, %hi(.LCPI11_0)
-; CHECK32-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
-; CHECK32-NEXT:    fcvt.d.w ft1, zero
+; CHECK32-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
+; CHECK32-NEXT:    fcvt.d.w fa4, zero
 ; CHECK32-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK32-NEXT:    vfmax.vf v8, v8, ft1
-; CHECK32-NEXT:    vfmin.vf v8, v8, ft0
+; CHECK32-NEXT:    vfmax.vf v8, v8, fa4
+; CHECK32-NEXT:    vfmin.vf v8, v8, fa5
 ; CHECK32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK32-NEXT:    vfncvt.rtz.xu.f.w v16, v8
 ; CHECK32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
@@ -201,11 +201,11 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
 ; CHECK64-LABEL: test_signed_v8f64_v8i16:
 ; CHECK64:       # %bb.0:
 ; CHECK64-NEXT:    lui a0, %hi(.LCPI11_0)
-; CHECK64-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
-; CHECK64-NEXT:    fmv.d.x ft1, zero
+; CHECK64-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
+; CHECK64-NEXT:    fmv.d.x fa4, zero
 ; CHECK64-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK64-NEXT:    vfmax.vf v8, v8, ft1
-; CHECK64-NEXT:    vfmin.vf v8, v8, ft0
+; CHECK64-NEXT:    vfmax.vf v8, v8, fa4
+; CHECK64-NEXT:    vfmin.vf v8, v8, fa5
 ; CHECK64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK64-NEXT:    vfncvt.rtz.xu.f.w v16, v8
 ; CHECK64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
index 34cf50d29943..9765f7d731c2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
@@ -10,10 +10,10 @@ define <vscale x 1 x half> @round_nxv1f16(<vscale x 1 x half> %x) {
 ; CHECK-LABEL: round_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -30,10 +30,10 @@ define <vscale x 2 x half> @round_nxv2f16(<vscale x 2 x half> %x) {
 ; CHECK-LABEL: round_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -50,10 +50,10 @@ define <vscale x 4 x half> @round_nxv4f16(<vscale x 4 x half> %x) {
 ; CHECK-LABEL: round_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -70,10 +70,10 @@ define <vscale x 8 x half> @round_nxv8f16(<vscale x 8 x half> %x) {
 ; CHECK-LABEL: round_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -90,10 +90,10 @@ define <vscale x 16 x half> @round_nxv16f16(<vscale x 16 x half> %x) {
 ; CHECK-LABEL: round_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -110,10 +110,10 @@ define <vscale x 32 x half> @round_nxv32f16(<vscale x 32 x half> %x) {
 ; CHECK-LABEL: round_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -132,8 +132,8 @@ define <vscale x 1 x float> @round_nxv1f32(<vscale x 1 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -152,8 +152,8 @@ define <vscale x 2 x float> @round_nxv2f32(<vscale x 2 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -172,8 +172,8 @@ define <vscale x 4 x float> @round_nxv4f32(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -192,8 +192,8 @@ define <vscale x 8 x float> @round_nxv8f32(<vscale x 8 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -212,8 +212,8 @@ define <vscale x 16 x float> @round_nxv16f32(<vscale x 16 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -230,10 +230,10 @@ define <vscale x 1 x double> @round_nxv1f64(<vscale x 1 x double> %x) {
 ; CHECK-LABEL: round_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -250,10 +250,10 @@ define <vscale x 2 x double> @round_nxv2f64(<vscale x 2 x double> %x) {
 ; CHECK-LABEL: round_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -270,10 +270,10 @@ define <vscale x 4 x double> @round_nxv4f64(<vscale x 4 x double> %x) {
 ; CHECK-LABEL: round_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -290,10 +290,10 @@ define <vscale x 8 x double> @round_nxv8f64(<vscale x 8 x double> %x) {
 ; CHECK-LABEL: round_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI14_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI14_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
index 6f5575173cbf..e20691e74d13 100644
--- a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
@@ -10,10 +10,10 @@ define <vscale x 1 x half> @roundeven_nxv1f16(<vscale x 1 x half> %x) {
 ; CHECK-LABEL: roundeven_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -30,10 +30,10 @@ define <vscale x 2 x half> @roundeven_nxv2f16(<vscale x 2 x half> %x) {
 ; CHECK-LABEL: roundeven_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -50,10 +50,10 @@ define <vscale x 4 x half> @roundeven_nxv4f16(<vscale x 4 x half> %x) {
 ; CHECK-LABEL: roundeven_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -70,10 +70,10 @@ define <vscale x 8 x half> @roundeven_nxv8f16(<vscale x 8 x half> %x) {
 ; CHECK-LABEL: roundeven_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -90,10 +90,10 @@ define <vscale x 16 x half> @roundeven_nxv16f16(<vscale x 16 x half> %x) {
 ; CHECK-LABEL: roundeven_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -110,10 +110,10 @@ define <vscale x 32 x half> @roundeven_nxv32f16(<vscale x 32 x half> %x) {
 ; CHECK-LABEL: roundeven_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -132,8 +132,8 @@ define <vscale x 1 x float> @roundeven_nxv1f32(<vscale x 1 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -152,8 +152,8 @@ define <vscale x 2 x float> @roundeven_nxv2f32(<vscale x 2 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -172,8 +172,8 @@ define <vscale x 4 x float> @roundeven_nxv4f32(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -192,8 +192,8 @@ define <vscale x 8 x float> @roundeven_nxv8f32(<vscale x 8 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -212,8 +212,8 @@ define <vscale x 16 x float> @roundeven_nxv16f32(<vscale x 16 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -230,10 +230,10 @@ define <vscale x 1 x double> @roundeven_nxv1f64(<vscale x 1 x double> %x) {
 ; CHECK-LABEL: roundeven_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -250,10 +250,10 @@ define <vscale x 2 x double> @roundeven_nxv2f64(<vscale x 2 x double> %x) {
 ; CHECK-LABEL: roundeven_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -270,10 +270,10 @@ define <vscale x 4 x double> @roundeven_nxv4f64(<vscale x 4 x double> %x) {
 ; CHECK-LABEL: roundeven_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -290,10 +290,10 @@ define <vscale x 8 x double> @roundeven_nxv8f64(<vscale x 8 x double> %x) {
 ; CHECK-LABEL: roundeven_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI14_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI14_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
index 3bf1185d9997..2615c5f3da59 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
@@ -8,10 +8,10 @@ define <vscale x 1 x half> @trunc_nxv1f16(<vscale x 1 x half> %x) {
 ; CHECK-LABEL: trunc_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
@@ -26,10 +26,10 @@ define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) {
 ; CHECK-LABEL: trunc_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
@@ -44,10 +44,10 @@ define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) {
 ; CHECK-LABEL: trunc_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
@@ -62,10 +62,10 @@ define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) {
 ; CHECK-LABEL: trunc_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
@@ -80,10 +80,10 @@ define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) {
 ; CHECK-LABEL: trunc_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
@@ -98,10 +98,10 @@ define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) {
 ; CHECK-LABEL: trunc_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
@@ -118,8 +118,8 @@ define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
@@ -136,8 +136,8 @@ define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
@@ -154,8 +154,8 @@ define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
@@ -172,8 +172,8 @@ define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
@@ -190,8 +190,8 @@ define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
@@ -206,10 +206,10 @@ define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) {
 ; CHECK-LABEL: trunc_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -224,10 +224,10 @@ define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) {
 ; CHECK-LABEL: trunc_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
@@ -242,10 +242,10 @@ define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) {
 ; CHECK-LABEL: trunc_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -260,10 +260,10 @@ define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) {
 ; CHECK-LABEL: trunc_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI14_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI14_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll b/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll
index e291ebf3789b..a37d3eb2ff31 100644
--- a/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll
@@ -124,10 +124,10 @@ define <vscale x 1 x i64> @trunc_nxv1f16_to_si64(<vscale x 1 x half> %x) {
 ; RV32-LABEL: trunc_nxv1f16_to_si64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI6_0)
-; RV32-NEXT:    flh ft0, %lo(.LCPI6_0)(a0)
+; RV32-NEXT:    flh fa5, %lo(.LCPI6_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
@@ -140,10 +140,10 @@ define <vscale x 1 x i64> @trunc_nxv1f16_to_si64(<vscale x 1 x half> %x) {
 ; RV64-LABEL: trunc_nxv1f16_to_si64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI6_0)
-; RV64-NEXT:    flh ft0, %lo(.LCPI6_0)(a0)
+; RV64-NEXT:    flh fa5, %lo(.LCPI6_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
@@ -161,10 +161,10 @@ define <vscale x 1 x i64> @trunc_nxv1f16_to_ui64(<vscale x 1 x half> %x) {
 ; RV32-LABEL: trunc_nxv1f16_to_ui64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI7_0)
-; RV32-NEXT:    flh ft0, %lo(.LCPI7_0)(a0)
+; RV32-NEXT:    flh fa5, %lo(.LCPI7_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
@@ -177,10 +177,10 @@ define <vscale x 1 x i64> @trunc_nxv1f16_to_ui64(<vscale x 1 x half> %x) {
 ; RV64-LABEL: trunc_nxv1f16_to_ui64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI7_0)
-; RV64-NEXT:    flh ft0, %lo(.LCPI7_0)(a0)
+; RV64-NEXT:    flh fa5, %lo(.LCPI7_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
@@ -314,10 +314,10 @@ define <vscale x 4 x i64> @trunc_nxv4f16_to_si64(<vscale x 4 x half> %x) {
 ; RV32-LABEL: trunc_nxv4f16_to_si64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI14_0)
-; RV32-NEXT:    flh ft0, %lo(.LCPI14_0)(a0)
+; RV32-NEXT:    flh fa5, %lo(.LCPI14_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
@@ -330,10 +330,10 @@ define <vscale x 4 x i64> @trunc_nxv4f16_to_si64(<vscale x 4 x half> %x) {
 ; RV64-LABEL: trunc_nxv4f16_to_si64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI14_0)
-; RV64-NEXT:    flh ft0, %lo(.LCPI14_0)(a0)
+; RV64-NEXT:    flh fa5, %lo(.LCPI14_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
@@ -351,10 +351,10 @@ define <vscale x 4 x i64> @trunc_nxv4f16_to_ui64(<vscale x 4 x half> %x) {
 ; RV32-LABEL: trunc_nxv4f16_to_ui64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI15_0)
-; RV32-NEXT:    flh ft0, %lo(.LCPI15_0)(a0)
+; RV32-NEXT:    flh fa5, %lo(.LCPI15_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
@@ -367,10 +367,10 @@ define <vscale x 4 x i64> @trunc_nxv4f16_to_ui64(<vscale x 4 x half> %x) {
 ; RV64-LABEL: trunc_nxv4f16_to_ui64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI15_0)
-; RV64-NEXT:    flh ft0, %lo(.LCPI15_0)(a0)
+; RV64-NEXT:    flh fa5, %lo(.LCPI15_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
@@ -540,10 +540,10 @@ define <vscale x 1 x i64> @ceil_nxv1f16_to_si64(<vscale x 1 x half> %x) {
 ; RV32-LABEL: ceil_nxv1f16_to_si64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI22_0)
-; RV32-NEXT:    flh ft0, %lo(.LCPI22_0)(a0)
+; RV32-NEXT:    flh fa5, %lo(.LCPI22_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -558,10 +558,10 @@ define <vscale x 1 x i64> @ceil_nxv1f16_to_si64(<vscale x 1 x half> %x) {
 ; RV64-LABEL: ceil_nxv1f16_to_si64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI22_0)
-; RV64-NEXT:    flh ft0, %lo(.LCPI22_0)(a0)
+; RV64-NEXT:    flh fa5, %lo(.LCPI22_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    fsrm a0
@@ -581,10 +581,10 @@ define <vscale x 1 x i64> @ceil_nxv1f16_to_ui64(<vscale x 1 x half> %x) {
 ; RV32-LABEL: ceil_nxv1f16_to_ui64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI23_0)
-; RV32-NEXT:    flh ft0, %lo(.LCPI23_0)(a0)
+; RV32-NEXT:    flh fa5, %lo(.LCPI23_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -599,10 +599,10 @@ define <vscale x 1 x i64> @ceil_nxv1f16_to_ui64(<vscale x 1 x half> %x) {
 ; RV64-LABEL: ceil_nxv1f16_to_ui64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI23_0)
-; RV64-NEXT:    flh ft0, %lo(.LCPI23_0)(a0)
+; RV64-NEXT:    flh fa5, %lo(.LCPI23_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    fsrm a0
@@ -774,10 +774,10 @@ define <vscale x 4 x i64> @ceil_nxv4f16_to_si64(<vscale x 4 x half> %x) {
 ; RV32-LABEL: ceil_nxv4f16_to_si64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI30_0)
-; RV32-NEXT:    flh ft0, %lo(.LCPI30_0)(a0)
+; RV32-NEXT:    flh fa5, %lo(.LCPI30_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -792,10 +792,10 @@ define <vscale x 4 x i64> @ceil_nxv4f16_to_si64(<vscale x 4 x half> %x) {
 ; RV64-LABEL: ceil_nxv4f16_to_si64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI30_0)
-; RV64-NEXT:    flh ft0, %lo(.LCPI30_0)(a0)
+; RV64-NEXT:    flh fa5, %lo(.LCPI30_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    fsrm a0
@@ -815,10 +815,10 @@ define <vscale x 4 x i64> @ceil_nxv4f16_to_ui64(<vscale x 4 x half> %x) {
 ; RV32-LABEL: ceil_nxv4f16_to_ui64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI31_0)
-; RV32-NEXT:    flh ft0, %lo(.LCPI31_0)(a0)
+; RV32-NEXT:    flh fa5, %lo(.LCPI31_0)(a0)
 ; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
-; RV32-NEXT:    vmflt.vf v0, v9, ft0
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    fsrmi a0, 3
 ; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    fsrm a0
@@ -833,10 +833,10 @@ define <vscale x 4 x i64> @ceil_nxv4f16_to_ui64(<vscale x 4 x half> %x) {
 ; RV64-LABEL: ceil_nxv4f16_to_ui64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI31_0)
-; RV64-NEXT:    flh ft0, %lo(.LCPI31_0)(a0)
+; RV64-NEXT:    flh fa5, %lo(.LCPI31_0)(a0)
 ; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
-; RV64-NEXT:    vmflt.vf v0, v9, ft0
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    fsrmi a0, 3
 ; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    fsrm a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
index 8a681c328ad0..439bd82a8378 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -10,11 +10,11 @@ define <vscale x 1 x half> @vp_nearbyint_nxv1f16(<vscale x 1 x half> %va, <vscal
 ; CHECK-LABEL: vp_nearbyint_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -31,10 +31,10 @@ define <vscale x 1 x half> @vp_nearbyint_nxv1f16_unmasked(<vscale x 1 x half> %v
 ; CHECK-LABEL: vp_nearbyint_nxv1f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -54,11 +54,11 @@ define <vscale x 2 x half> @vp_nearbyint_nxv2f16(<vscale x 2 x half> %va, <vscal
 ; CHECK-LABEL: vp_nearbyint_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -75,10 +75,10 @@ define <vscale x 2 x half> @vp_nearbyint_nxv2f16_unmasked(<vscale x 2 x half> %v
 ; CHECK-LABEL: vp_nearbyint_nxv2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -98,11 +98,11 @@ define <vscale x 4 x half> @vp_nearbyint_nxv4f16(<vscale x 4 x half> %va, <vscal
 ; CHECK-LABEL: vp_nearbyint_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -119,10 +119,10 @@ define <vscale x 4 x half> @vp_nearbyint_nxv4f16_unmasked(<vscale x 4 x half> %v
 ; CHECK-LABEL: vp_nearbyint_nxv4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -143,11 +143,11 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -165,10 +165,10 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16_unmasked(<vscale x 8 x half> %v
 ; CHECK-LABEL: vp_nearbyint_nxv8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI7_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -189,11 +189,11 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI8_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI8_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -211,10 +211,10 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16_unmasked(<vscale x 16 x half>
 ; CHECK-LABEL: vp_nearbyint_nxv16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI9_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI9_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI9_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -235,11 +235,11 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI10_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI10_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI10_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -257,10 +257,10 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half>
 ; CHECK-LABEL: vp_nearbyint_nxv32f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI11_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI11_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI11_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -282,9 +282,9 @@ define <vscale x 1 x float> @vp_nearbyint_nxv1f32(<vscale x 1 x float> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -303,8 +303,8 @@ define <vscale x 1 x float> @vp_nearbyint_nxv1f32_unmasked(<vscale x 1 x float>
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -326,9 +326,9 @@ define <vscale x 2 x float> @vp_nearbyint_nxv2f32(<vscale x 2 x float> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -347,8 +347,8 @@ define <vscale x 2 x float> @vp_nearbyint_nxv2f32_unmasked(<vscale x 2 x float>
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -371,9 +371,9 @@ define <vscale x 4 x float> @vp_nearbyint_nxv4f32(<vscale x 4 x float> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -393,8 +393,8 @@ define <vscale x 4 x float> @vp_nearbyint_nxv4f32_unmasked(<vscale x 4 x float>
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -417,9 +417,9 @@ define <vscale x 8 x float> @vp_nearbyint_nxv8f32(<vscale x 8 x float> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -439,8 +439,8 @@ define <vscale x 8 x float> @vp_nearbyint_nxv8f32_unmasked(<vscale x 8 x float>
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -463,9 +463,9 @@ define <vscale x 16 x float> @vp_nearbyint_nxv16f32(<vscale x 16 x float> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -485,8 +485,8 @@ define <vscale x 16 x float> @vp_nearbyint_nxv16f32_unmasked(<vscale x 16 x floa
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -506,11 +506,11 @@ define <vscale x 1 x double> @vp_nearbyint_nxv1f64(<vscale x 1 x double> %va, <v
 ; CHECK-LABEL: vp_nearbyint_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -527,10 +527,10 @@ define <vscale x 1 x double> @vp_nearbyint_nxv1f64_unmasked(<vscale x 1 x double
 ; CHECK-LABEL: vp_nearbyint_nxv1f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI23_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -551,11 +551,11 @@ define <vscale x 2 x double> @vp_nearbyint_nxv2f64(<vscale x 2 x double> %va, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -573,10 +573,10 @@ define <vscale x 2 x double> @vp_nearbyint_nxv2f64_unmasked(<vscale x 2 x double
 ; CHECK-LABEL: vp_nearbyint_nxv2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI25_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -597,11 +597,11 @@ define <vscale x 4 x double> @vp_nearbyint_nxv4f64(<vscale x 4 x double> %va, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI26_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -619,10 +619,10 @@ define <vscale x 4 x double> @vp_nearbyint_nxv4f64_unmasked(<vscale x 4 x double
 ; CHECK-LABEL: vp_nearbyint_nxv4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI27_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI27_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -643,11 +643,11 @@ define <vscale x 7 x double> @vp_nearbyint_nxv7f64(<vscale x 7 x double> %va, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI28_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI28_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -665,10 +665,10 @@ define <vscale x 7 x double> @vp_nearbyint_nxv7f64_unmasked(<vscale x 7 x double
 ; CHECK-LABEL: vp_nearbyint_nxv7f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI29_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI29_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI29_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -689,11 +689,11 @@ define <vscale x 8 x double> @vp_nearbyint_nxv8f64(<vscale x 8 x double> %va, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI30_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI30_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -711,10 +711,10 @@ define <vscale x 8 x double> @vp_nearbyint_nxv8f64_unmasked(<vscale x 8 x double
 ; CHECK-LABEL: vp_nearbyint_nxv8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI31_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI31_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI31_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -744,12 +744,12 @@ define <vscale x 16 x double> @vp_nearbyint_nxv16f64(<vscale x 16 x double> %va,
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI32_0)(a3)
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    frflags a2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
@@ -766,7 +766,7 @@ define <vscale x 16 x double> @vp_nearbyint_nxv16f64(<vscale x 16 x double> %va,
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
@@ -786,13 +786,13 @@ define <vscale x 16 x double> @vp_nearbyint_nxv16f64_unmasked(<vscale x 16 x dou
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    sub a2, a0, a1
 ; CHECK-NEXT:    lui a3, %hi(.LCPI33_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI33_0)(a3)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI33_0)(a3)
 ; CHECK-NEXT:    sltu a3, a0, a2
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v16
-; CHECK-NEXT:    vmflt.vf v0, v24, ft0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    frflags a2
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
@@ -805,7 +805,7 @@ define <vscale x 16 x double> @vp_nearbyint_nxv16f64_unmasked(<vscale x 16 x dou
 ; CHECK-NEXT:  .LBB33_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8
-; CHECK-NEXT:    vmflt.vf v0, v24, ft0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t

diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
index bbd71074e8b6..7d03edac82eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -10,11 +10,11 @@ define <vscale x 1 x half> @vp_rint_nxv1f16(<vscale x 1 x half> %va, <vscale x 1
 ; CHECK-LABEL: vp_rint_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -29,10 +29,10 @@ define <vscale x 1 x half> @vp_rint_nxv1f16_unmasked(<vscale x 1 x half> %va, i3
 ; CHECK-LABEL: vp_rint_nxv1f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
@@ -50,11 +50,11 @@ define <vscale x 2 x half> @vp_rint_nxv2f16(<vscale x 2 x half> %va, <vscale x 2
 ; CHECK-LABEL: vp_rint_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -69,10 +69,10 @@ define <vscale x 2 x half> @vp_rint_nxv2f16_unmasked(<vscale x 2 x half> %va, i3
 ; CHECK-LABEL: vp_rint_nxv2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
@@ -90,11 +90,11 @@ define <vscale x 4 x half> @vp_rint_nxv4f16(<vscale x 4 x half> %va, <vscale x 4
 ; CHECK-LABEL: vp_rint_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -109,10 +109,10 @@ define <vscale x 4 x half> @vp_rint_nxv4f16_unmasked(<vscale x 4 x half> %va, i3
 ; CHECK-LABEL: vp_rint_nxv4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
@@ -131,11 +131,11 @@ define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -151,10 +151,10 @@ define <vscale x 8 x half> @vp_rint_nxv8f16_unmasked(<vscale x 8 x half> %va, i3
 ; CHECK-LABEL: vp_rint_nxv8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI7_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
@@ -173,11 +173,11 @@ define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI8_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI8_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
@@ -193,10 +193,10 @@ define <vscale x 16 x half> @vp_rint_nxv16f16_unmasked(<vscale x 16 x half> %va,
 ; CHECK-LABEL: vp_rint_nxv16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI9_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI9_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI9_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
@@ -215,11 +215,11 @@ define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI10_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI10_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI10_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
@@ -235,10 +235,10 @@ define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ; CHECK-LABEL: vp_rint_nxv32f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI11_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI11_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI11_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
@@ -258,9 +258,9 @@ define <vscale x 1 x float> @vp_rint_nxv1f32(<vscale x 1 x float> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -277,8 +277,8 @@ define <vscale x 1 x float> @vp_rint_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
@@ -298,9 +298,9 @@ define <vscale x 2 x float> @vp_rint_nxv2f32(<vscale x 2 x float> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -317,8 +317,8 @@ define <vscale x 2 x float> @vp_rint_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
@@ -339,9 +339,9 @@ define <vscale x 4 x float> @vp_rint_nxv4f32(<vscale x 4 x float> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -359,8 +359,8 @@ define <vscale x 4 x float> @vp_rint_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
@@ -381,9 +381,9 @@ define <vscale x 8 x float> @vp_rint_nxv8f32(<vscale x 8 x float> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
@@ -401,8 +401,8 @@ define <vscale x 8 x float> @vp_rint_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
@@ -423,9 +423,9 @@ define <vscale x 16 x float> @vp_rint_nxv16f32(<vscale x 16 x float> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
@@ -443,8 +443,8 @@ define <vscale x 16 x float> @vp_rint_nxv16f32_unmasked(<vscale x 16 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
@@ -462,11 +462,11 @@ define <vscale x 1 x double> @vp_rint_nxv1f64(<vscale x 1 x double> %va, <vscale
 ; CHECK-LABEL: vp_rint_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -481,10 +481,10 @@ define <vscale x 1 x double> @vp_rint_nxv1f64_unmasked(<vscale x 1 x double> %va
 ; CHECK-LABEL: vp_rint_nxv1f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI23_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -503,11 +503,11 @@ define <vscale x 2 x double> @vp_rint_nxv2f64(<vscale x 2 x double> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -523,10 +523,10 @@ define <vscale x 2 x double> @vp_rint_nxv2f64_unmasked(<vscale x 2 x double> %va
 ; CHECK-LABEL: vp_rint_nxv2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI25_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
@@ -545,11 +545,11 @@ define <vscale x 4 x double> @vp_rint_nxv4f64(<vscale x 4 x double> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI26_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
@@ -565,10 +565,10 @@ define <vscale x 4 x double> @vp_rint_nxv4f64_unmasked(<vscale x 4 x double> %va
 ; CHECK-LABEL: vp_rint_nxv4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI27_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI27_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -587,11 +587,11 @@ define <vscale x 7 x double> @vp_rint_nxv7f64(<vscale x 7 x double> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI28_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI28_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
@@ -607,10 +607,10 @@ define <vscale x 7 x double> @vp_rint_nxv7f64_unmasked(<vscale x 7 x double> %va
 ; CHECK-LABEL: vp_rint_nxv7f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI29_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI29_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI29_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -629,11 +629,11 @@ define <vscale x 8 x double> @vp_rint_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI30_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI30_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
@@ -649,10 +649,10 @@ define <vscale x 8 x double> @vp_rint_nxv8f64_unmasked(<vscale x 8 x double> %va
 ; CHECK-LABEL: vp_rint_nxv8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI31_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI31_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI31_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -686,12 +686,12 @@ define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vsc
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI32_0)(a3)
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
@@ -709,7 +709,7 @@ define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vsc
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v24, fa5, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
@@ -731,13 +731,13 @@ define <vscale x 16 x double> @vp_rint_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    sub a2, a0, a1
 ; CHECK-NEXT:    lui a3, %hi(.LCPI33_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI33_0)(a3)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI33_0)(a3)
 ; CHECK-NEXT:    sltu a3, a0, a2
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v16
-; CHECK-NEXT:    vmflt.vf v0, v24, ft0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -748,7 +748,7 @@ define <vscale x 16 x double> @vp_rint_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:  .LBB33_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8
-; CHECK-NEXT:    vmflt.vf v0, v24, ft0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
index b5c452e6bb22..276faf4f0967 100644
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -10,11 +10,11 @@ define <vscale x 1 x half> @vp_round_nxv1f16(<vscale x 1 x half> %va, <vscale x
 ; CHECK-LABEL: vp_round_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -31,10 +31,10 @@ define <vscale x 1 x half> @vp_round_nxv1f16_unmasked(<vscale x 1 x half> %va, i
 ; CHECK-LABEL: vp_round_nxv1f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -54,11 +54,11 @@ define <vscale x 2 x half> @vp_round_nxv2f16(<vscale x 2 x half> %va, <vscale x
 ; CHECK-LABEL: vp_round_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -75,10 +75,10 @@ define <vscale x 2 x half> @vp_round_nxv2f16_unmasked(<vscale x 2 x half> %va, i
 ; CHECK-LABEL: vp_round_nxv2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -98,11 +98,11 @@ define <vscale x 4 x half> @vp_round_nxv4f16(<vscale x 4 x half> %va, <vscale x
 ; CHECK-LABEL: vp_round_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -119,10 +119,10 @@ define <vscale x 4 x half> @vp_round_nxv4f16_unmasked(<vscale x 4 x half> %va, i
 ; CHECK-LABEL: vp_round_nxv4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -143,11 +143,11 @@ define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -165,10 +165,10 @@ define <vscale x 8 x half> @vp_round_nxv8f16_unmasked(<vscale x 8 x half> %va, i
 ; CHECK-LABEL: vp_round_nxv8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI7_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -189,11 +189,11 @@ define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI8_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI8_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -211,10 +211,10 @@ define <vscale x 16 x half> @vp_round_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; CHECK-LABEL: vp_round_nxv16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI9_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI9_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI9_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -235,11 +235,11 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI10_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI10_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI10_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -257,10 +257,10 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; CHECK-LABEL: vp_round_nxv32f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI11_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI11_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI11_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -282,9 +282,9 @@ define <vscale x 1 x float> @vp_round_nxv1f32(<vscale x 1 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -303,8 +303,8 @@ define <vscale x 1 x float> @vp_round_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -326,9 +326,9 @@ define <vscale x 2 x float> @vp_round_nxv2f32(<vscale x 2 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -347,8 +347,8 @@ define <vscale x 2 x float> @vp_round_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -371,9 +371,9 @@ define <vscale x 4 x float> @vp_round_nxv4f32(<vscale x 4 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -393,8 +393,8 @@ define <vscale x 4 x float> @vp_round_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -417,9 +417,9 @@ define <vscale x 8 x float> @vp_round_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -439,8 +439,8 @@ define <vscale x 8 x float> @vp_round_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -463,9 +463,9 @@ define <vscale x 16 x float> @vp_round_nxv16f32(<vscale x 16 x float> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -485,8 +485,8 @@ define <vscale x 16 x float> @vp_round_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -506,11 +506,11 @@ define <vscale x 1 x double> @vp_round_nxv1f64(<vscale x 1 x double> %va, <vscal
 ; CHECK-LABEL: vp_round_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -527,10 +527,10 @@ define <vscale x 1 x double> @vp_round_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-LABEL: vp_round_nxv1f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI23_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -551,11 +551,11 @@ define <vscale x 2 x double> @vp_round_nxv2f64(<vscale x 2 x double> %va, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -573,10 +573,10 @@ define <vscale x 2 x double> @vp_round_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-LABEL: vp_round_nxv2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI25_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -597,11 +597,11 @@ define <vscale x 4 x double> @vp_round_nxv4f64(<vscale x 4 x double> %va, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI26_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -619,10 +619,10 @@ define <vscale x 4 x double> @vp_round_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-LABEL: vp_round_nxv4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI27_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI27_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -643,11 +643,11 @@ define <vscale x 7 x double> @vp_round_nxv7f64(<vscale x 7 x double> %va, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI28_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI28_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -665,10 +665,10 @@ define <vscale x 7 x double> @vp_round_nxv7f64_unmasked(<vscale x 7 x double> %v
 ; CHECK-LABEL: vp_round_nxv7f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI29_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI29_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI29_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -689,11 +689,11 @@ define <vscale x 8 x double> @vp_round_nxv8f64(<vscale x 8 x double> %va, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI30_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI30_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -711,10 +711,10 @@ define <vscale x 8 x double> @vp_round_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-LABEL: vp_round_nxv8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI31_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI31_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI31_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -750,12 +750,12 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI32_0)(a3)
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a2, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
@@ -776,7 +776,7 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
@@ -802,13 +802,13 @@ define <vscale x 16 x double> @vp_round_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    sub a2, a0, a1
 ; CHECK-NEXT:    lui a3, %hi(.LCPI33_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI33_0)(a3)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI33_0)(a3)
 ; CHECK-NEXT:    sltu a3, a0, a2
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v16
-; CHECK-NEXT:    vmflt.vf v0, v24, ft0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    fsrmi a2, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; CHECK-NEXT:    fsrm a2
@@ -821,7 +821,7 @@ define <vscale x 16 x double> @vp_round_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:  .LBB33_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8
-; CHECK-NEXT:    vmflt.vf v0, v24, ft0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT:    fsrm a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
index 86fddce67885..ea95ec9c9468 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -10,11 +10,11 @@ define <vscale x 1 x half> @vp_roundeven_nxv1f16(<vscale x 1 x half> %va, <vscal
 ; CHECK-LABEL: vp_roundeven_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -31,10 +31,10 @@ define <vscale x 1 x half> @vp_roundeven_nxv1f16_unmasked(<vscale x 1 x half> %v
 ; CHECK-LABEL: vp_roundeven_nxv1f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -54,11 +54,11 @@ define <vscale x 2 x half> @vp_roundeven_nxv2f16(<vscale x 2 x half> %va, <vscal
 ; CHECK-LABEL: vp_roundeven_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -75,10 +75,10 @@ define <vscale x 2 x half> @vp_roundeven_nxv2f16_unmasked(<vscale x 2 x half> %v
 ; CHECK-LABEL: vp_roundeven_nxv2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -98,11 +98,11 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16(<vscale x 4 x half> %va, <vscal
 ; CHECK-LABEL: vp_roundeven_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -119,10 +119,10 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16_unmasked(<vscale x 4 x half> %v
 ; CHECK-LABEL: vp_roundeven_nxv4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -143,11 +143,11 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -165,10 +165,10 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16_unmasked(<vscale x 8 x half> %v
 ; CHECK-LABEL: vp_roundeven_nxv8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI7_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -189,11 +189,11 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI8_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI8_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -211,10 +211,10 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16_unmasked(<vscale x 16 x half>
 ; CHECK-LABEL: vp_roundeven_nxv16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI9_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI9_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI9_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -235,11 +235,11 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI10_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI10_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI10_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -257,10 +257,10 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
 ; CHECK-LABEL: vp_roundeven_nxv32f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI11_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI11_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI11_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -282,9 +282,9 @@ define <vscale x 1 x float> @vp_roundeven_nxv1f32(<vscale x 1 x float> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -303,8 +303,8 @@ define <vscale x 1 x float> @vp_roundeven_nxv1f32_unmasked(<vscale x 1 x float>
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -326,9 +326,9 @@ define <vscale x 2 x float> @vp_roundeven_nxv2f32(<vscale x 2 x float> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -347,8 +347,8 @@ define <vscale x 2 x float> @vp_roundeven_nxv2f32_unmasked(<vscale x 2 x float>
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -371,9 +371,9 @@ define <vscale x 4 x float> @vp_roundeven_nxv4f32(<vscale x 4 x float> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -393,8 +393,8 @@ define <vscale x 4 x float> @vp_roundeven_nxv4f32_unmasked(<vscale x 4 x float>
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -417,9 +417,9 @@ define <vscale x 8 x float> @vp_roundeven_nxv8f32(<vscale x 8 x float> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -439,8 +439,8 @@ define <vscale x 8 x float> @vp_roundeven_nxv8f32_unmasked(<vscale x 8 x float>
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -463,9 +463,9 @@ define <vscale x 16 x float> @vp_roundeven_nxv16f32(<vscale x 16 x float> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -485,8 +485,8 @@ define <vscale x 16 x float> @vp_roundeven_nxv16f32_unmasked(<vscale x 16 x floa
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -506,11 +506,11 @@ define <vscale x 1 x double> @vp_roundeven_nxv1f64(<vscale x 1 x double> %va, <v
 ; CHECK-LABEL: vp_roundeven_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -527,10 +527,10 @@ define <vscale x 1 x double> @vp_roundeven_nxv1f64_unmasked(<vscale x 1 x double
 ; CHECK-LABEL: vp_roundeven_nxv1f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI23_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -551,11 +551,11 @@ define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -573,10 +573,10 @@ define <vscale x 2 x double> @vp_roundeven_nxv2f64_unmasked(<vscale x 2 x double
 ; CHECK-LABEL: vp_roundeven_nxv2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI25_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -597,11 +597,11 @@ define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI26_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -619,10 +619,10 @@ define <vscale x 4 x double> @vp_roundeven_nxv4f64_unmasked(<vscale x 4 x double
 ; CHECK-LABEL: vp_roundeven_nxv4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI27_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI27_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -643,11 +643,11 @@ define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI28_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI28_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -665,10 +665,10 @@ define <vscale x 7 x double> @vp_roundeven_nxv7f64_unmasked(<vscale x 7 x double
 ; CHECK-LABEL: vp_roundeven_nxv7f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI29_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI29_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI29_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -689,11 +689,11 @@ define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI30_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI30_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -711,10 +711,10 @@ define <vscale x 8 x double> @vp_roundeven_nxv8f64_unmasked(<vscale x 8 x double
 ; CHECK-LABEL: vp_roundeven_nxv8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI31_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI31_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI31_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -750,12 +750,12 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI32_0)(a3)
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a2, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
@@ -776,7 +776,7 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
@@ -802,13 +802,13 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64_unmasked(<vscale x 16 x dou
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    sub a2, a0, a1
 ; CHECK-NEXT:    lui a3, %hi(.LCPI33_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI33_0)(a3)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI33_0)(a3)
 ; CHECK-NEXT:    sltu a3, a0, a2
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v16
-; CHECK-NEXT:    vmflt.vf v0, v24, ft0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    fsrmi a2, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; CHECK-NEXT:    fsrm a2
@@ -821,7 +821,7 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64_unmasked(<vscale x 16 x dou
 ; CHECK-NEXT:  .LBB33_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8
-; CHECK-NEXT:    vmflt.vf v0, v24, ft0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT:    fsrm a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
index 1c6759f7bb39..593a6aac07bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -10,11 +10,11 @@ define <vscale x 1 x half> @vp_roundtozero_nxv1f16(<vscale x 1 x half> %va, <vsc
 ; CHECK-LABEL: vp_roundtozero_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -31,10 +31,10 @@ define <vscale x 1 x half> @vp_roundtozero_nxv1f16_unmasked(<vscale x 1 x half>
 ; CHECK-LABEL: vp_roundtozero_nxv1f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -54,11 +54,11 @@ define <vscale x 2 x half> @vp_roundtozero_nxv2f16(<vscale x 2 x half> %va, <vsc
 ; CHECK-LABEL: vp_roundtozero_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -75,10 +75,10 @@ define <vscale x 2 x half> @vp_roundtozero_nxv2f16_unmasked(<vscale x 2 x half>
 ; CHECK-LABEL: vp_roundtozero_nxv2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -98,11 +98,11 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16(<vscale x 4 x half> %va, <vsc
 ; CHECK-LABEL: vp_roundtozero_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -119,10 +119,10 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16_unmasked(<vscale x 4 x half>
 ; CHECK-LABEL: vp_roundtozero_nxv4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -143,11 +143,11 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -165,10 +165,10 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16_unmasked(<vscale x 8 x half>
 ; CHECK-LABEL: vp_roundtozero_nxv8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI7_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -189,11 +189,11 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI8_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI8_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -211,10 +211,10 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16_unmasked(<vscale x 16 x hal
 ; CHECK-LABEL: vp_roundtozero_nxv16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI9_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI9_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI9_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -235,11 +235,11 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI10_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI10_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI10_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -257,10 +257,10 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x hal
 ; CHECK-LABEL: vp_roundtozero_nxv32f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI11_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI11_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI11_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -282,9 +282,9 @@ define <vscale x 1 x float> @vp_roundtozero_nxv1f32(<vscale x 1 x float> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -303,8 +303,8 @@ define <vscale x 1 x float> @vp_roundtozero_nxv1f32_unmasked(<vscale x 1 x float
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -326,9 +326,9 @@ define <vscale x 2 x float> @vp_roundtozero_nxv2f32(<vscale x 2 x float> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -347,8 +347,8 @@ define <vscale x 2 x float> @vp_roundtozero_nxv2f32_unmasked(<vscale x 2 x float
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -371,9 +371,9 @@ define <vscale x 4 x float> @vp_roundtozero_nxv4f32(<vscale x 4 x float> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -393,8 +393,8 @@ define <vscale x 4 x float> @vp_roundtozero_nxv4f32_unmasked(<vscale x 4 x float
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -417,9 +417,9 @@ define <vscale x 8 x float> @vp_roundtozero_nxv8f32(<vscale x 8 x float> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -439,8 +439,8 @@ define <vscale x 8 x float> @vp_roundtozero_nxv8f32_unmasked(<vscale x 8 x float
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -463,9 +463,9 @@ define <vscale x 16 x float> @vp_roundtozero_nxv16f32(<vscale x 16 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -485,8 +485,8 @@ define <vscale x 16 x float> @vp_roundtozero_nxv16f32_unmasked(<vscale x 16 x fl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -506,11 +506,11 @@ define <vscale x 1 x double> @vp_roundtozero_nxv1f64(<vscale x 1 x double> %va,
 ; CHECK-LABEL: vp_roundtozero_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -527,10 +527,10 @@ define <vscale x 1 x double> @vp_roundtozero_nxv1f64_unmasked(<vscale x 1 x doub
 ; CHECK-LABEL: vp_roundtozero_nxv1f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI23_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, ft0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -551,11 +551,11 @@ define <vscale x 2 x double> @vp_roundtozero_nxv2f64(<vscale x 2 x double> %va,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v10
@@ -573,10 +573,10 @@ define <vscale x 2 x double> @vp_roundtozero_nxv2f64_unmasked(<vscale x 2 x doub
 ; CHECK-LABEL: vp_roundtozero_nxv2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI25_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, ft0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -597,11 +597,11 @@ define <vscale x 4 x double> @vp_roundtozero_nxv4f64(<vscale x 4 x double> %va,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI26_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v12, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v12
@@ -619,10 +619,10 @@ define <vscale x 4 x double> @vp_roundtozero_nxv4f64_unmasked(<vscale x 4 x doub
 ; CHECK-LABEL: vp_roundtozero_nxv4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI27_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI27_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI27_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, ft0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -643,11 +643,11 @@ define <vscale x 7 x double> @vp_roundtozero_nxv7f64(<vscale x 7 x double> %va,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI28_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI28_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -665,10 +665,10 @@ define <vscale x 7 x double> @vp_roundtozero_nxv7f64_unmasked(<vscale x 7 x doub
 ; CHECK-LABEL: vp_roundtozero_nxv7f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI29_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI29_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI29_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -689,11 +689,11 @@ define <vscale x 8 x double> @vp_roundtozero_nxv8f64(<vscale x 8 x double> %va,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
 ; CHECK-NEXT:    lui a1, %hi(.LCPI30_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI30_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v16, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v16
@@ -711,10 +711,10 @@ define <vscale x 8 x double> @vp_roundtozero_nxv8f64_unmasked(<vscale x 8 x doub
 ; CHECK-LABEL: vp_roundtozero_nxv8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI31_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI31_0)(a1)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI31_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
-; CHECK-NEXT:    vmflt.vf v0, v16, ft0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
@@ -750,12 +750,12 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI32_0)(a3)
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    vfabs.v v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v2, v24, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v2, v24, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a2, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
@@ -776,7 +776,7 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v1, v16, ft0, v0.t
+; CHECK-NEXT:    vmflt.vf v1, v16, fa5, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
@@ -802,13 +802,13 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64_unmasked(<vscale x 16 x d
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    sub a2, a0, a1
 ; CHECK-NEXT:    lui a3, %hi(.LCPI33_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI33_0)(a3)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI33_0)(a3)
 ; CHECK-NEXT:    sltu a3, a0, a2
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v16
-; CHECK-NEXT:    vmflt.vf v0, v24, ft0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    fsrmi a2, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; CHECK-NEXT:    fsrm a2
@@ -821,7 +821,7 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64_unmasked(<vscale x 16 x d
 ; CHECK-NEXT:  .LBB33_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8
-; CHECK-NEXT:    vmflt.vf v0, v24, ft0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; CHECK-NEXT:    fsrm a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
index 04c708ec1d4c..8509dd5ca6d5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
@@ -2514,10 +2514,10 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 define <vscale x 16 x i1> @fcmp_oeq_vf_nx16f64(<vscale x 16 x double> %va) {
 ; RV32-LABEL: fcmp_oeq_vf_nx16f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fcvt.d.w fa5, zero
 ; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; RV32-NEXT:    vmfeq.vf v24, v16, ft0
-; RV32-NEXT:    vmfeq.vf v0, v8, ft0
+; RV32-NEXT:    vmfeq.vf v24, v16, fa5
+; RV32-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    srli a0, a0, 3
 ; RV32-NEXT:    add a1, a0, a0
@@ -2527,10 +2527,10 @@ define <vscale x 16 x i1> @fcmp_oeq_vf_nx16f64(<vscale x 16 x double> %va) {
 ;
 ; RV64-LABEL: fcmp_oeq_vf_nx16f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fmv.d.x fa5, zero
 ; RV64-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; RV64-NEXT:    vmfeq.vf v24, v16, ft0
-; RV64-NEXT:    vmfeq.vf v0, v8, ft0
+; RV64-NEXT:    vmfeq.vf v24, v16, fa5
+; RV64-NEXT:    vmfeq.vf v0, v8, fa5
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    srli a0, a0, 3
 ; RV64-NEXT:    add a1, a0, a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 583e29b82c67..0ee01da02180 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -1489,9 +1489,9 @@ define void @sink_splat_fmul_scalable(ptr nocapture %a, float %x) {
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:  .LBB26_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    flw ft0, 0(a0)
-; CHECK-NEXT:    fmul.s ft0, ft0, fa0
-; CHECK-NEXT:    fsw ft0, 0(a0)
+; CHECK-NEXT:    flw fa5, 0(a0)
+; CHECK-NEXT:    fmul.s fa5, fa5, fa0
+; CHECK-NEXT:    fsw fa5, 0(a0)
 ; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    bnez a2, .LBB26_6
@@ -1579,9 +1579,9 @@ define void @sink_splat_fdiv_scalable(ptr nocapture %a, float %x) {
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:  .LBB27_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    flw ft0, 0(a0)
-; CHECK-NEXT:    fdiv.s ft0, ft0, fa0
-; CHECK-NEXT:    fsw ft0, 0(a0)
+; CHECK-NEXT:    flw fa5, 0(a0)
+; CHECK-NEXT:    fdiv.s fa5, fa5, fa0
+; CHECK-NEXT:    fsw fa5, 0(a0)
 ; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    bnez a2, .LBB27_6
@@ -1669,9 +1669,9 @@ define void @sink_splat_frdiv_scalable(ptr nocapture %a, float %x) {
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:  .LBB28_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    flw ft0, 0(a0)
-; CHECK-NEXT:    fdiv.s ft0, fa0, ft0
-; CHECK-NEXT:    fsw ft0, 0(a0)
+; CHECK-NEXT:    flw fa5, 0(a0)
+; CHECK-NEXT:    fdiv.s fa5, fa0, fa5
+; CHECK-NEXT:    fsw fa5, 0(a0)
 ; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    bnez a2, .LBB28_6
@@ -1759,9 +1759,9 @@ define void @sink_splat_fadd_scalable(ptr nocapture %a, float %x) {
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:  .LBB29_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    flw ft0, 0(a0)
-; CHECK-NEXT:    fadd.s ft0, ft0, fa0
-; CHECK-NEXT:    fsw ft0, 0(a0)
+; CHECK-NEXT:    flw fa5, 0(a0)
+; CHECK-NEXT:    fadd.s fa5, fa5, fa0
+; CHECK-NEXT:    fsw fa5, 0(a0)
 ; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    bnez a2, .LBB29_6
@@ -1849,9 +1849,9 @@ define void @sink_splat_fsub_scalable(ptr nocapture %a, float %x) {
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:  .LBB30_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    flw ft0, 0(a0)
-; CHECK-NEXT:    fsub.s ft0, ft0, fa0
-; CHECK-NEXT:    fsw ft0, 0(a0)
+; CHECK-NEXT:    flw fa5, 0(a0)
+; CHECK-NEXT:    fsub.s fa5, fa5, fa0
+; CHECK-NEXT:    fsw fa5, 0(a0)
 ; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    bnez a2, .LBB30_6
@@ -1939,9 +1939,9 @@ define void @sink_splat_frsub_scalable(ptr nocapture %a, float %x) {
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:  .LBB31_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    flw ft0, 0(a0)
-; CHECK-NEXT:    fsub.s ft0, fa0, ft0
-; CHECK-NEXT:    fsw ft0, 0(a0)
+; CHECK-NEXT:    flw fa5, 0(a0)
+; CHECK-NEXT:    fsub.s fa5, fa0, fa5
+; CHECK-NEXT:    fsw fa5, 0(a0)
 ; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
 ; CHECK-NEXT:    bnez a2, .LBB31_6
@@ -2109,10 +2109,10 @@ define void @sink_splat_fma_scalable(ptr noalias nocapture %a, ptr noalias nocap
 ; CHECK-NEXT:    add a0, a0, a2
 ; CHECK-NEXT:  .LBB34_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    flw ft0, 0(a0)
-; CHECK-NEXT:    flw ft1, 0(a1)
-; CHECK-NEXT:    fmadd.s ft0, ft0, fa0, ft1
-; CHECK-NEXT:    fsw ft0, 0(a0)
+; CHECK-NEXT:    flw fa5, 0(a0)
+; CHECK-NEXT:    flw fa4, 0(a1)
+; CHECK-NEXT:    fmadd.s fa5, fa5, fa0, fa4
+; CHECK-NEXT:    fsw fa5, 0(a0)
 ; CHECK-NEXT:    addi a3, a3, 1
 ; CHECK-NEXT:    addi a1, a1, 4
 ; CHECK-NEXT:    addi a0, a0, 4
@@ -2209,10 +2209,10 @@ define void @sink_splat_fma_commute_scalable(ptr noalias nocapture %a, ptr noali
 ; CHECK-NEXT:    add a0, a0, a2
 ; CHECK-NEXT:  .LBB35_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    flw ft0, 0(a0)
-; CHECK-NEXT:    flw ft1, 0(a1)
-; CHECK-NEXT:    fmadd.s ft0, fa0, ft0, ft1
-; CHECK-NEXT:    fsw ft0, 0(a0)
+; CHECK-NEXT:    flw fa5, 0(a0)
+; CHECK-NEXT:    flw fa4, 0(a1)
+; CHECK-NEXT:    fmadd.s fa5, fa0, fa5, fa4
+; CHECK-NEXT:    fsw fa5, 0(a0)
 ; CHECK-NEXT:    addi a3, a3, 1
 ; CHECK-NEXT:    addi a1, a1, 4
 ; CHECK-NEXT:    addi a0, a0, 4

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll
index b431fd0d7983..eb4e5954515f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll
@@ -604,9 +604,9 @@ define <vscale x 1 x float> @vfcopysign_exttrunc_vv_nxv1f32_nxv1f16(<vscale x 1
 define <vscale x 1 x float> @vfcopysign_exttrunc_vf_nxv1f32_nxv1f16(<vscale x 1 x float> %vm, half %s) {
 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.s.h ft0, fa0
+; CHECK-NEXT:    fcvt.s.h fa5, fa0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -632,9 +632,9 @@ define <vscale x 1 x float> @vfcopynsign_exttrunc_vv_nxv1f32_nxv1f16(<vscale x 1
 define <vscale x 1 x float> @vfcopynsign_exttrunc_vf_nxv1f32_nxv1f16(<vscale x 1 x float> %vm, half %s) {
 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.s.h ft0, fa0
+; CHECK-NEXT:    fcvt.s.h fa5, fa0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -862,9 +862,9 @@ define <vscale x 8 x float> @vfcopysign_exttrunc_vv_nxv8f32_nxv8f16(<vscale x 8
 define <vscale x 8 x float> @vfcopysign_exttrunc_vf_nxv8f32_nxv8f16(<vscale x 8 x float> %vm, half %s) {
 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.s.h ft0, fa0
+; CHECK-NEXT:    fcvt.s.h fa5, fa0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -890,9 +890,9 @@ define <vscale x 8 x float> @vfcopynsign_exttrunc_vv_nxv8f32_nxv8f16(<vscale x 8
 define <vscale x 8 x float> @vfcopynsign_exttrunc_vf_nxv8f32_nxv8f16(<vscale x 8 x float> %vm, half %s) {
 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.s.h ft0, fa0
+; CHECK-NEXT:    fcvt.s.h fa5, fa0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -1074,9 +1074,9 @@ define <vscale x 1 x double> @vfcopysign_exttrunc_vv_nxv1f64_nxv1f16(<vscale x 1
 define <vscale x 1 x double> @vfcopysign_exttrunc_vf_nxv1f64_nxv1f16(<vscale x 1 x double> %vm, half %s) {
 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f64_nxv1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.d.h ft0, fa0
+; CHECK-NEXT:    fcvt.d.h fa5, fa0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -1104,9 +1104,9 @@ define <vscale x 1 x double> @vfcopynsign_exttrunc_vv_nxv1f64_nxv1f16(<vscale x
 define <vscale x 1 x double> @vfcopynsign_exttrunc_vf_nxv1f64_nxv1f16(<vscale x 1 x double> %vm, half %s) {
 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f64_nxv1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.d.h ft0, fa0
+; CHECK-NEXT:    fcvt.d.h fa5, fa0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -1132,9 +1132,9 @@ define <vscale x 1 x double> @vfcopysign_exttrunc_vv_nxv1f64_nxv1f32(<vscale x 1
 define <vscale x 1 x double> @vfcopysign_exttrunc_vf_nxv1f64_nxv1f32(<vscale x 1 x double> %vm, float %s) {
 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.d.s ft0, fa0
+; CHECK-NEXT:    fcvt.d.s fa5, fa0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x float> poison, float %s, i32 0
   %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
@@ -1160,9 +1160,9 @@ define <vscale x 1 x double> @vfcopynsign_exttrunc_vv_nxv1f64_nxv1f32(<vscale x
 define <vscale x 1 x double> @vfcopynsign_exttrunc_vf_nxv1f64_nxv1f32(<vscale x 1 x double> %vm, float %s) {
 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.d.s ft0, fa0
+; CHECK-NEXT:    fcvt.d.s fa5, fa0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x float> poison, float %s, i32 0
   %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
@@ -1334,9 +1334,9 @@ define <vscale x 8 x double> @vfcopysign_exttrunc_vv_nxv8f64_nxv8f16(<vscale x 8
 define <vscale x 8 x double> @vfcopysign_exttrunc_vf_nxv8f64_nxv8f16(<vscale x 8 x double> %vm, half %s) {
 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f64_nxv8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.d.h ft0, fa0
+; CHECK-NEXT:    fcvt.d.h fa5, fa0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -1364,9 +1364,9 @@ define <vscale x 8 x double> @vfcopynsign_exttrunc_vv_nxv8f64_nxv8f16(<vscale x
 define <vscale x 8 x double> @vfcopynsign_exttrunc_vf_nxv8f64_nxv8f16(<vscale x 8 x double> %vm, half %s) {
 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f64_nxv8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.d.h ft0, fa0
+; CHECK-NEXT:    fcvt.d.h fa5, fa0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %s, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -1392,9 +1392,9 @@ define <vscale x 8 x double> @vfcopysign_exttrunc_vv_nxv8f64_nxv8f32(<vscale x 8
 define <vscale x 8 x double> @vfcopysign_exttrunc_vf_nxv8f64_nxv8f32(<vscale x 8 x double> %vm, float %s) {
 ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.d.s ft0, fa0
+; CHECK-NEXT:    fcvt.d.s fa5, fa0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %s, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@@ -1420,9 +1420,9 @@ define <vscale x 8 x double> @vfcopynsign_exttrunc_vv_nxv8f64_nxv8f32(<vscale x
 define <vscale x 8 x double> @vfcopynsign_exttrunc_vf_nxv8f64_nxv8f32(<vscale x 8 x double> %vm, float %s) {
 ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.d.s ft0, fa0
+; CHECK-NEXT:    fcvt.d.s fa5, fa0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %s, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
index c28e93a17d17..c0e021c3f069 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -424,9 +424,9 @@ define half @vreduce_fmin_nxv1f16(<vscale x 1 x half> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI30_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI30_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI30_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    vfmv.s.f v9, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -438,9 +438,9 @@ define half @vreduce_fmin_nxv1f16_nonans(<vscale x 1 x half> %v) #0 {
 ; CHECK-LABEL: vreduce_fmin_nxv1f16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI31_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI31_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI31_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    vfmv.s.f v9, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -452,9 +452,9 @@ define half @vreduce_fmin_nxv1f16_nonans_noinfs(<vscale x 1 x half> %v) #1 {
 ; CHECK-LABEL: vreduce_fmin_nxv1f16_nonans_noinfs:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI32_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI32_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI32_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    vfmv.s.f v9, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -468,9 +468,9 @@ define half @vreduce_fmin_nxv2f16(<vscale x 2 x half> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI33_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI33_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI33_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    vfmv.s.f v9, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -484,9 +484,9 @@ define half @vreduce_fmin_nxv4f16(<vscale x 4 x half> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI34_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI34_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI34_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    vfmv.s.f v9, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -500,10 +500,10 @@ define half @vreduce_fmin_nxv64f16(<vscale x 64 x half> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv64f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI35_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI35_0)(a0)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI35_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    vfmv.s.f v16, ft0
+; CHECK-NEXT:    vfmv.s.f v16, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -543,9 +543,9 @@ define float @vreduce_fmin_nxv1f32_nonans_noinfs(<vscale x 1 x float> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv1f32_nonans_noinfs:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI38_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI38_0)(a0)
+; CHECK-NEXT:    flw fa5, %lo(.LCPI38_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    vfmv.s.f v9, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -605,9 +605,9 @@ define double @vreduce_fmin_nxv1f64(<vscale x 1 x double> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI42_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI42_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    vfmv.s.f v9, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -619,9 +619,9 @@ define double @vreduce_fmin_nxv1f64_nonans(<vscale x 1 x double> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv1f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI43_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI43_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    vfmv.s.f v9, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -633,9 +633,9 @@ define double @vreduce_fmin_nxv1f64_nonans_noinfs(<vscale x 1 x double> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv1f64_nonans_noinfs:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI44_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI44_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    vfmv.s.f v9, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -649,9 +649,9 @@ define double @vreduce_fmin_nxv2f64(<vscale x 2 x double> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI45_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI45_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI45_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vfmv.s.f v10, ft0
+; CHECK-NEXT:    vfmv.s.f v10, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -665,9 +665,9 @@ define double @vreduce_fmin_nxv4f64(<vscale x 4 x double> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI46_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI46_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI46_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vfmv.s.f v12, ft0
+; CHECK-NEXT:    vfmv.s.f v12, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v12
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -681,10 +681,10 @@ define double @vreduce_fmin_nxv16f64(<vscale x 16 x double> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv16f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI47_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI47_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI47_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    vfmv.s.f v16, ft0
+; CHECK-NEXT:    vfmv.s.f v16, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -811,9 +811,9 @@ define float @vreduce_fmax_nxv1f32_nonans_noinfs(<vscale x 1 x float> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv1f32_nonans_noinfs:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI56_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI56_0)(a0)
+; CHECK-NEXT:    flw fa5, %lo(.LCPI56_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    vfmv.s.f v9, fa5
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -873,9 +873,9 @@ define double @vreduce_fmax_nxv1f64(<vscale x 1 x double> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI60_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI60_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI60_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    vfmv.s.f v9, fa5
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -887,9 +887,9 @@ define double @vreduce_fmax_nxv1f64_nonans(<vscale x 1 x double> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv1f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI61_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI61_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI61_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    vfmv.s.f v9, fa5
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -901,9 +901,9 @@ define double @vreduce_fmax_nxv1f64_nonans_noinfs(<vscale x 1 x double> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv1f64_nonans_noinfs:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI62_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI62_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI62_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    vfmv.s.f v9, fa5
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -917,9 +917,9 @@ define double @vreduce_fmax_nxv2f64(<vscale x 2 x double> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI63_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI63_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI63_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vfmv.s.f v10, ft0
+; CHECK-NEXT:    vfmv.s.f v10, fa5
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -933,9 +933,9 @@ define double @vreduce_fmax_nxv4f64(<vscale x 4 x double> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI64_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI64_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI64_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vfmv.s.f v12, ft0
+; CHECK-NEXT:    vfmv.s.f v12, fa5
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v12
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -949,10 +949,10 @@ define double @vreduce_fmax_nxv16f64(<vscale x 16 x double> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv16f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(.LCPI65_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI65_0)(a0)
+; CHECK-NEXT:    fld fa5, %lo(.LCPI65_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    vfmv.s.f v16, ft0
+; CHECK-NEXT:    vfmv.s.f v16, fa5
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -1112,11 +1112,11 @@ define half @vreduce_fmin_nxv10f16(<vscale x 10 x half> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    lui a1, %hi(.LCPI73_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI73_0)(a1)
+; CHECK-NEXT:    flh fa5, %lo(.LCPI73_0)(a1)
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    add a1, a0, a0
 ; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfmv.v.f v12, ft0
+; CHECK-NEXT:    vfmv.v.f v12, fa5
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vx v10, v12, a0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
@@ -1124,7 +1124,7 @@ define half @vreduce_fmin_nxv10f16(<vscale x 10 x half> %v) {
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vx v11, v12, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfmv.s.f v12, ft0
+; CHECK-NEXT:    vfmv.s.f v12, fa5
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v12
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 1a462576a7b3..68b52cddec8b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -331,11 +331,11 @@ define double @test17(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double>
 ; CHECK-LABEL: test17:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    vfmv.f.s ft1, v8
-; CHECK-NEXT:    fadd.d fa0, ft0, ft1
+; CHECK-NEXT:    vfmv.f.s fa4, v8
+; CHECK-NEXT:    fadd.d fa0, fa5, fa4
 ; CHECK-NEXT:    ret
 entry:
   %vsetvli = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
index 0612d88a91d7..607f6385d3b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
@@ -84,9 +84,9 @@ define <vscale x 8 x float> @vsplat_load_nxv8f32(ptr %ptr) {
 ;
 ; NOT-OPTIMIZED-LABEL: vsplat_load_nxv8f32:
 ; NOT-OPTIMIZED:       # %bb.0:
-; NOT-OPTIMIZED-NEXT:    flw ft0, 0(a0)
+; NOT-OPTIMIZED-NEXT:    flw fa5, 0(a0)
 ; NOT-OPTIMIZED-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; NOT-OPTIMIZED-NEXT:    vfmv.v.f v8, ft0
+; NOT-OPTIMIZED-NEXT:    vfmv.v.f v8, fa5
 ; NOT-OPTIMIZED-NEXT:    ret
   %f = load float, ptr %ptr
   %head = insertelement <vscale x 8 x float> poison, float %f, i32 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll
index e4ae59e01202..89d3219abb4e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll
@@ -7,9 +7,9 @@
 define <vscale x 8 x half> @vsplat_f16_0p625() {
 ; CHECK-LABEL: vsplat_f16_0p625:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fli.h ft0, 0.625
+; CHECK-NEXT:    fli.h fa5, 0.625
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfmv.v.f v8, ft0
+; CHECK-NEXT:    vfmv.v.f v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half 0.625, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -19,9 +19,9 @@ define <vscale x 8 x half> @vsplat_f16_0p625() {
 define <vscale x 8 x float> @vsplat_f32_0p75() {
 ; CHECK-LABEL: vsplat_f32_0p75:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fli.s ft0, 0.75
+; CHECK-NEXT:    fli.s fa5, 0.75
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfmv.v.f v8, ft0
+; CHECK-NEXT:    vfmv.v.f v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float 0.75, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@@ -31,9 +31,9 @@ define <vscale x 8 x float> @vsplat_f32_0p75() {
 define <vscale x 8 x double> @vsplat_f64_neg1() {
 ; CHECK-LABEL: vsplat_f64_neg1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    fli.d ft0, -1.0
+; CHECK-NEXT:    fli.d fa5, -1.0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vfmv.v.f v8, ft0
+; CHECK-NEXT:    vfmv.v.f v8, fa5
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double -1.0, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/select-const.ll b/llvm/test/CodeGen/RISCV/select-const.ll
index 07f7f446f994..792df6236ddc 100644
--- a/llvm/test/CodeGen/RISCV/select-const.ll
+++ b/llvm/test/CodeGen/RISCV/select-const.ll
@@ -102,8 +102,8 @@ define float @select_const_fp(i1 zeroext %a) nounwind {
 ; RV32IF-NEXT:  .LBB4_2:
 ; RV32IF-NEXT:    lui a0, 263168
 ; RV32IF-NEXT:  .LBB4_3:
-; RV32IF-NEXT:    fmv.w.x ft0, a0
-; RV32IF-NEXT:    fmv.x.w a0, ft0
+; RV32IF-NEXT:    fmv.w.x fa5, a0
+; RV32IF-NEXT:    fmv.x.w a0, fa5
 ; RV32IF-NEXT:    ret
 ;
 ; RV64I-LABEL: select_const_fp:
@@ -125,8 +125,8 @@ define float @select_const_fp(i1 zeroext %a) nounwind {
 ; RV64IFD-NEXT:  .LBB4_2:
 ; RV64IFD-NEXT:    lui a0, 263168
 ; RV64IFD-NEXT:  .LBB4_3:
-; RV64IFD-NEXT:    fmv.w.x ft0, a0
-; RV64IFD-NEXT:    fmv.x.w a0, ft0
+; RV64IFD-NEXT:    fmv.w.x fa5, a0
+; RV64IFD-NEXT:    fmv.x.w a0, fa5
 ; RV64IFD-NEXT:    ret
   %1 = select i1 %a, float 3.0, float 4.0
   ret float %1

diff --git a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
index 0efe2996ec84..f4c3b072c8fe 100644
--- a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
+++ b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
@@ -181,15 +181,15 @@ define float @cmovfloat(i1 %a, float %b, float %c, float %d, float %e) nounwind
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    bnez a0, .LBB4_2
 ; RV32I-NEXT:  # %bb.1: # %entry
-; RV32I-NEXT:    fmv.w.x ft0, a4
-; RV32I-NEXT:    fmv.w.x ft1, a2
+; RV32I-NEXT:    fmv.w.x fa5, a4
+; RV32I-NEXT:    fmv.w.x fa4, a2
 ; RV32I-NEXT:    j .LBB4_3
 ; RV32I-NEXT:  .LBB4_2:
-; RV32I-NEXT:    fmv.w.x ft0, a3
-; RV32I-NEXT:    fmv.w.x ft1, a1
+; RV32I-NEXT:    fmv.w.x fa5, a3
+; RV32I-NEXT:    fmv.w.x fa4, a1
 ; RV32I-NEXT:  .LBB4_3: # %entry
-; RV32I-NEXT:    fadd.s ft0, ft1, ft0
-; RV32I-NEXT:    fmv.x.w a0, ft0
+; RV32I-NEXT:    fadd.s fa5, fa4, fa5
+; RV32I-NEXT:    fmv.x.w a0, fa5
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: cmovfloat:
@@ -197,15 +197,15 @@ define float @cmovfloat(i1 %a, float %b, float %c, float %d, float %e) nounwind
 ; RV64I-NEXT:    andi a0, a0, 1
 ; RV64I-NEXT:    bnez a0, .LBB4_2
 ; RV64I-NEXT:  # %bb.1: # %entry
-; RV64I-NEXT:    fmv.w.x ft0, a4
-; RV64I-NEXT:    fmv.w.x ft1, a2
+; RV64I-NEXT:    fmv.w.x fa5, a4
+; RV64I-NEXT:    fmv.w.x fa4, a2
 ; RV64I-NEXT:    j .LBB4_3
 ; RV64I-NEXT:  .LBB4_2:
-; RV64I-NEXT:    fmv.w.x ft0, a3
-; RV64I-NEXT:    fmv.w.x ft1, a1
+; RV64I-NEXT:    fmv.w.x fa5, a3
+; RV64I-NEXT:    fmv.w.x fa4, a1
 ; RV64I-NEXT:  .LBB4_3: # %entry
-; RV64I-NEXT:    fadd.s ft0, ft1, ft0
-; RV64I-NEXT:    fmv.x.w a0, ft0
+; RV64I-NEXT:    fadd.s fa5, fa4, fa5
+; RV64I-NEXT:    fmv.x.w a0, fa5
 ; RV64I-NEXT:    ret
 entry:
   %cond1 = select i1 %a, float %b, float %c
@@ -220,15 +220,15 @@ define double @cmovdouble(i1 %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw a3, 8(sp)
 ; RV32I-NEXT:    sw a4, 12(sp)
-; RV32I-NEXT:    fld ft0, 8(sp)
+; RV32I-NEXT:    fld fa5, 8(sp)
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    sw a2, 12(sp)
 ; RV32I-NEXT:    beqz a0, .LBB5_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    fld ft0, 8(sp)
+; RV32I-NEXT:    fld fa5, 8(sp)
 ; RV32I-NEXT:  .LBB5_2: # %entry
-; RV32I-NEXT:    fsd ft0, 8(sp)
+; RV32I-NEXT:    fsd fa5, 8(sp)
 ; RV32I-NEXT:    lw a0, 8(sp)
 ; RV32I-NEXT:    lw a1, 12(sp)
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -239,12 +239,12 @@ define double @cmovdouble(i1 %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    andi a0, a0, 1
 ; RV64I-NEXT:    bnez a0, .LBB5_2
 ; RV64I-NEXT:  # %bb.1: # %entry
-; RV64I-NEXT:    fmv.d.x ft0, a2
-; RV64I-NEXT:    fmv.x.d a0, ft0
+; RV64I-NEXT:    fmv.d.x fa5, a2
+; RV64I-NEXT:    fmv.x.d a0, fa5
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB5_2:
-; RV64I-NEXT:    fmv.d.x ft0, a1
-; RV64I-NEXT:    fmv.x.d a0, ft0
+; RV64I-NEXT:    fmv.d.x fa5, a1
+; RV64I-NEXT:    fmv.x.d a0, fa5
 ; RV64I-NEXT:    ret
 entry:
   %cond = select i1 %a, double %b, double %c
@@ -342,36 +342,36 @@ entry:
 define float @CascadedSelect(float noundef %a) {
 ; RV32I-LABEL: CascadedSelect:
 ; RV32I:       # %bb.0: # %entry
-; RV32I-NEXT:    fmv.w.x ft0, a0
+; RV32I-NEXT:    fmv.w.x fa5, a0
 ; RV32I-NEXT:    lui a0, 260096
-; RV32I-NEXT:    fmv.w.x ft1, a0
-; RV32I-NEXT:    flt.s a0, ft1, ft0
+; RV32I-NEXT:    fmv.w.x fa4, a0
+; RV32I-NEXT:    flt.s a0, fa4, fa5
 ; RV32I-NEXT:    bnez a0, .LBB8_3
 ; RV32I-NEXT:  # %bb.1: # %entry
-; RV32I-NEXT:    fmv.w.x ft1, zero
-; RV32I-NEXT:    flt.s a0, ft0, ft1
+; RV32I-NEXT:    fmv.w.x fa4, zero
+; RV32I-NEXT:    flt.s a0, fa5, fa4
 ; RV32I-NEXT:    bnez a0, .LBB8_3
 ; RV32I-NEXT:  # %bb.2: # %entry
-; RV32I-NEXT:    fmv.s ft1, ft0
+; RV32I-NEXT:    fmv.s fa4, fa5
 ; RV32I-NEXT:  .LBB8_3: # %entry
-; RV32I-NEXT:    fmv.x.w a0, ft1
+; RV32I-NEXT:    fmv.x.w a0, fa4
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: CascadedSelect:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    fmv.w.x ft0, a0
+; RV64I-NEXT:    fmv.w.x fa5, a0
 ; RV64I-NEXT:    lui a0, 260096
-; RV64I-NEXT:    fmv.w.x ft1, a0
-; RV64I-NEXT:    flt.s a0, ft1, ft0
+; RV64I-NEXT:    fmv.w.x fa4, a0
+; RV64I-NEXT:    flt.s a0, fa4, fa5
 ; RV64I-NEXT:    bnez a0, .LBB8_3
 ; RV64I-NEXT:  # %bb.1: # %entry
-; RV64I-NEXT:    fmv.w.x ft1, zero
-; RV64I-NEXT:    flt.s a0, ft0, ft1
+; RV64I-NEXT:    fmv.w.x fa4, zero
+; RV64I-NEXT:    flt.s a0, fa5, fa4
 ; RV64I-NEXT:    bnez a0, .LBB8_3
 ; RV64I-NEXT:  # %bb.2: # %entry
-; RV64I-NEXT:    fmv.s ft1, ft0
+; RV64I-NEXT:    fmv.s fa4, fa5
 ; RV64I-NEXT:  .LBB8_3: # %entry
-; RV64I-NEXT:    fmv.x.w a0, ft1
+; RV64I-NEXT:    fmv.x.w a0, fa4
 ; RV64I-NEXT:    ret
 entry:
   %cmp = fcmp ogt float %a, 1.000000e+00

diff --git a/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll b/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
index 95622bfb11a1..865945cee280 100644
--- a/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
+++ b/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
@@ -16,13 +16,13 @@ define <vscale x 1 x half> @intrinsic_vfmv.f.s_s_nxv1f16(<vscale x 1 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    vsetivli zero, 0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fsh ft0, 14(sp) # 2-byte Folded Spill
+; CHECK-NEXT:    vfmv.f.s fa5, v8
+; CHECK-NEXT:    fsh fa5, 14(sp) # 2-byte Folded Spill
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    flh ft0, 14(sp) # 2-byte Folded Reload
-; CHECK-NEXT:    vfmv.v.f v8, ft0
+; CHECK-NEXT:    flh fa5, 14(sp) # 2-byte Folded Reload
+; CHECK-NEXT:    vfmv.v.f v8, fa5
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 entry:
@@ -37,13 +37,13 @@ define <vscale x 1 x float> @intrinsic_vfmv.f.s_s_nxv1f32(<vscale x 1 x float> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    vsetivli zero, 0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fsw ft0, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    vfmv.f.s fa5, v8
+; CHECK-NEXT:    fsw fa5, 12(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
-; CHECK-NEXT:    vfmv.v.f v8, ft0
+; CHECK-NEXT:    flw fa5, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    vfmv.v.f v8, fa5
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 entry:
@@ -58,13 +58,13 @@ define <vscale x 1 x double> @intrinsic_vfmv.f.s_s_nxv1f64(<vscale x 1 x double>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, ma
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    vfmv.f.s fa5, v8
+; CHECK-NEXT:    fsd fa5, 8(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    vfmv.v.f v8, ft0
+; CHECK-NEXT:    fld fa5, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    vfmv.v.f v8, fa5
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/subtarget-features-std-ext.ll b/llvm/test/CodeGen/RISCV/subtarget-features-std-ext.ll
index 613a983f926d..5893ae275e3b 100644
--- a/llvm/test/CodeGen/RISCV/subtarget-features-std-ext.ll
+++ b/llvm/test/CodeGen/RISCV/subtarget-features-std-ext.ll
@@ -9,8 +9,8 @@
 
 
 define float @foo(i32 %a) nounwind #0 {
-; RV32IF-ILP32: fcvt.s.w  ft0, a0
-; RV32IF-ILP32-NEXT: fmv.x.w a0, ft0
+; RV32IF-ILP32: fcvt.s.w  fa5, a0
+; RV32IF-ILP32-NEXT: fmv.x.w a0, fa5
 ; RV32IF-ILP32F: fcvt.s.w fa0, a0
 ; RV32IF-ILP32F-NEXT: ret
   %conv = sitofp i32 %a to float

diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index dd3c3d865c96..b6b781821827 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -676,8 +676,8 @@ define i64 @va2_va_arg(ptr %fmt, ...) nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    andi a0, a0, -8
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a1, a0, 8
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a1, 12(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    fld ft0, 0(a0)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    fsd ft0, 0(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    fld fa5, 0(a0)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    fsd fa5, 0(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw a0, 0(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw a1, 4(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 48
@@ -996,8 +996,8 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    andi a0, a0, -8
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a3, a0, 8
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a3, 20(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    fld ft0, 0(a0)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    fsd ft0, 8(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    fld fa5, 0(a0)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    fsd fa5, 8(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw a0, 12(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw a3, 8(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    add a2, a2, a0

diff --git a/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll b/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll
index fb3fd3bd0050..0b908c73bdd3 100644
--- a/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll
+++ b/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll
@@ -7,14 +7,14 @@
 define float @flrw(float* %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: flrw:
 ; RV32XTHEADMEMIDX:       # %bb.0:
-; RV32XTHEADMEMIDX-NEXT:    th.flrw ft0, a0, a1, 2
-; RV32XTHEADMEMIDX-NEXT:    fadd.s fa0, ft0, ft0
+; RV32XTHEADMEMIDX-NEXT:    th.flrw fa5, a0, a1, 2
+; RV32XTHEADMEMIDX-NEXT:    fadd.s fa0, fa5, fa5
 ; RV32XTHEADMEMIDX-NEXT:    ret
 ;
 ; RV64XTHEADFMEMIDX-LABEL: flrw:
 ; RV64XTHEADFMEMIDX:       # %bb.0:
-; RV64XTHEADFMEMIDX-NEXT:    th.flrw ft0, a0, a1, 2
-; RV64XTHEADFMEMIDX-NEXT:    fadd.s fa0, ft0, ft0
+; RV64XTHEADFMEMIDX-NEXT:    th.flrw fa5, a0, a1, 2
+; RV64XTHEADFMEMIDX-NEXT:    fadd.s fa0, fa5, fa5
 ; RV64XTHEADFMEMIDX-NEXT:    ret
   %1 = getelementptr float, float* %a, i64 %b
   %2 = load float, float* %1, align 4
@@ -25,14 +25,14 @@ define float @flrw(float* %a, i64 %b) {
 define float @flurw(float* %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: flurw:
 ; RV32XTHEADMEMIDX:       # %bb.0:
-; RV32XTHEADMEMIDX-NEXT:    th.flrw ft0, a0, a1, 2
-; RV32XTHEADMEMIDX-NEXT:    fadd.s fa0, ft0, ft0
+; RV32XTHEADMEMIDX-NEXT:    th.flrw fa5, a0, a1, 2
+; RV32XTHEADMEMIDX-NEXT:    fadd.s fa0, fa5, fa5
 ; RV32XTHEADMEMIDX-NEXT:    ret
 ;
 ; RV64XTHEADFMEMIDX-LABEL: flurw:
 ; RV64XTHEADFMEMIDX:       # %bb.0:
-; RV64XTHEADFMEMIDX-NEXT:    th.flurw ft0, a0, a1, 2
-; RV64XTHEADFMEMIDX-NEXT:    fadd.s fa0, ft0, ft0
+; RV64XTHEADFMEMIDX-NEXT:    th.flurw fa5, a0, a1, 2
+; RV64XTHEADFMEMIDX-NEXT:    fadd.s fa0, fa5, fa5
 ; RV64XTHEADFMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
   %2 = getelementptr float, float* %a, i64 %1
@@ -44,14 +44,14 @@ define float @flurw(float* %a, i32 %b) {
 define void @fsrw(float* %a, i64 %b, float %c) {
 ; RV32XTHEADMEMIDX-LABEL: fsrw:
 ; RV32XTHEADMEMIDX:       # %bb.0:
-; RV32XTHEADMEMIDX-NEXT:    fadd.s ft0, fa0, fa0
-; RV32XTHEADMEMIDX-NEXT:    th.fsrw ft0, a0, a1, 2
+; RV32XTHEADMEMIDX-NEXT:    fadd.s fa5, fa0, fa0
+; RV32XTHEADMEMIDX-NEXT:    th.fsrw fa5, a0, a1, 2
 ; RV32XTHEADMEMIDX-NEXT:    ret
 ;
 ; RV64XTHEADFMEMIDX-LABEL: fsrw:
 ; RV64XTHEADFMEMIDX:       # %bb.0:
-; RV64XTHEADFMEMIDX-NEXT:    fadd.s ft0, fa0, fa0
-; RV64XTHEADFMEMIDX-NEXT:    th.fsrw ft0, a0, a1, 2
+; RV64XTHEADFMEMIDX-NEXT:    fadd.s fa5, fa0, fa0
+; RV64XTHEADFMEMIDX-NEXT:    th.fsrw fa5, a0, a1, 2
 ; RV64XTHEADFMEMIDX-NEXT:    ret
   %1 = fadd float %c, %c
   %2 = getelementptr float, float* %a, i64 %b
@@ -62,14 +62,14 @@ define void @fsrw(float* %a, i64 %b, float %c) {
 define void @fsurw(float* %a, i32 %b, float %c) {
 ; RV32XTHEADMEMIDX-LABEL: fsurw:
 ; RV32XTHEADMEMIDX:       # %bb.0:
-; RV32XTHEADMEMIDX-NEXT:    fadd.s ft0, fa0, fa0
-; RV32XTHEADMEMIDX-NEXT:    th.fsrw ft0, a0, a1, 2
+; RV32XTHEADMEMIDX-NEXT:    fadd.s fa5, fa0, fa0
+; RV32XTHEADMEMIDX-NEXT:    th.fsrw fa5, a0, a1, 2
 ; RV32XTHEADMEMIDX-NEXT:    ret
 ;
 ; RV64XTHEADFMEMIDX-LABEL: fsurw:
 ; RV64XTHEADFMEMIDX:       # %bb.0:
-; RV64XTHEADFMEMIDX-NEXT:    fadd.s ft0, fa0, fa0
-; RV64XTHEADFMEMIDX-NEXT:    th.fsurw ft0, a0, a1, 2
+; RV64XTHEADFMEMIDX-NEXT:    fadd.s fa5, fa0, fa0
+; RV64XTHEADFMEMIDX-NEXT:    th.fsurw fa5, a0, a1, 2
 ; RV64XTHEADFMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
   %2 = fadd float %c, %c
@@ -81,14 +81,14 @@ define void @fsurw(float* %a, i32 %b, float %c) {
 define double @flrd(double* %a, i64 %b) {
 ; RV32XTHEADMEMIDX-LABEL: flrd:
 ; RV32XTHEADMEMIDX:       # %bb.0:
-; RV32XTHEADMEMIDX-NEXT:    th.flrd ft0, a0, a1, 3
-; RV32XTHEADMEMIDX-NEXT:    fadd.d fa0, ft0, ft0
+; RV32XTHEADMEMIDX-NEXT:    th.flrd fa5, a0, a1, 3
+; RV32XTHEADMEMIDX-NEXT:    fadd.d fa0, fa5, fa5
 ; RV32XTHEADMEMIDX-NEXT:    ret
 ;
 ; RV64XTHEADFMEMIDX-LABEL: flrd:
 ; RV64XTHEADFMEMIDX:       # %bb.0:
-; RV64XTHEADFMEMIDX-NEXT:    th.flrd ft0, a0, a1, 3
-; RV64XTHEADFMEMIDX-NEXT:    fadd.d fa0, ft0, ft0
+; RV64XTHEADFMEMIDX-NEXT:    th.flrd fa5, a0, a1, 3
+; RV64XTHEADFMEMIDX-NEXT:    fadd.d fa0, fa5, fa5
 ; RV64XTHEADFMEMIDX-NEXT:    ret
   %1 = getelementptr double, double* %a, i64 %b
   %2 = load double, double* %1, align 8
@@ -99,14 +99,14 @@ define double @flrd(double* %a, i64 %b) {
 define double @flurd(double* %a, i32 %b) {
 ; RV32XTHEADMEMIDX-LABEL: flurd:
 ; RV32XTHEADMEMIDX:       # %bb.0:
-; RV32XTHEADMEMIDX-NEXT:    th.flrd ft0, a0, a1, 3
-; RV32XTHEADMEMIDX-NEXT:    fadd.d fa0, ft0, ft0
+; RV32XTHEADMEMIDX-NEXT:    th.flrd fa5, a0, a1, 3
+; RV32XTHEADMEMIDX-NEXT:    fadd.d fa0, fa5, fa5
 ; RV32XTHEADMEMIDX-NEXT:    ret
 ;
 ; RV64XTHEADFMEMIDX-LABEL: flurd:
 ; RV64XTHEADFMEMIDX:       # %bb.0:
-; RV64XTHEADFMEMIDX-NEXT:    th.flurd ft0, a0, a1, 3
-; RV64XTHEADFMEMIDX-NEXT:    fadd.d fa0, ft0, ft0
+; RV64XTHEADFMEMIDX-NEXT:    th.flurd fa5, a0, a1, 3
+; RV64XTHEADFMEMIDX-NEXT:    fadd.d fa0, fa5, fa5
 ; RV64XTHEADFMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
   %2 = getelementptr double, double* %a, i64 %1
@@ -118,14 +118,14 @@ define double @flurd(double* %a, i32 %b) {
 define void @fsrd(double* %a, i64 %b, double %c) {
 ; RV32XTHEADMEMIDX-LABEL: fsrd:
 ; RV32XTHEADMEMIDX:       # %bb.0:
-; RV32XTHEADMEMIDX-NEXT:    fadd.d ft0, fa0, fa0
-; RV32XTHEADMEMIDX-NEXT:    th.fsrd ft0, a0, a1, 3
+; RV32XTHEADMEMIDX-NEXT:    fadd.d fa5, fa0, fa0
+; RV32XTHEADMEMIDX-NEXT:    th.fsrd fa5, a0, a1, 3
 ; RV32XTHEADMEMIDX-NEXT:    ret
 ;
 ; RV64XTHEADFMEMIDX-LABEL: fsrd:
 ; RV64XTHEADFMEMIDX:       # %bb.0:
-; RV64XTHEADFMEMIDX-NEXT:    fadd.d ft0, fa0, fa0
-; RV64XTHEADFMEMIDX-NEXT:    th.fsrd ft0, a0, a1, 3
+; RV64XTHEADFMEMIDX-NEXT:    fadd.d fa5, fa0, fa0
+; RV64XTHEADFMEMIDX-NEXT:    th.fsrd fa5, a0, a1, 3
 ; RV64XTHEADFMEMIDX-NEXT:    ret
   %1 = fadd double %c, %c
   %2 = getelementptr double, double* %a, i64 %b
@@ -136,14 +136,14 @@ define void @fsrd(double* %a, i64 %b, double %c) {
 define void @fsurd(double* %a, i32 %b, double %c) {
 ; RV32XTHEADMEMIDX-LABEL: fsurd:
 ; RV32XTHEADMEMIDX:       # %bb.0:
-; RV32XTHEADMEMIDX-NEXT:    fadd.d ft0, fa0, fa0
-; RV32XTHEADMEMIDX-NEXT:    th.fsrd ft0, a0, a1, 3
+; RV32XTHEADMEMIDX-NEXT:    fadd.d fa5, fa0, fa0
+; RV32XTHEADMEMIDX-NEXT:    th.fsrd fa5, a0, a1, 3
 ; RV32XTHEADMEMIDX-NEXT:    ret
 ;
 ; RV64XTHEADFMEMIDX-LABEL: fsurd:
 ; RV64XTHEADFMEMIDX:       # %bb.0:
-; RV64XTHEADFMEMIDX-NEXT:    fadd.d ft0, fa0, fa0
-; RV64XTHEADFMEMIDX-NEXT:    th.fsurd ft0, a0, a1, 3
+; RV64XTHEADFMEMIDX-NEXT:    fadd.d fa5, fa0, fa0
+; RV64XTHEADFMEMIDX-NEXT:    th.fsurd fa5, a0, a1, 3
 ; RV64XTHEADFMEMIDX-NEXT:    ret
   %1 = zext i32 %b to i64
   %2 = fadd double %c, %c

diff --git a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
index cbb578fb33d6..cc84cfebf41e 100644
--- a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
@@ -17,16 +17,16 @@ declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata)
 define half @sqrt_f16(half %a) nounwind strictfp {
 ; RV32IZFHMIN-LABEL: sqrt_f16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32IZFHMIN-NEXT:    fsqrt.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32IZFHMIN-NEXT:    fsqrt.s fa5, fa5
+; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: sqrt_f16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fsqrt.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fsqrt.s fa5, fa5
+; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.sqrt.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret half %1
@@ -233,14 +233,14 @@ declare iXLen @llvm.experimental.constrained.lrint.iXLen.f16(half, metadata, met
 define iXLen @lrint_f16(half %a) nounwind strictfp {
 ; RV32IZFHMIN-LABEL: lrint_f16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: lrint_f16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call iXLen @llvm.experimental.constrained.lrint.iXLen.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret iXLen %1
@@ -251,14 +251,14 @@ declare iXLen @llvm.experimental.constrained.lround.iXLen.f16(half, metadata)
 define iXLen @lround_f16(half %a) nounwind strictfp {
 ; RV32IZFHMIN-LABEL: lround_f16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: lround_f16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rmm
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rmm
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call iXLen @llvm.experimental.constrained.lround.iXLen.f16(half %a, metadata !"fpexcept.strict") strictfp
   ret iXLen %1
@@ -279,8 +279,8 @@ define i64 @llrint_f16(half %a) nounwind strictfp {
 ;
 ; RV64IZFHMIN-LABEL: llrint_f16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call i64 @llvm.experimental.constrained.llrint.i64.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
   ret i64 %1
@@ -301,8 +301,8 @@ define i64 @llround_f16(half %a) nounwind strictfp {
 ;
 ; RV64IZFHMIN-LABEL: llround_f16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rmm
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rmm
 ; RV64IZFHMIN-NEXT:    ret
   %1 = call i64 @llvm.experimental.constrained.llround.i64.f16(half %a, metadata !"fpexcept.strict") strictfp
   ret i64 %1

diff --git a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll
index d1d97312536f..d1a91090a5e4 100644
--- a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll
@@ -19,26 +19,26 @@ declare iXLen @llvm.lrint.iXLen.f16(half)
 define iXLen @lrint_f16(half %a) nounwind {
 ; RV32IZFHMIN-LABEL: lrint_f16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: lrint_f16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5
 ; RV64IZFHMIN-NEXT:    ret
 ;
 ; RV32IDZFHMIN-LABEL: lrint_f16:
 ; RV32IDZFHMIN:       # %bb.0:
-; RV32IDZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32IDZFHMIN-NEXT:    fcvt.w.s a0, ft0
+; RV32IDZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32IDZFHMIN-NEXT:    fcvt.w.s a0, fa5
 ; RV32IDZFHMIN-NEXT:    ret
 ;
 ; RV64IDZFHMIN-LABEL: lrint_f16:
 ; RV64IDZFHMIN:       # %bb.0:
-; RV64IDZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IDZFHMIN-NEXT:    fcvt.l.s a0, ft0
+; RV64IDZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IDZFHMIN-NEXT:    fcvt.l.s a0, fa5
 ; RV64IDZFHMIN-NEXT:    ret
   %1 = call iXLen @llvm.lrint.iXLen.f16(half %a)
   ret iXLen %1
@@ -49,26 +49,26 @@ declare iXLen @llvm.lround.iXLen.f16(half)
 define iXLen @lround_f16(half %a) nounwind {
 ; RV32IZFHMIN-LABEL: lround_f16:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32IZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
+; RV32IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32IZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: lround_f16:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IZFHMIN-NEXT:    fcvt.l.s a0, ft0, rmm
+; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IZFHMIN-NEXT:    fcvt.l.s a0, fa5, rmm
 ; RV64IZFHMIN-NEXT:    ret
 ;
 ; RV32IDZFHMIN-LABEL: lround_f16:
 ; RV32IDZFHMIN:       # %bb.0:
-; RV32IDZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV32IDZFHMIN-NEXT:    fcvt.w.s a0, ft0, rmm
+; RV32IDZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32IDZFHMIN-NEXT:    fcvt.w.s a0, fa5, rmm
 ; RV32IDZFHMIN-NEXT:    ret
 ;
 ; RV64IDZFHMIN-LABEL: lround_f16:
 ; RV64IDZFHMIN:       # %bb.0:
-; RV64IDZFHMIN-NEXT:    fcvt.s.h ft0, fa0
-; RV64IDZFHMIN-NEXT:    fcvt.l.s a0, ft0, rmm
+; RV64IDZFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64IDZFHMIN-NEXT:    fcvt.l.s a0, fa5, rmm
 ; RV64IDZFHMIN-NEXT:    ret
   %1 = call iXLen @llvm.lround.iXLen.f16(half %a)
   ret iXLen %1


        

