[llvm] [LoongArch] Custom lower FP_TO_FP16 and FP16_TO_FP to correct ABI of libcall (PR #141702)

via llvm-commits llvm-commits at lists.llvm.org
Tue May 27 19:09:37 PDT 2025


https://github.com/Ami-zhang created https://github.com/llvm/llvm-project/pull/141702

This change passes 'half' in the lower 16 bits of an f32 value with the F/D ABIs. LoongArch currently lacks a hardware extension for the fp16 data type, and the ABI manual now documents that the half-precision floating-point type follows the FP calling convention.

Previously, we kept the 'half' type in its 16-bit format between operations, and regardless of whether the F extension was enabled, the value was passed in the lower 16 bits of a GPR in 'half' format.

With this patch, depending on the ABI in use, the value is passed either in an FPR or in a GPR, still in 'half' format in the low 16 bits. This keeps the bit placement consistent with the one an fp16 hardware extension will use once enabled.
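
As a minimal illustration (a hypothetical function, in the spirit of the new calling-conv-half.ll tests below): when compiled with --mtriple=loongarch64 --mattr=+d -target-abi=lp64d, the 'half' argument now arrives NaN-boxed in $fa0 rather than in the low 16 bits of $a0:

  ; llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64d
  define i32 @half_to_int(half %x) nounwind {
    ; %x is received in $fa0 as an f32 whose upper 16 bits are all ones
    %1 = fptosi half %x to i32
    ret i32 %1
  }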

From 31e8b35595d10487b725a73ef369ef74adba988b Mon Sep 17 00:00:00 2001
From: Ami-zhang <zhanglimin at loongson.cn>
Date: Thu, 24 Apr 2025 19:43:20 +0800
Subject: [PATCH] [LoongArch] Custom lower FP_TO_FP16 and FP16_TO_FP to correct
 ABI of libcall

This change passes 'half' in the lower 16 bits of an f32 value with the
F/D ABIs. LoongArch currently lacks a hardware extension for the fp16
data type, and the ABI manual now documents that the half-precision
floating-point type follows the FP calling convention.

Previously, we kept the 'half' type in its 16-bit format between
operations, and regardless of whether the F extension was enabled, the
value was passed in the lower 16 bits of a GPR in 'half' format.

With this patch, depending on the ABI in use, the value is passed either
in an FPR or in a GPR, still in 'half' format in the low 16 bits. This
keeps the bit placement consistent with the one an fp16 hardware
extension will use once enabled.

Co-authored-by: WANG Rui <wangrui at loongson.cn>
---
 .../LoongArch/LoongArchISelLowering.cpp       |  138 +-
 .../Target/LoongArch/LoongArchISelLowering.h  |   24 +
 .../CodeGen/LoongArch/calling-conv-half.ll    | 1628 +++++++++++++++++
 llvm/test/CodeGen/LoongArch/fp16-promote.ll   |  202 +-
 llvm/test/CodeGen/LoongArch/issue97975.ll     |  444 +++++
 llvm/test/CodeGen/LoongArch/issue97981.ll     |  127 ++
 6 files changed, 2489 insertions(+), 74 deletions(-)
 create mode 100644 llvm/test/CodeGen/LoongArch/calling-conv-half.ll
 create mode 100644 llvm/test/CodeGen/LoongArch/issue97975.ll
 create mode 100644 llvm/test/CodeGen/LoongArch/issue97981.ll
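
A note on the register representation used below: splitValueIntoRegisterParts
places the 16 bits of the half in the low half of the f32 and fills the upper
16 bits with ones, so the boxed value is always a quiet f32 NaN. A worked
example, matching the constants the tests materialize:

  half 2.0       -> bits 0x4000
  NaN-boxed f32  -> bits 0xFFFF4000  (sign=1, exp=0xFF, mantissa!=0: quiet NaN)

This 0xFFFF4000 is exactly the constant built with "lu12i.w $t0, -12" in
caller_half_in_fregs below when half 2.0 travels through a GPR or the stack.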

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 9f5c94ddea44f..c7b2a1a8ffbf8 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -199,8 +199,10 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
     setOperationAction(ISD::FPOW, MVT::f32, Expand);
     setOperationAction(ISD::FREM, MVT::f32, Expand);
-    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
-    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
+    setOperationAction(ISD::FP16_TO_FP, MVT::f32,
+                       Subtarget.isSoftFPABI() ? LibCall : Custom);
+    setOperationAction(ISD::FP_TO_FP16, MVT::f32,
+                       Subtarget.isSoftFPABI() ? LibCall : Custom);
 
     if (Subtarget.is64Bit())
       setOperationAction(ISD::FRINT, MVT::f32, Legal);
@@ -239,7 +241,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::FPOW, MVT::f64, Expand);
     setOperationAction(ISD::FREM, MVT::f64, Expand);
     setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
-    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
+    setOperationAction(ISD::FP_TO_FP16, MVT::f64,
+                       Subtarget.isSoftFPABI() ? LibCall : Custom);
 
     if (Subtarget.is64Bit())
       setOperationAction(ISD::FRINT, MVT::f64, Legal);
@@ -490,6 +493,10 @@ SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
     return lowerPREFETCH(Op, DAG);
   case ISD::SELECT:
     return lowerSELECT(Op, DAG);
+  case ISD::FP_TO_FP16:
+    return lowerFP_TO_FP16(Op, DAG);
+  case ISD::FP16_TO_FP:
+    return lowerFP16_TO_FP(Op, DAG);
   }
   return SDValue();
 }
@@ -2242,6 +2249,40 @@ SDValue LoongArchTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
   return SDValue();
 }
 
+SDValue LoongArchTargetLowering::lowerFP_TO_FP16(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  // Custom lower to ensure the libcall return is passed in an FPR on hard
+  // float ABIs.
+  SDLoc DL(Op);
+  MakeLibCallOptions CallOptions;
+  SDValue Op0 = Op.getOperand(0);
+  SDValue Chain = SDValue();
+  RTLIB::Libcall LC = RTLIB::getFPROUND(Op0.getValueType(), MVT::f16);
+  SDValue Res;
+  std::tie(Res, Chain) =
+      makeLibCall(DAG, LC, MVT::f32, Op0, CallOptions, DL, Chain);
+  if (Subtarget.is64Bit())
+    return DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Res);
+  return DAG.getBitcast(MVT::i32, Res);
+}
+
+SDValue LoongArchTargetLowering::lowerFP16_TO_FP(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  // Custom lower to ensure the libcall argument is passed in an FPR on hard
+  // float ABIs.
+  SDLoc DL(Op);
+  MakeLibCallOptions CallOptions;
+  SDValue Op0 = Op.getOperand(0);
+  SDValue Chain = SDValue();
+  SDValue Arg = Subtarget.is64Bit() ? DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64,
+                                                  DL, MVT::f32, Op0)
+                                    : DAG.getBitcast(MVT::f32, Op0);
+  SDValue Res;
+  std::tie(Res, Chain) = makeLibCall(DAG, RTLIB::FPEXT_F16_F32, MVT::f32, Arg,
+                                     CallOptions, DL, Chain);
+  return Res;
+}
+
 static bool isConstantOrUndef(const SDValue Op) {
   if (Op->isUndef())
     return true;
@@ -3841,6 +3882,8 @@ void LoongArchTargetLowering::ReplaceNodeResults(
     EVT FVT = EVT::getFloatingPointVT(N->getValueSizeInBits(0));
     if (getTypeAction(*DAG.getContext(), Src.getValueType()) !=
         TargetLowering::TypeSoftenFloat) {
+      if (!isTypeLegal(Src.getValueType()))
+        return;
       if (Src.getValueType() == MVT::f16)
         Src = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
       SDValue Dst = DAG.getNode(LoongArchISD::FTINT, DL, FVT, Src);
@@ -5289,6 +5332,33 @@ performINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
+static SDValue performMOVGR2FR_WCombine(SDNode *N, SelectionDAG &DAG,
+                                        TargetLowering::DAGCombinerInfo &DCI,
+                                        const LoongArchSubtarget &Subtarget) {
+  // If the input to MOVGR2FR_W_LA64 is just MOVFR2GR_S_LA64 then the
+  // conversion is unnecessary and can be replaced with the
+  // MOVFR2GR_S_LA64 operand.
+  SDValue Op0 = N->getOperand(0);
+  if (Op0.getOpcode() == LoongArchISD::MOVFR2GR_S_LA64)
+    return Op0.getOperand(0);
+  return SDValue();
+}
+
+static SDValue performMOVFR2GR_SCombine(SDNode *N, SelectionDAG &DAG,
+                                        TargetLowering::DAGCombinerInfo &DCI,
+                                        const LoongArchSubtarget &Subtarget) {
+  // If the input to MOVFR2GR_S_LA64 is just MOVGR2FR_W_LA64 then the
+  // conversion is unnecessary and can be replaced with the MOVGR2FR_W_LA64
+  // operand.
+  SDValue Op0 = N->getOperand(0);
+  MVT VT = N->getSimpleValueType(0);
+  if (Op0->getOpcode() == LoongArchISD::MOVGR2FR_W_LA64) {
+    assert(Op0.getOperand(0).getValueType() == VT && "Unexpected value type!");
+    return Op0.getOperand(0);
+  }
+  return SDValue();
+}
+
 SDValue LoongArchTargetLowering::PerformDAGCombine(SDNode *N,
                                                    DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -5307,6 +5377,10 @@ SDValue LoongArchTargetLowering::PerformDAGCombine(SDNode *N,
     return performBITREV_WCombine(N, DAG, DCI, Subtarget);
   case ISD::INTRINSIC_WO_CHAIN:
     return performINTRINSIC_WO_CHAINCombine(N, DAG, DCI, Subtarget);
+  case LoongArchISD::MOVGR2FR_W_LA64:
+    return performMOVGR2FR_WCombine(N, DAG, DCI, Subtarget);
+  case LoongArchISD::MOVFR2GR_S_LA64:
+    return performMOVFR2GR_SCombine(N, DAG, DCI, Subtarget);
   }
   return SDValue();
 }
@@ -7633,3 +7707,61 @@ LoongArchTargetLowering::getPreferredVectorAction(MVT VT) const {
 
   return TargetLoweringBase::getPreferredVectorAction(VT);
 }
+
+bool LoongArchTargetLowering::splitValueIntoRegisterParts(
+    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
+    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
+  bool IsABIRegCopy = CC.has_value();
+  EVT ValueVT = Val.getValueType();
+
+  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
+    // Cast the f16 to i16, any-extend to i32, set the upper 16 bits to all
+    // ones to NaN-box the value, and cast to f32.
+    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
+    Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
+    Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
+                      DAG.getConstant(0xFFFF0000, DL, MVT::i32));
+    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
+    Parts[0] = Val;
+    return true;
+  }
+
+  return false;
+}
+
+SDValue LoongArchTargetLowering::joinRegisterPartsIntoValue(
+    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
+    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
+  bool IsABIRegCopy = CC.has_value();
+
+  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
+    SDValue Val = Parts[0];
+
+    // Cast the f32 to i32, truncate to i16, and cast back to f16.
+    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
+    Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
+    Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
+    return Val;
+  }
+
+  return SDValue();
+}
+
+MVT LoongArchTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
+                                                           CallingConv::ID CC,
+                                                           EVT VT) const {
+  // Use f32 to pass f16.
+  if (VT == MVT::f16 && Subtarget.hasBasicF())
+    return MVT::f32;
+
+  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
+}
+
+unsigned LoongArchTargetLowering::getNumRegistersForCallingConv(
+    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {
+  // Use f32 to pass f16.
+  if (VT == MVT::f16 && Subtarget.hasBasicF())
+    return 1;
+
+  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
+}
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 6bf295984dfc5..8c00ec75db94b 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -361,6 +361,8 @@ class LoongArchTargetLowering : public TargetLowering {
   SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) const;
 
   bool isFPImmLegal(const APFloat &Imm, EVT VT,
                     bool ForCodeSize) const override;
@@ -385,6 +387,28 @@ class LoongArchTargetLowering : public TargetLowering {
       const SmallVectorImpl<CCValAssign> &ArgLocs) const;
 
   bool softPromoteHalfType() const override { return true; }
+
+  bool
+  splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
+                              SDValue *Parts, unsigned NumParts, MVT PartVT,
+                              std::optional<CallingConv::ID> CC) const override;
+
+  SDValue
+  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
+                             const SDValue *Parts, unsigned NumParts,
+                             MVT PartVT, EVT ValueVT,
+                             std::optional<CallingConv::ID> CC) const override;
+
+  /// Return the register type for a given MVT, ensuring f16 is passed in
+  /// an f32 register when the basic F extension is available.
+  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
+                                    EVT VT) const override;
+
+  /// Return the number of registers for a given MVT, ensuring an f16
+  /// passed as f32 uses a single register.
+  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
+                                         CallingConv::ID CC,
+                                         EVT VT) const override;
 };
 
 } // end namespace llvm
diff --git a/llvm/test/CodeGen/LoongArch/calling-conv-half.ll b/llvm/test/CodeGen/LoongArch/calling-conv-half.ll
new file mode 100644
index 0000000000000..c88b67f13d1e7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/calling-conv-half.ll
@@ -0,0 +1,1628 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f -target-abi=ilp32s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F-ILP32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f -target-abi=ilp32d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F-ILP32D
+; RUN: llc --mtriple=loongarch32 --mattr=+d -target-abi=ilp32s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D-ILP32S
+; RUN: llc --mtriple=loongarch32 --mattr=+d -target-abi=ilp32d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D-ILP32D
+; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f -target-abi=lp64s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F-LP64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f -target-abi=lp64d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F-LP64D
+; RUN: llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D-LP64S
+; RUN: llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D-LP64D
+
+define i32 @callee_half_in_fregs(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, half %i) nounwind {
+; LA32S-LABEL: callee_half_in_fregs:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -16
+; LA32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32S-NEXT:    ld.hu $a1, $sp, 16
+; LA32S-NEXT:    move $fp, $a0
+; LA32S-NEXT:    move $a0, $a1
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    bl __fixsfsi
+; LA32S-NEXT:    add.w $a0, $fp, $a0
+; LA32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 16
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: callee_half_in_fregs:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    ld.hu $a1, $sp, 16
+; LA32F-ILP32S-NEXT:    move $fp, $a0
+; LA32F-ILP32S-NEXT:    move $a0, $a1
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    add.w $a0, $fp, $a0
+; LA32F-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: callee_half_in_fregs:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    move $fp, $a0
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    add.w $a0, $fp, $a0
+; LA32F-ILP32D-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: callee_half_in_fregs:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    ld.hu $a1, $sp, 16
+; LA32D-ILP32S-NEXT:    move $fp, $a0
+; LA32D-ILP32S-NEXT:    move $a0, $a1
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32S-NEXT:    add.w $a0, $fp, $a0
+; LA32D-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: callee_half_in_fregs:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    move $fp, $a0
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    add.w $a0, $fp, $a0
+; LA32D-ILP32D-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: callee_half_in_fregs:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -16
+; LA64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64S-NEXT:    move $fp, $a0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64S-NEXT:    add.w $a0, $fp, $a0
+; LA64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 16
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: callee_half_in_fregs:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    ld.hu $a1, $sp, 16
+; LA64F-LP64S-NEXT:    move $fp, $a0
+; LA64F-LP64S-NEXT:    move $a0, $a1
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64S-NEXT:    add.w $a0, $fp, $a0
+; LA64F-LP64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: callee_half_in_fregs:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    move $fp, $a0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64D-NEXT:    add.w $a0, $fp, $a0
+; LA64F-LP64D-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: callee_half_in_fregs:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    ld.hu $a1, $sp, 16
+; LA64D-LP64S-NEXT:    move $fp, $a0
+; LA64D-LP64S-NEXT:    move $a0, $a1
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64S-NEXT:    add.w $a0, $fp, $a0
+; LA64D-LP64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: callee_half_in_fregs:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    move $fp, $a0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64D-NEXT:    add.w $a0, $fp, $a0
+; LA64D-LP64D-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = fptosi half %i to i32
+  %2 = add i32 %a, %1
+  ret i32 %2
+}
+
+define i32 @caller_half_in_fregs() nounwind {
+; LA32S-LABEL: caller_half_in_fregs:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -16
+; LA32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT:    lu12i.w $t0, 4
+; LA32S-NEXT:    ori $a0, $zero, 1
+; LA32S-NEXT:    ori $a1, $zero, 2
+; LA32S-NEXT:    ori $a2, $zero, 3
+; LA32S-NEXT:    ori $a3, $zero, 4
+; LA32S-NEXT:    ori $a4, $zero, 5
+; LA32S-NEXT:    ori $a5, $zero, 6
+; LA32S-NEXT:    ori $a6, $zero, 7
+; LA32S-NEXT:    ori $a7, $zero, 8
+; LA32S-NEXT:    st.w $t0, $sp, 0
+; LA32S-NEXT:    bl callee_half_in_fregs
+; LA32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 16
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: caller_half_in_fregs:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    lu12i.w $t0, -12
+; LA32F-ILP32S-NEXT:    ori $a0, $zero, 1
+; LA32F-ILP32S-NEXT:    ori $a1, $zero, 2
+; LA32F-ILP32S-NEXT:    ori $a2, $zero, 3
+; LA32F-ILP32S-NEXT:    ori $a3, $zero, 4
+; LA32F-ILP32S-NEXT:    ori $a4, $zero, 5
+; LA32F-ILP32S-NEXT:    ori $a5, $zero, 6
+; LA32F-ILP32S-NEXT:    ori $a6, $zero, 7
+; LA32F-ILP32S-NEXT:    ori $a7, $zero, 8
+; LA32F-ILP32S-NEXT:    st.w $t0, $sp, 0
+; LA32F-ILP32S-NEXT:    bl callee_half_in_fregs
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: caller_half_in_fregs:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; LA32F-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
+; LA32F-ILP32D-NEXT:    ori $a0, $zero, 1
+; LA32F-ILP32D-NEXT:    ori $a1, $zero, 2
+; LA32F-ILP32D-NEXT:    ori $a2, $zero, 3
+; LA32F-ILP32D-NEXT:    ori $a3, $zero, 4
+; LA32F-ILP32D-NEXT:    ori $a4, $zero, 5
+; LA32F-ILP32D-NEXT:    ori $a5, $zero, 6
+; LA32F-ILP32D-NEXT:    ori $a6, $zero, 7
+; LA32F-ILP32D-NEXT:    ori $a7, $zero, 8
+; LA32F-ILP32D-NEXT:    bl callee_half_in_fregs
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: caller_half_in_fregs:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    lu12i.w $t0, -12
+; LA32D-ILP32S-NEXT:    ori $a0, $zero, 1
+; LA32D-ILP32S-NEXT:    ori $a1, $zero, 2
+; LA32D-ILP32S-NEXT:    ori $a2, $zero, 3
+; LA32D-ILP32S-NEXT:    ori $a3, $zero, 4
+; LA32D-ILP32S-NEXT:    ori $a4, $zero, 5
+; LA32D-ILP32S-NEXT:    ori $a5, $zero, 6
+; LA32D-ILP32S-NEXT:    ori $a6, $zero, 7
+; LA32D-ILP32S-NEXT:    ori $a7, $zero, 8
+; LA32D-ILP32S-NEXT:    st.w $t0, $sp, 0
+; LA32D-ILP32S-NEXT:    bl callee_half_in_fregs
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: caller_half_in_fregs:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; LA32D-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
+; LA32D-ILP32D-NEXT:    ori $a0, $zero, 1
+; LA32D-ILP32D-NEXT:    ori $a1, $zero, 2
+; LA32D-ILP32D-NEXT:    ori $a2, $zero, 3
+; LA32D-ILP32D-NEXT:    ori $a3, $zero, 4
+; LA32D-ILP32D-NEXT:    ori $a4, $zero, 5
+; LA32D-ILP32D-NEXT:    ori $a5, $zero, 6
+; LA32D-ILP32D-NEXT:    ori $a6, $zero, 7
+; LA32D-ILP32D-NEXT:    ori $a7, $zero, 8
+; LA32D-ILP32D-NEXT:    bl callee_half_in_fregs
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: caller_half_in_fregs:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -16
+; LA64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; LA64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
+; LA64S-NEXT:    ori $a0, $zero, 1
+; LA64S-NEXT:    ori $a1, $zero, 2
+; LA64S-NEXT:    ori $a2, $zero, 3
+; LA64S-NEXT:    ori $a3, $zero, 4
+; LA64S-NEXT:    ori $a4, $zero, 5
+; LA64S-NEXT:    ori $a5, $zero, 6
+; LA64S-NEXT:    ori $a6, $zero, 7
+; LA64S-NEXT:    ori $a7, $zero, 8
+; LA64S-NEXT:    pcaddu18i $ra, %call36(callee_half_in_fregs)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 16
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: caller_half_in_fregs:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    lu12i.w $t0, -12
+; LA64F-LP64S-NEXT:    lu32i.d $t0, 0
+; LA64F-LP64S-NEXT:    ori $a0, $zero, 1
+; LA64F-LP64S-NEXT:    ori $a1, $zero, 2
+; LA64F-LP64S-NEXT:    ori $a2, $zero, 3
+; LA64F-LP64S-NEXT:    ori $a3, $zero, 4
+; LA64F-LP64S-NEXT:    ori $a4, $zero, 5
+; LA64F-LP64S-NEXT:    ori $a5, $zero, 6
+; LA64F-LP64S-NEXT:    ori $a6, $zero, 7
+; LA64F-LP64S-NEXT:    ori $a7, $zero, 8
+; LA64F-LP64S-NEXT:    st.w $t0, $sp, 0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_in_fregs)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: caller_half_in_fregs:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; LA64F-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
+; LA64F-LP64D-NEXT:    ori $a0, $zero, 1
+; LA64F-LP64D-NEXT:    ori $a1, $zero, 2
+; LA64F-LP64D-NEXT:    ori $a2, $zero, 3
+; LA64F-LP64D-NEXT:    ori $a3, $zero, 4
+; LA64F-LP64D-NEXT:    ori $a4, $zero, 5
+; LA64F-LP64D-NEXT:    ori $a5, $zero, 6
+; LA64F-LP64D-NEXT:    ori $a6, $zero, 7
+; LA64F-LP64D-NEXT:    ori $a7, $zero, 8
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_in_fregs)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: caller_half_in_fregs:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    lu12i.w $t0, -12
+; LA64D-LP64S-NEXT:    lu32i.d $t0, 0
+; LA64D-LP64S-NEXT:    ori $a0, $zero, 1
+; LA64D-LP64S-NEXT:    ori $a1, $zero, 2
+; LA64D-LP64S-NEXT:    ori $a2, $zero, 3
+; LA64D-LP64S-NEXT:    ori $a3, $zero, 4
+; LA64D-LP64S-NEXT:    ori $a4, $zero, 5
+; LA64D-LP64S-NEXT:    ori $a5, $zero, 6
+; LA64D-LP64S-NEXT:    ori $a6, $zero, 7
+; LA64D-LP64S-NEXT:    ori $a7, $zero, 8
+; LA64D-LP64S-NEXT:    st.w $t0, $sp, 0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_in_fregs)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: caller_half_in_fregs:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; LA64D-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
+; LA64D-LP64D-NEXT:    ori $a0, $zero, 1
+; LA64D-LP64D-NEXT:    ori $a1, $zero, 2
+; LA64D-LP64D-NEXT:    ori $a2, $zero, 3
+; LA64D-LP64D-NEXT:    ori $a3, $zero, 4
+; LA64D-LP64D-NEXT:    ori $a4, $zero, 5
+; LA64D-LP64D-NEXT:    ori $a5, $zero, 6
+; LA64D-LP64D-NEXT:    ori $a6, $zero, 7
+; LA64D-LP64D-NEXT:    ori $a7, $zero, 8
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_in_fregs)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = call i32 @callee_half_in_fregs(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, half 2.0)
+  ret i32 %1
+}
+
+define i32 @callee_half_in_gregs(half %a, half %b, half %c, half %d, half %e, half %f, half %g, half %h, half %i, i32 %j) nounwind {
+; LA32S-LABEL: callee_half_in_gregs:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -16
+; LA32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32S-NEXT:    ld.w $fp, $sp, 20
+; LA32S-NEXT:    ld.hu $a0, $sp, 16
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    bl __fixsfsi
+; LA32S-NEXT:    add.w $a0, $fp, $a0
+; LA32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 16
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: callee_half_in_gregs:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    ld.w $fp, $sp, 20
+; LA32F-ILP32S-NEXT:    ld.hu $a0, $sp, 16
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    add.w $a0, $fp, $a0
+; LA32F-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: callee_half_in_gregs:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    move $fp, $a1
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    add.w $a0, $fp, $a0
+; LA32F-ILP32D-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: callee_half_in_gregs:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    ld.w $fp, $sp, 20
+; LA32D-ILP32S-NEXT:    ld.hu $a0, $sp, 16
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32S-NEXT:    add.w $a0, $fp, $a0
+; LA32D-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: callee_half_in_gregs:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    move $fp, $a1
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    add.w $a0, $fp, $a0
+; LA32D-ILP32D-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: callee_half_in_gregs:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -16
+; LA64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64S-NEXT:    move $fp, $a1
+; LA64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64S-NEXT:    add.w $a0, $fp, $a0
+; LA64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 16
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: callee_half_in_gregs:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    ld.w $fp, $sp, 24
+; LA64F-LP64S-NEXT:    ld.hu $a0, $sp, 16
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64S-NEXT:    add.w $a0, $fp, $a0
+; LA64F-LP64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: callee_half_in_gregs:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    move $fp, $a1
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64D-NEXT:    add.w $a0, $fp, $a0
+; LA64F-LP64D-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: callee_half_in_gregs:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    ld.w $fp, $sp, 24
+; LA64D-LP64S-NEXT:    ld.hu $a0, $sp, 16
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64S-NEXT:    add.w $a0, $fp, $a0
+; LA64D-LP64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: callee_half_in_gregs:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    move $fp, $a1
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64D-NEXT:    add.w $a0, $fp, $a0
+; LA64D-LP64D-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = fptosi half %i to i32
+  %2 = add i32 %j, %1
+  ret i32 %2
+}
+
+define i32 @caller_half_in_gregs() nounwind {
+; LA32S-LABEL: caller_half_in_gregs:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -16
+; LA32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT:    ori $a0, $zero, 10
+; LA32S-NEXT:    st.w $a0, $sp, 4
+; LA32S-NEXT:    lu12i.w $a1, 4
+; LA32S-NEXT:    ori $t0, $a1, 2176
+; LA32S-NEXT:    lu12i.w $a0, 3
+; LA32S-NEXT:    ori $a0, $a0, 3072
+; LA32S-NEXT:    ori $a2, $a1, 512
+; LA32S-NEXT:    ori $a3, $a1, 1024
+; LA32S-NEXT:    ori $a4, $a1, 1280
+; LA32S-NEXT:    ori $a5, $a1, 1536
+; LA32S-NEXT:    ori $a6, $a1, 1792
+; LA32S-NEXT:    ori $a7, $a1, 2048
+; LA32S-NEXT:    st.w $t0, $sp, 0
+; LA32S-NEXT:    bl callee_half_in_gregs
+; LA32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 16
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: caller_half_in_gregs:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    ori $a0, $zero, 10
+; LA32F-ILP32S-NEXT:    st.w $a0, $sp, 4
+; LA32F-ILP32S-NEXT:    lu12i.w $a1, -12
+; LA32F-ILP32S-NEXT:    ori $t0, $a1, 2176
+; LA32F-ILP32S-NEXT:    lu12i.w $a0, -13
+; LA32F-ILP32S-NEXT:    ori $a0, $a0, 3072
+; LA32F-ILP32S-NEXT:    ori $a2, $a1, 512
+; LA32F-ILP32S-NEXT:    ori $a3, $a1, 1024
+; LA32F-ILP32S-NEXT:    ori $a4, $a1, 1280
+; LA32F-ILP32S-NEXT:    ori $a5, $a1, 1536
+; LA32F-ILP32S-NEXT:    ori $a6, $a1, 1792
+; LA32F-ILP32S-NEXT:    ori $a7, $a1, 2048
+; LA32F-ILP32S-NEXT:    st.w $t0, $sp, 0
+; LA32F-ILP32S-NEXT:    bl callee_half_in_gregs
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: caller_half_in_gregs:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA32F-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI3_0)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA32F-ILP32D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI3_1)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; LA32F-ILP32D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI3_2)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; LA32F-ILP32D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI3_3)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; LA32F-ILP32D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI3_4)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; LA32F-ILP32D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI3_5)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; LA32F-ILP32D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI3_6)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_7)
+; LA32F-ILP32D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI3_7)
+; LA32F-ILP32D-NEXT:    lu12i.w $a0, -12
+; LA32F-ILP32D-NEXT:    ori $a0, $a0, 2176
+; LA32F-ILP32D-NEXT:    ori $a1, $zero, 10
+; LA32F-ILP32D-NEXT:    bl callee_half_in_gregs
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: caller_half_in_gregs:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    ori $a0, $zero, 10
+; LA32D-ILP32S-NEXT:    st.w $a0, $sp, 4
+; LA32D-ILP32S-NEXT:    lu12i.w $a1, -12
+; LA32D-ILP32S-NEXT:    ori $t0, $a1, 2176
+; LA32D-ILP32S-NEXT:    lu12i.w $a0, -13
+; LA32D-ILP32S-NEXT:    ori $a0, $a0, 3072
+; LA32D-ILP32S-NEXT:    ori $a2, $a1, 512
+; LA32D-ILP32S-NEXT:    ori $a3, $a1, 1024
+; LA32D-ILP32S-NEXT:    ori $a4, $a1, 1280
+; LA32D-ILP32S-NEXT:    ori $a5, $a1, 1536
+; LA32D-ILP32S-NEXT:    ori $a6, $a1, 1792
+; LA32D-ILP32S-NEXT:    ori $a7, $a1, 2048
+; LA32D-ILP32S-NEXT:    st.w $t0, $sp, 0
+; LA32D-ILP32S-NEXT:    bl callee_half_in_gregs
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: caller_half_in_gregs:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA32D-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI3_0)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA32D-ILP32D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI3_1)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; LA32D-ILP32D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI3_2)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; LA32D-ILP32D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI3_3)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; LA32D-ILP32D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI3_4)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; LA32D-ILP32D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI3_5)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; LA32D-ILP32D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI3_6)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_7)
+; LA32D-ILP32D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI3_7)
+; LA32D-ILP32D-NEXT:    lu12i.w $a0, -12
+; LA32D-ILP32D-NEXT:    ori $a0, $a0, 2176
+; LA32D-ILP32D-NEXT:    ori $a1, $zero, 10
+; LA32D-ILP32D-NEXT:    bl callee_half_in_gregs
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: caller_half_in_gregs:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -16
+; LA64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA64S-NEXT:    fld.s $ft0, $a0, %pc_lo12(.LCPI3_0)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI3_1)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; LA64S-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI3_2)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; LA64S-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI3_3)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; LA64S-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI3_4)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; LA64S-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI3_5)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; LA64S-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI3_6)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_7)
+; LA64S-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI3_7)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_8)
+; LA64S-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI3_8)
+; LA64S-NEXT:    movfr2gr.s $a0, $ft0
+; LA64S-NEXT:    ori $a1, $zero, 10
+; LA64S-NEXT:    pcaddu18i $ra, %call36(callee_half_in_gregs)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 16
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: caller_half_in_gregs:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -32
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA64F-LP64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI3_0)
+; LA64F-LP64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA64F-LP64S-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI3_1)
+; LA64F-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64S-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI3_2)
+; LA64F-LP64S-NEXT:    fld.s $fa0, $a1, %pc_lo12(.LCPI3_2)
+; LA64F-LP64S-NEXT:    movfr2gr.s $a1, $fa1
+; LA64F-LP64S-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI3_3)
+; LA64F-LP64S-NEXT:    fld.s $fa1, $a2, %pc_lo12(.LCPI3_3)
+; LA64F-LP64S-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-LP64S-NEXT:    pcalau12i $a3, %pc_hi20(.LCPI3_4)
+; LA64F-LP64S-NEXT:    fld.s $fa0, $a3, %pc_lo12(.LCPI3_4)
+; LA64F-LP64S-NEXT:    movfr2gr.s $a3, $fa1
+; LA64F-LP64S-NEXT:    pcalau12i $a4, %pc_hi20(.LCPI3_5)
+; LA64F-LP64S-NEXT:    fld.s $fa1, $a4, %pc_lo12(.LCPI3_5)
+; LA64F-LP64S-NEXT:    movfr2gr.s $a4, $fa0
+; LA64F-LP64S-NEXT:    pcalau12i $a5, %pc_hi20(.LCPI3_6)
+; LA64F-LP64S-NEXT:    fld.s $fa0, $a5, %pc_lo12(.LCPI3_6)
+; LA64F-LP64S-NEXT:    movfr2gr.s $a5, $fa1
+; LA64F-LP64S-NEXT:    ori $a6, $zero, 10
+; LA64F-LP64S-NEXT:    st.d $a6, $sp, 8
+; LA64F-LP64S-NEXT:    movfr2gr.s $a6, $fa0
+; LA64F-LP64S-NEXT:    pcalau12i $a7, %pc_hi20(.LCPI3_7)
+; LA64F-LP64S-NEXT:    fld.s $fa0, $a7, %pc_lo12(.LCPI3_7)
+; LA64F-LP64S-NEXT:    lu12i.w $a7, -12
+; LA64F-LP64S-NEXT:    ori $t0, $a7, 2176
+; LA64F-LP64S-NEXT:    lu32i.d $t0, 0
+; LA64F-LP64S-NEXT:    movfr2gr.s $a7, $fa0
+; LA64F-LP64S-NEXT:    st.w $t0, $sp, 0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_in_gregs)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 32
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: caller_half_in_gregs:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA64F-LP64D-NEXT:    fld.s $ft0, $a0, %pc_lo12(.LCPI3_0)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA64F-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI3_1)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; LA64F-LP64D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI3_2)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; LA64F-LP64D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI3_3)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; LA64F-LP64D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI3_4)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; LA64F-LP64D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI3_5)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; LA64F-LP64D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI3_6)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_7)
+; LA64F-LP64D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI3_7)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_8)
+; LA64F-LP64D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI3_8)
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $ft0
+; LA64F-LP64D-NEXT:    ori $a1, $zero, 10
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_in_gregs)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: caller_half_in_gregs:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -32
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA64D-LP64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI3_0)
+; LA64D-LP64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA64D-LP64S-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI3_1)
+; LA64D-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64S-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI3_2)
+; LA64D-LP64S-NEXT:    fld.s $fa0, $a1, %pc_lo12(.LCPI3_2)
+; LA64D-LP64S-NEXT:    movfr2gr.s $a1, $fa1
+; LA64D-LP64S-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI3_3)
+; LA64D-LP64S-NEXT:    fld.s $fa1, $a2, %pc_lo12(.LCPI3_3)
+; LA64D-LP64S-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-LP64S-NEXT:    pcalau12i $a3, %pc_hi20(.LCPI3_4)
+; LA64D-LP64S-NEXT:    fld.s $fa0, $a3, %pc_lo12(.LCPI3_4)
+; LA64D-LP64S-NEXT:    movfr2gr.s $a3, $fa1
+; LA64D-LP64S-NEXT:    pcalau12i $a4, %pc_hi20(.LCPI3_5)
+; LA64D-LP64S-NEXT:    fld.s $fa1, $a4, %pc_lo12(.LCPI3_5)
+; LA64D-LP64S-NEXT:    movfr2gr.s $a4, $fa0
+; LA64D-LP64S-NEXT:    pcalau12i $a5, %pc_hi20(.LCPI3_6)
+; LA64D-LP64S-NEXT:    fld.s $fa0, $a5, %pc_lo12(.LCPI3_6)
+; LA64D-LP64S-NEXT:    movfr2gr.s $a5, $fa1
+; LA64D-LP64S-NEXT:    ori $a6, $zero, 10
+; LA64D-LP64S-NEXT:    st.d $a6, $sp, 8
+; LA64D-LP64S-NEXT:    movfr2gr.s $a6, $fa0
+; LA64D-LP64S-NEXT:    pcalau12i $a7, %pc_hi20(.LCPI3_7)
+; LA64D-LP64S-NEXT:    fld.s $fa0, $a7, %pc_lo12(.LCPI3_7)
+; LA64D-LP64S-NEXT:    lu12i.w $a7, -12
+; LA64D-LP64S-NEXT:    ori $t0, $a7, 2176
+; LA64D-LP64S-NEXT:    lu32i.d $t0, 0
+; LA64D-LP64S-NEXT:    movfr2gr.s $a7, $fa0
+; LA64D-LP64S-NEXT:    st.w $t0, $sp, 0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_in_gregs)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 32
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: caller_half_in_gregs:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA64D-LP64D-NEXT:    fld.s $ft0, $a0, %pc_lo12(.LCPI3_0)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA64D-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI3_1)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; LA64D-LP64D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI3_2)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; LA64D-LP64D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI3_3)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; LA64D-LP64D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI3_4)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; LA64D-LP64D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI3_5)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; LA64D-LP64D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI3_6)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_7)
+; LA64D-LP64D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI3_7)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_8)
+; LA64D-LP64D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI3_8)
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $ft0
+; LA64D-LP64D-NEXT:    ori $a1, $zero, 10
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_in_gregs)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = call i32 @callee_half_in_gregs(half 1.0, half 2.0, half 3.0, half 4.0, half 5.0, half 6.0, half 7.0, half 8.0, half 9.0, i32 10)
+  ret i32 %1
+}
+
+define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, half %i, half %j, half %k, half %l, half %m, half %n, half %o, half %p, half %q) nounwind {
+; LA32S-LABEL: callee_half_on_stack:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -16
+; LA32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32S-NEXT:    ld.hu $a0, $sp, 48
+; LA32S-NEXT:    move $fp, $a7
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    bl __fixsfsi
+; LA32S-NEXT:    add.w $a0, $fp, $a0
+; LA32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 16
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: callee_half_on_stack:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    ld.hu $a0, $sp, 48
+; LA32F-ILP32S-NEXT:    move $fp, $a7
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    add.w $a0, $fp, $a0
+; LA32F-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: callee_half_on_stack:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    ld.hu $a0, $sp, 16
+; LA32F-ILP32D-NEXT:    move $fp, $a7
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    add.w $a0, $fp, $a0
+; LA32F-ILP32D-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: callee_half_on_stack:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    ld.hu $a0, $sp, 48
+; LA32D-ILP32S-NEXT:    move $fp, $a7
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32S-NEXT:    add.w $a0, $fp, $a0
+; LA32D-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: callee_half_on_stack:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    ld.hu $a0, $sp, 16
+; LA32D-ILP32D-NEXT:    move $fp, $a7
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    add.w $a0, $fp, $a0
+; LA32D-ILP32D-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: callee_half_on_stack:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -16
+; LA64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64S-NEXT:    ld.hu $a0, $sp, 16
+; LA64S-NEXT:    move $fp, $a7
+; LA64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64S-NEXT:    add.w $a0, $fp, $a0
+; LA64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 16
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: callee_half_on_stack:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    ld.hu $a0, $sp, 80
+; LA64F-LP64S-NEXT:    move $fp, $a7
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64S-NEXT:    add.w $a0, $fp, $a0
+; LA64F-LP64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: callee_half_on_stack:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    ld.hu $a0, $sp, 16
+; LA64F-LP64D-NEXT:    move $fp, $a7
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64D-NEXT:    add.w $a0, $fp, $a0
+; LA64F-LP64D-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: callee_half_on_stack:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    ld.hu $a0, $sp, 80
+; LA64D-LP64S-NEXT:    move $fp, $a7
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64S-NEXT:    add.w $a0, $fp, $a0
+; LA64D-LP64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: callee_half_on_stack:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    ld.hu $a0, $sp, 16
+; LA64D-LP64D-NEXT:    move $fp, $a7
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64D-NEXT:    add.w $a0, $fp, $a0
+; LA64D-LP64D-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = fptosi half %q to i32
+  %2 = add i32 %h, %1
+  ret i32 %2
+}
+
+define i32 @caller_half_on_stack() nounwind {
+; LA32S-LABEL: caller_half_on_stack:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -48
+; LA32S-NEXT:    st.w $ra, $sp, 44 # 4-byte Folded Spill
+; LA32S-NEXT:    lu12i.w $a0, 4
+; LA32S-NEXT:    ori $a1, $a0, 3200
+; LA32S-NEXT:    st.w $a1, $sp, 32
+; LA32S-NEXT:    ori $a1, $a0, 3136
+; LA32S-NEXT:    st.w $a1, $sp, 28
+; LA32S-NEXT:    ori $a1, $a0, 3072
+; LA32S-NEXT:    st.w $a1, $sp, 24
+; LA32S-NEXT:    ori $a1, $a0, 2944
+; LA32S-NEXT:    st.w $a1, $sp, 20
+; LA32S-NEXT:    ori $a1, $a0, 2816
+; LA32S-NEXT:    st.w $a1, $sp, 16
+; LA32S-NEXT:    ori $a1, $a0, 2688
+; LA32S-NEXT:    st.w $a1, $sp, 12
+; LA32S-NEXT:    ori $a1, $a0, 2560
+; LA32S-NEXT:    st.w $a1, $sp, 8
+; LA32S-NEXT:    ori $a1, $a0, 2432
+; LA32S-NEXT:    st.w $a1, $sp, 4
+; LA32S-NEXT:    ori $t0, $a0, 2304
+; LA32S-NEXT:    ori $a0, $zero, 1
+; LA32S-NEXT:    ori $a1, $zero, 2
+; LA32S-NEXT:    ori $a2, $zero, 3
+; LA32S-NEXT:    ori $a3, $zero, 4
+; LA32S-NEXT:    ori $a4, $zero, 5
+; LA32S-NEXT:    ori $a5, $zero, 6
+; LA32S-NEXT:    ori $a6, $zero, 7
+; LA32S-NEXT:    ori $a7, $zero, 8
+; LA32S-NEXT:    st.w $t0, $sp, 0
+; LA32S-NEXT:    bl callee_half_on_stack
+; LA32S-NEXT:    ld.w $ra, $sp, 44 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 48
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: caller_half_on_stack:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -48
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 44 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    lu12i.w $a0, -12
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 3200
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 32
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 3136
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 28
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 3072
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 24
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 2944
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 20
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 2816
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 16
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 2688
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 12
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 2560
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 8
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 2432
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 4
+; LA32F-ILP32S-NEXT:    ori $t0, $a0, 2304
+; LA32F-ILP32S-NEXT:    ori $a0, $zero, 1
+; LA32F-ILP32S-NEXT:    ori $a1, $zero, 2
+; LA32F-ILP32S-NEXT:    ori $a2, $zero, 3
+; LA32F-ILP32S-NEXT:    ori $a3, $zero, 4
+; LA32F-ILP32S-NEXT:    ori $a4, $zero, 5
+; LA32F-ILP32S-NEXT:    ori $a5, $zero, 6
+; LA32F-ILP32S-NEXT:    ori $a6, $zero, 7
+; LA32F-ILP32S-NEXT:    ori $a7, $zero, 8
+; LA32F-ILP32S-NEXT:    st.w $t0, $sp, 0
+; LA32F-ILP32S-NEXT:    bl callee_half_on_stack
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 44 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 48
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: caller_half_on_stack:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    lu12i.w $a0, -12
+; LA32F-ILP32D-NEXT:    ori $t0, $a0, 3200
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; LA32F-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_1)
+; LA32F-ILP32D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_2)
+; LA32F-ILP32D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_3)
+; LA32F-ILP32D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_4)
+; LA32F-ILP32D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_5)
+; LA32F-ILP32D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_6)
+; LA32F-ILP32D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_7)
+; LA32F-ILP32D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
+; LA32F-ILP32D-NEXT:    ori $a0, $zero, 1
+; LA32F-ILP32D-NEXT:    ori $a1, $zero, 2
+; LA32F-ILP32D-NEXT:    ori $a2, $zero, 3
+; LA32F-ILP32D-NEXT:    ori $a3, $zero, 4
+; LA32F-ILP32D-NEXT:    ori $a4, $zero, 5
+; LA32F-ILP32D-NEXT:    ori $a5, $zero, 6
+; LA32F-ILP32D-NEXT:    ori $a6, $zero, 7
+; LA32F-ILP32D-NEXT:    ori $a7, $zero, 8
+; LA32F-ILP32D-NEXT:    st.w $t0, $sp, 0
+; LA32F-ILP32D-NEXT:    bl callee_half_on_stack
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: caller_half_on_stack:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -48
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 44 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    lu12i.w $a0, -12
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 3200
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 32
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 3136
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 28
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 3072
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 24
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 2944
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 20
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 2816
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 16
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 2688
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 12
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 2560
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 8
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 2432
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 4
+; LA32D-ILP32S-NEXT:    ori $t0, $a0, 2304
+; LA32D-ILP32S-NEXT:    ori $a0, $zero, 1
+; LA32D-ILP32S-NEXT:    ori $a1, $zero, 2
+; LA32D-ILP32S-NEXT:    ori $a2, $zero, 3
+; LA32D-ILP32S-NEXT:    ori $a3, $zero, 4
+; LA32D-ILP32S-NEXT:    ori $a4, $zero, 5
+; LA32D-ILP32S-NEXT:    ori $a5, $zero, 6
+; LA32D-ILP32S-NEXT:    ori $a6, $zero, 7
+; LA32D-ILP32S-NEXT:    ori $a7, $zero, 8
+; LA32D-ILP32S-NEXT:    st.w $t0, $sp, 0
+; LA32D-ILP32S-NEXT:    bl callee_half_on_stack
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 44 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 48
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: caller_half_on_stack:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    lu12i.w $a0, -12
+; LA32D-ILP32D-NEXT:    ori $t0, $a0, 3200
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; LA32D-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_1)
+; LA32D-ILP32D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_2)
+; LA32D-ILP32D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_3)
+; LA32D-ILP32D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_4)
+; LA32D-ILP32D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_5)
+; LA32D-ILP32D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_6)
+; LA32D-ILP32D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_7)
+; LA32D-ILP32D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
+; LA32D-ILP32D-NEXT:    ori $a0, $zero, 1
+; LA32D-ILP32D-NEXT:    ori $a1, $zero, 2
+; LA32D-ILP32D-NEXT:    ori $a2, $zero, 3
+; LA32D-ILP32D-NEXT:    ori $a3, $zero, 4
+; LA32D-ILP32D-NEXT:    ori $a4, $zero, 5
+; LA32D-ILP32D-NEXT:    ori $a5, $zero, 6
+; LA32D-ILP32D-NEXT:    ori $a6, $zero, 7
+; LA32D-ILP32D-NEXT:    ori $a7, $zero, 8
+; LA32D-ILP32D-NEXT:    st.w $t0, $sp, 0
+; LA32D-ILP32D-NEXT:    bl callee_half_on_stack
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: caller_half_on_stack:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -16
+; LA64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    lu12i.w $a0, -12
+; LA64S-NEXT:    ori $t0, $a0, 3200
+; LA64S-NEXT:    lu32i.d $t0, 0
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; LA64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_1)
+; LA64S-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_2)
+; LA64S-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_3)
+; LA64S-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_4)
+; LA64S-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_5)
+; LA64S-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_6)
+; LA64S-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_7)
+; LA64S-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
+; LA64S-NEXT:    ori $a0, $zero, 1
+; LA64S-NEXT:    ori $a1, $zero, 2
+; LA64S-NEXT:    ori $a2, $zero, 3
+; LA64S-NEXT:    ori $a3, $zero, 4
+; LA64S-NEXT:    ori $a4, $zero, 5
+; LA64S-NEXT:    ori $a5, $zero, 6
+; LA64S-NEXT:    ori $a6, $zero, 7
+; LA64S-NEXT:    ori $a7, $zero, 8
+; LA64S-NEXT:    st.w $t0, $sp, 0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(callee_half_on_stack)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 16
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: caller_half_on_stack:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -80
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    lu12i.w $a0, -12
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 3200
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 64
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 3136
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 56
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 3072
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 48
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 2944
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 40
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 2816
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 32
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 2688
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 24
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 2560
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 16
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 2432
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 8
+; LA64F-LP64S-NEXT:    ori $t0, $a0, 2304
+; LA64F-LP64S-NEXT:    lu32i.d $t0, 0
+; LA64F-LP64S-NEXT:    ori $a0, $zero, 1
+; LA64F-LP64S-NEXT:    ori $a1, $zero, 2
+; LA64F-LP64S-NEXT:    ori $a2, $zero, 3
+; LA64F-LP64S-NEXT:    ori $a3, $zero, 4
+; LA64F-LP64S-NEXT:    ori $a4, $zero, 5
+; LA64F-LP64S-NEXT:    ori $a5, $zero, 6
+; LA64F-LP64S-NEXT:    ori $a6, $zero, 7
+; LA64F-LP64S-NEXT:    ori $a7, $zero, 8
+; LA64F-LP64S-NEXT:    st.w $t0, $sp, 0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_on_stack)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 80
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: caller_half_on_stack:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    lu12i.w $a0, -12
+; LA64F-LP64D-NEXT:    ori $t0, $a0, 3200
+; LA64F-LP64D-NEXT:    lu32i.d $t0, 0
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; LA64F-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_1)
+; LA64F-LP64D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_2)
+; LA64F-LP64D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_3)
+; LA64F-LP64D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_4)
+; LA64F-LP64D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_5)
+; LA64F-LP64D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_6)
+; LA64F-LP64D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_7)
+; LA64F-LP64D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
+; LA64F-LP64D-NEXT:    ori $a0, $zero, 1
+; LA64F-LP64D-NEXT:    ori $a1, $zero, 2
+; LA64F-LP64D-NEXT:    ori $a2, $zero, 3
+; LA64F-LP64D-NEXT:    ori $a3, $zero, 4
+; LA64F-LP64D-NEXT:    ori $a4, $zero, 5
+; LA64F-LP64D-NEXT:    ori $a5, $zero, 6
+; LA64F-LP64D-NEXT:    ori $a6, $zero, 7
+; LA64F-LP64D-NEXT:    ori $a7, $zero, 8
+; LA64F-LP64D-NEXT:    st.w $t0, $sp, 0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_on_stack)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: caller_half_on_stack:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -80
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    lu12i.w $a0, -12
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 3200
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 64
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 3136
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 56
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 3072
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 48
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 2944
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 40
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 2816
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 32
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 2688
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 24
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 2560
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 16
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 2432
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 8
+; LA64D-LP64S-NEXT:    ori $t0, $a0, 2304
+; LA64D-LP64S-NEXT:    lu32i.d $t0, 0
+; LA64D-LP64S-NEXT:    ori $a0, $zero, 1
+; LA64D-LP64S-NEXT:    ori $a1, $zero, 2
+; LA64D-LP64S-NEXT:    ori $a2, $zero, 3
+; LA64D-LP64S-NEXT:    ori $a3, $zero, 4
+; LA64D-LP64S-NEXT:    ori $a4, $zero, 5
+; LA64D-LP64S-NEXT:    ori $a5, $zero, 6
+; LA64D-LP64S-NEXT:    ori $a6, $zero, 7
+; LA64D-LP64S-NEXT:    ori $a7, $zero, 8
+; LA64D-LP64S-NEXT:    st.w $t0, $sp, 0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_on_stack)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 80
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: caller_half_on_stack:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    lu12i.w $a0, -12
+; LA64D-LP64D-NEXT:    ori $t0, $a0, 3200
+; LA64D-LP64D-NEXT:    lu32i.d $t0, 0
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; LA64D-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_1)
+; LA64D-LP64D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_2)
+; LA64D-LP64D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_3)
+; LA64D-LP64D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_4)
+; LA64D-LP64D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_5)
+; LA64D-LP64D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_6)
+; LA64D-LP64D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_7)
+; LA64D-LP64D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
+; LA64D-LP64D-NEXT:    ori $a0, $zero, 1
+; LA64D-LP64D-NEXT:    ori $a1, $zero, 2
+; LA64D-LP64D-NEXT:    ori $a2, $zero, 3
+; LA64D-LP64D-NEXT:    ori $a3, $zero, 4
+; LA64D-LP64D-NEXT:    ori $a4, $zero, 5
+; LA64D-LP64D-NEXT:    ori $a5, $zero, 6
+; LA64D-LP64D-NEXT:    ori $a6, $zero, 7
+; LA64D-LP64D-NEXT:    ori $a7, $zero, 8
+; LA64D-LP64D-NEXT:    st.w $t0, $sp, 0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_on_stack)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = call i32 @callee_half_on_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, half 10.0, half 11.0, half 12.0, half 13.0, half 14.0, half 15.0, half 16.0, half 17.0, half 18.0)
+  ret i32 %1
+}
+
+define half @callee_half_ret() nounwind {
+; LA32S-LABEL: callee_half_ret:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    lu12i.w $a0, 3
+; LA32S-NEXT:    ori $a0, $a0, 3072
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: callee_half_ret:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    lu12i.w $a0, -13
+; LA32F-ILP32S-NEXT:    ori $a0, $a0, 3072
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: callee_half_ret:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA32F-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: callee_half_ret:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    lu12i.w $a0, -13
+; LA32D-ILP32S-NEXT:    ori $a0, $a0, 3072
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: callee_half_ret:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA32D-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: callee_half_ret:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: callee_half_ret:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA64F-LP64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA64F-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: callee_half_ret:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA64F-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: callee_half_ret:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA64D-LP64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA64D-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: callee_half_ret:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA64D-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA64D-LP64D-NEXT:    ret
+  ret half 1.0
+}
+
+define i32 @caller_half_ret() nounwind {
+; LA32S-LABEL: caller_half_ret:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -16
+; LA32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT:    bl callee_half_ret
+; LA32S-NEXT:    lu12i.w $a1, 15
+; LA32S-NEXT:    ori $a1, $a1, 4095
+; LA32S-NEXT:    and $a0, $a0, $a1
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    bl __fixsfsi
+; LA32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 16
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: caller_half_ret:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    bl callee_half_ret
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: caller_half_ret:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    bl callee_half_ret
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: caller_half_ret:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    bl callee_half_ret
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: caller_half_ret:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    bl callee_half_ret
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: caller_half_ret:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -16
+; LA64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    pcaddu18i $ra, %call36(callee_half_ret)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 16
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: caller_half_ret:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_ret)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: caller_half_ret:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_ret)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: caller_half_ret:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_ret)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: caller_half_ret:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_ret)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = call half @callee_half_ret()
+  %2 = fptosi half %1 to i32
+  ret i32 %2
+}
diff --git a/llvm/test/CodeGen/LoongArch/fp16-promote.ll b/llvm/test/CodeGen/LoongArch/fp16-promote.ll
index 6a1610c27937d..c49e9ba99e0f8 100644
--- a/llvm/test/CodeGen/LoongArch/fp16-promote.ll
+++ b/llvm/test/CodeGen/LoongArch/fp16-promote.ll
@@ -22,14 +22,26 @@ define void @test_load_store(ptr %p, ptr %q) nounwind {
 define float @test_fpextend_float(ptr %p) nounwind {
 ; LA32-LABEL: test_fpextend_float:
 ; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ld.hu $a0, $a0, 0
-; LA32-NEXT:    b __extendhfsf2
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    bl __extendhfsf2
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_fpextend_float:
 ; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
 ; LA64-NEXT:    ld.hu $a0, $a0, 0
-; LA64-NEXT:    pcaddu18i $t8, %call36(__extendhfsf2)
-; LA64-NEXT:    jr $t8
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
   %a = load half, ptr %p
   %r = fpext half %a to float
   ret float %r
@@ -41,6 +53,7 @@ define double @test_fpextend_double(ptr %p) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ld.hu $a0, $a0, 0
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fcvt.d.s $fa0, $fa0
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -52,6 +65,7 @@ define double @test_fpextend_double(ptr %p) nounwind {
 ; LA64-NEXT:    addi.d $sp, $sp, -16
 ; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
 ; LA64-NEXT:    ld.hu $a0, $a0, 0
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
 ; LA64-NEXT:    fcvt.d.s $fa0, $fa0
@@ -71,6 +85,7 @@ define void @test_fptrunc_float(float %f, ptr %p) nounwind {
 ; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
 ; LA32-NEXT:    move $fp, $a0
 ; LA32-NEXT:    bl __truncsfhf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
 ; LA32-NEXT:    st.h $a0, $fp, 0
 ; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -85,6 +100,7 @@ define void @test_fptrunc_float(float %f, ptr %p) nounwind {
 ; LA64-NEXT:    move $fp, $a0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
 ; LA64-NEXT:    st.h $a0, $fp, 0
 ; LA64-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
@@ -103,6 +119,7 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
 ; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
 ; LA32-NEXT:    move $fp, $a0
 ; LA32-NEXT:    bl __truncdfhf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
 ; LA32-NEXT:    st.h $a0, $fp, 0
 ; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -117,6 +134,7 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
 ; LA64-NEXT:    move $fp, $a0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncdfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
 ; LA64-NEXT:    st.h $a0, $fp, 0
 ; LA64-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
@@ -130,43 +148,51 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
 define half @test_fadd_reg(half %a, half %b) nounwind {
 ; LA32-LABEL: test_fadd_reg:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
-; LA32-NEXT:    fst.d $fs0, $sp, 0 # 8-byte Folded Spill
-; LA32-NEXT:    move $fp, $a0
-; LA32-NEXT:    move $a0, $a1
-; LA32-NEXT:    bl __extendhfsf2
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
 ; LA32-NEXT:    fmov.s $fs0, $fa0
-; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    fmov.s $fa0, $fa1
+; LA32-NEXT:    bl __extendhfsf2
+; LA32-NEXT:    fmov.s $fs1, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fs0
 ; LA32-NEXT:    bl __extendhfsf2
-; LA32-NEXT:    fadd.s $fa0, $fa0, $fs0
+; LA32-NEXT:    fadd.s $fa0, $fa0, $fs1
 ; LA32-NEXT:    bl __truncsfhf2
-; LA32-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
-; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
+; LA32-NEXT:    lu12i.w $a1, -16
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_fadd_reg:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.d $sp, $sp, -32
 ; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
-; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
-; LA64-NEXT:    fst.d $fs0, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    move $fp, $a0
-; LA64-NEXT:    move $a0, $a1
+; LA64-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    fmov.s $fs0, $fa0
+; LA64-NEXT:    fmov.s $fa0, $fa1
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fmov.s $fs0, $fa0
-; LA64-NEXT:    move $a0, $fp
+; LA64-NEXT:    fmov.s $fs1, $fa0
+; LA64-NEXT:    fmov.s $fa0, $fs0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fadd.s $fa0, $fa0, $fs0
+; LA64-NEXT:    fadd.s $fa0, $fa0, $fs1
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fld.d $fs0, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    lu12i.w $a1, -16
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 32
 ; LA64-NEXT:    ret
@@ -180,20 +206,23 @@ define void @test_fadd_mem(ptr %p, ptr %q) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -32
 ; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
 ; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
-; LA32-NEXT:    fst.d $fs0, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
 ; LA32-NEXT:    move $fp, $a0
-; LA32-NEXT:    ld.hu $s0, $a0, 0
-; LA32-NEXT:    ld.hu $a0, $a1, 0
+; LA32-NEXT:    ld.hu $a0, $a0, 0
+; LA32-NEXT:    ld.hu $a1, $a1, 0
+; LA32-NEXT:    movgr2fr.w $fs0, $a0
+; LA32-NEXT:    movgr2fr.w $fa0, $a1
 ; LA32-NEXT:    bl __extendhfsf2
-; LA32-NEXT:    fmov.s $fs0, $fa0
-; LA32-NEXT:    move $a0, $s0
+; LA32-NEXT:    fmov.s $fs1, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fs0
 ; LA32-NEXT:    bl __extendhfsf2
-; LA32-NEXT:    fadd.s $fa0, $fa0, $fs0
+; LA32-NEXT:    fadd.s $fa0, $fa0, $fs1
 ; LA32-NEXT:    bl __truncsfhf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
 ; LA32-NEXT:    st.h $a0, $fp, 0
-; LA32-NEXT:    fld.d $fs0, $sp, 8 # 8-byte Folded Reload
-; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
 ; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 32
@@ -209,15 +238,17 @@ define void @test_fadd_mem(ptr %p, ptr %q) nounwind {
 ; LA64-NEXT:    move $fp, $a0
 ; LA64-NEXT:    ld.hu $s0, $a0, 0
 ; LA64-NEXT:    ld.hu $a0, $a1, 0
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
 ; LA64-NEXT:    fmov.s $fs0, $fa0
-; LA64-NEXT:    move $a0, $s0
+; LA64-NEXT:    movgr2fr.w $fa0, $s0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
 ; LA64-NEXT:    fadd.s $fa0, $fa0, $fs0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
 ; LA64-NEXT:    st.h $a0, $fp, 0
 ; LA64-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
@@ -235,43 +266,51 @@ define void @test_fadd_mem(ptr %p, ptr %q) nounwind {
 define half @test_fmul_reg(half %a, half %b) nounwind {
 ; LA32-LABEL: test_fmul_reg:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
-; LA32-NEXT:    fst.d $fs0, $sp, 0 # 8-byte Folded Spill
-; LA32-NEXT:    move $fp, $a0
-; LA32-NEXT:    move $a0, $a1
-; LA32-NEXT:    bl __extendhfsf2
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
 ; LA32-NEXT:    fmov.s $fs0, $fa0
-; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    fmov.s $fa0, $fa1
 ; LA32-NEXT:    bl __extendhfsf2
-; LA32-NEXT:    fmul.s $fa0, $fa0, $fs0
+; LA32-NEXT:    fmov.s $fs1, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fs0
+; LA32-NEXT:    bl __extendhfsf2
+; LA32-NEXT:    fmul.s $fa0, $fa0, $fs1
 ; LA32-NEXT:    bl __truncsfhf2
-; LA32-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
-; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
+; LA32-NEXT:    lu12i.w $a1, -16
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_fmul_reg:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.d $sp, $sp, -32
 ; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
-; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
-; LA64-NEXT:    fst.d $fs0, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    move $fp, $a0
-; LA64-NEXT:    move $a0, $a1
+; LA64-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    fmov.s $fs0, $fa0
+; LA64-NEXT:    fmov.s $fa0, $fa1
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fmov.s $fs0, $fa0
-; LA64-NEXT:    move $a0, $fp
+; LA64-NEXT:    fmov.s $fs1, $fa0
+; LA64-NEXT:    fmov.s $fa0, $fs0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fmul.s $fa0, $fa0, $fs0
+; LA64-NEXT:    fmul.s $fa0, $fa0, $fs1
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fld.d $fs0, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    lu12i.w $a1, -16
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 32
 ; LA64-NEXT:    ret
@@ -285,20 +324,23 @@ define void @test_fmul_mem(ptr %p, ptr %q) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -32
 ; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
 ; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
-; LA32-NEXT:    fst.d $fs0, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
 ; LA32-NEXT:    move $fp, $a0
-; LA32-NEXT:    ld.hu $s0, $a0, 0
-; LA32-NEXT:    ld.hu $a0, $a1, 0
+; LA32-NEXT:    ld.hu $a0, $a0, 0
+; LA32-NEXT:    ld.hu $a1, $a1, 0
+; LA32-NEXT:    movgr2fr.w $fs0, $a0
+; LA32-NEXT:    movgr2fr.w $fa0, $a1
 ; LA32-NEXT:    bl __extendhfsf2
-; LA32-NEXT:    fmov.s $fs0, $fa0
-; LA32-NEXT:    move $a0, $s0
+; LA32-NEXT:    fmov.s $fs1, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fs0
 ; LA32-NEXT:    bl __extendhfsf2
-; LA32-NEXT:    fmul.s $fa0, $fa0, $fs0
+; LA32-NEXT:    fmul.s $fa0, $fa0, $fs1
 ; LA32-NEXT:    bl __truncsfhf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
 ; LA32-NEXT:    st.h $a0, $fp, 0
-; LA32-NEXT:    fld.d $fs0, $sp, 8 # 8-byte Folded Reload
-; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
 ; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 32
@@ -314,15 +356,17 @@ define void @test_fmul_mem(ptr %p, ptr %q) nounwind {
 ; LA64-NEXT:    move $fp, $a0
 ; LA64-NEXT:    ld.hu $s0, $a0, 0
 ; LA64-NEXT:    ld.hu $a0, $a1, 0
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
 ; LA64-NEXT:    fmov.s $fs0, $fa0
-; LA64-NEXT:    move $a0, $s0
+; LA64-NEXT:    movgr2fr.w $fa0, $s0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
 ; LA64-NEXT:    fmul.s $fa0, $fa0, $fs0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
 ; LA64-NEXT:    st.h $a0, $fp, 0
 ; LA64-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
@@ -347,6 +391,10 @@ define half @freeze_half_undef() nounwind {
 ; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fadd.s $fa0, $fa0, $fa0
 ; LA32-NEXT:    bl __truncsfhf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
+; LA32-NEXT:    lu12i.w $a1, -16
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -363,6 +411,10 @@ define half @freeze_half_undef() nounwind {
 ; LA64-NEXT:    fadd.s $fa0, $fa0, $fa0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    lu12i.w $a1, -16
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 16
 ; LA64-NEXT:    ret
@@ -379,6 +431,10 @@ define half @freeze_half_poison(half %maybe.poison) nounwind {
 ; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fadd.s $fa0, $fa0, $fa0
 ; LA32-NEXT:    bl __truncsfhf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
+; LA32-NEXT:    lu12i.w $a1, -16
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -392,6 +448,10 @@ define half @freeze_half_poison(half %maybe.poison) nounwind {
 ; LA64-NEXT:    fadd.s $fa0, $fa0, $fa0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    lu12i.w $a1, -16
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 16
 ; LA64-NEXT:    ret
@@ -418,8 +478,8 @@ define signext i32 @test_half_to_s32(half %a) nounwind {
 ; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64-NEXT:    movfr2gr.d $a0, $fa0
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 16
 ; LA64-NEXT:    ret
@@ -446,8 +506,8 @@ define zeroext i32 @test_half_to_s32_u32(half %a) nounwind {
 ; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64-NEXT:    movfr2gr.d $a0, $fa0
 ; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 16
diff --git a/llvm/test/CodeGen/LoongArch/issue97975.ll b/llvm/test/CodeGen/LoongArch/issue97975.ll
new file mode 100644
index 0000000000000..98db8f0a08294
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/issue97975.ll
@@ -0,0 +1,444 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f -target-abi=ilp32s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F-ILP32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f -target-abi=ilp32d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F-ILP32D
+; RUN: llc --mtriple=loongarch32 --mattr=+d -target-abi=ilp32s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D-ILP32S
+; RUN: llc --mtriple=loongarch32 --mattr=+d -target-abi=ilp32d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D-ILP32D
+; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f -target-abi=lp64s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F-LP64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f -target-abi=lp64d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F-LP64D
+; RUN: llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D-LP64S
+; RUN: llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D-LP64D
+
+define half @f(half %a, half %b, half %c) {
+; LA32S-LABEL: f:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -32
+; LA32S-NEXT:    .cfi_def_cfa_offset 32
+; LA32S-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32S-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32S-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32S-NEXT:    st.w $s1, $sp, 16 # 4-byte Folded Spill
+; LA32S-NEXT:    st.w $s2, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT:    .cfi_offset 1, -4
+; LA32S-NEXT:    .cfi_offset 22, -8
+; LA32S-NEXT:    .cfi_offset 23, -12
+; LA32S-NEXT:    .cfi_offset 24, -16
+; LA32S-NEXT:    .cfi_offset 25, -20
+; LA32S-NEXT:    move $fp, $a2
+; LA32S-NEXT:    move $s0, $a1
+; LA32S-NEXT:    lu12i.w $a1, 15
+; LA32S-NEXT:    ori $s2, $a1, 4095
+; LA32S-NEXT:    and $a0, $a0, $s2
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    move $s1, $a0
+; LA32S-NEXT:    and $a0, $s0, $s2
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    move $a1, $a0
+; LA32S-NEXT:    move $a0, $s1
+; LA32S-NEXT:    bl __addsf3
+; LA32S-NEXT:    bl __truncsfhf2
+; LA32S-NEXT:    move $s0, $a0
+; LA32S-NEXT:    and $a0, $fp, $s2
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    move $fp, $a0
+; LA32S-NEXT:    and $a0, $s0, $s2
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    move $a1, $fp
+; LA32S-NEXT:    bl __addsf3
+; LA32S-NEXT:    bl __truncsfhf2
+; LA32S-NEXT:    ld.w $s2, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT:    ld.w $s1, $sp, 16 # 4-byte Folded Reload
+; LA32S-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32S-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32S-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 32
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: f:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    .cfi_def_cfa_offset 16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    .cfi_offset 1, -4
+; LA32F-ILP32S-NEXT:    .cfi_offset 22, -8
+; LA32F-ILP32S-NEXT:    .cfi_offset 23, -12
+; LA32F-ILP32S-NEXT:    move $fp, $a2
+; LA32F-ILP32S-NEXT:    move $s0, $a0
+; LA32F-ILP32S-NEXT:    move $a0, $a1
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    fst.s $fa0, $sp, 0 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    move $a0, $s0
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    fld.s $fa1, $sp, 0 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    fadd.s $fa0, $fa0, $fa1
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    bl __truncsfhf2
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    fst.s $fa0, $sp, 0 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    move $a0, $fp
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    fld.s $fa1, $sp, 0 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    bl __truncsfhf2
+; LA32F-ILP32S-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32S-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32S-NEXT:    ld.w $s0, $sp, 4 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: f:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    .cfi_def_cfa_offset 16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    fst.s $fs0, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    fst.s $fs1, $sp, 4 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    fst.s $fs2, $sp, 0 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    .cfi_offset 1, -4
+; LA32F-ILP32D-NEXT:    .cfi_offset 56, -8
+; LA32F-ILP32D-NEXT:    .cfi_offset 57, -12
+; LA32F-ILP32D-NEXT:    .cfi_offset 58, -16
+; LA32F-ILP32D-NEXT:    fmov.s $fs0, $fa2
+; LA32F-ILP32D-NEXT:    fmov.s $fs1, $fa0
+; LA32F-ILP32D-NEXT:    fmov.s $fa0, $fa1
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    fmov.s $fs2, $fa0
+; LA32F-ILP32D-NEXT:    fmov.s $fa0, $fs1
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    fadd.s $fa0, $fa0, $fs2
+; LA32F-ILP32D-NEXT:    bl __truncsfhf2
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    fmov.s $fs1, $fa0
+; LA32F-ILP32D-NEXT:    fmov.s $fa0, $fs0
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    fadd.s $fa0, $fs1, $fa0
+; LA32F-ILP32D-NEXT:    bl __truncsfhf2
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    fld.s $fs2, $sp, 0 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    fld.s $fs1, $sp, 4 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    fld.s $fs0, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: f:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32S-NEXT:    .cfi_def_cfa_offset 16
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    .cfi_offset 1, -4
+; LA32D-ILP32S-NEXT:    .cfi_offset 22, -8
+; LA32D-ILP32S-NEXT:    .cfi_offset 23, -12
+; LA32D-ILP32S-NEXT:    move $fp, $a2
+; LA32D-ILP32S-NEXT:    move $s0, $a0
+; LA32D-ILP32S-NEXT:    move $a0, $a1
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    fst.s $fa0, $sp, 0 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    move $a0, $s0
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    fld.s $fa1, $sp, 0 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    fadd.s $fa0, $fa0, $fa1
+; LA32D-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32S-NEXT:    bl __truncsfhf2
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    fst.s $fa0, $sp, 0 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    move $a0, $fp
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    fld.s $fa1, $sp, 0 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA32D-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32S-NEXT:    bl __truncsfhf2
+; LA32D-ILP32S-NEXT:    lu12i.w $a1, -16
+; LA32D-ILP32S-NEXT:    or $a0, $a0, $a1
+; LA32D-ILP32S-NEXT:    ld.w $s0, $sp, 4 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: f:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -32
+; LA32D-ILP32D-NEXT:    .cfi_def_cfa_offset 32
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32D-ILP32D-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA32D-ILP32D-NEXT:    fst.d $fs2, $sp, 0 # 8-byte Folded Spill
+; LA32D-ILP32D-NEXT:    .cfi_offset 1, -4
+; LA32D-ILP32D-NEXT:    .cfi_offset 56, -16
+; LA32D-ILP32D-NEXT:    .cfi_offset 57, -24
+; LA32D-ILP32D-NEXT:    .cfi_offset 58, -32
+; LA32D-ILP32D-NEXT:    fmov.s $fs0, $fa2
+; LA32D-ILP32D-NEXT:    fmov.s $fs1, $fa0
+; LA32D-ILP32D-NEXT:    fmov.s $fa0, $fa1
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    fmov.s $fs2, $fa0
+; LA32D-ILP32D-NEXT:    fmov.s $fa0, $fs1
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    fadd.s $fa0, $fa0, $fs2
+; LA32D-ILP32D-NEXT:    bl __truncsfhf2
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    fmov.s $fs1, $fa0
+; LA32D-ILP32D-NEXT:    fmov.s $fa0, $fs0
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    fadd.s $fa0, $fs1, $fa0
+; LA32D-ILP32D-NEXT:    bl __truncsfhf2
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32D-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    fld.d $fs2, $sp, 0 # 8-byte Folded Reload
+; LA32D-ILP32D-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32D-ILP32D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 32
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: f:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -32
+; LA64S-NEXT:    .cfi_def_cfa_offset 32
+; LA64S-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64S-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64S-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    fst.d $fs2, $sp, 0 # 8-byte Folded Spill
+; LA64S-NEXT:    .cfi_offset 1, -8
+; LA64S-NEXT:    .cfi_offset 56, -16
+; LA64S-NEXT:    .cfi_offset 57, -24
+; LA64S-NEXT:    .cfi_offset 58, -32
+; LA64S-NEXT:    fmov.s $fs0, $fa2
+; LA64S-NEXT:    fmov.s $fs1, $fa0
+; LA64S-NEXT:    fmov.s $fa0, $fa1
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    fmov.s $fs2, $fa0
+; LA64S-NEXT:    fmov.s $fa0, $fs1
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    fadd.s $fa0, $fa0, $fs2
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    fmov.s $fs1, $fa0
+; LA64S-NEXT:    fmov.s $fa0, $fs0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    fadd.s $fa0, $fs1, $fa0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64S-NEXT:    lu12i.w $a1, -16
+; LA64S-NEXT:    or $a0, $a0, $a1
+; LA64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64S-NEXT:    fld.d $fs2, $sp, 0 # 8-byte Folded Reload
+; LA64S-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64S-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 32
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: f:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -32
+; LA64F-LP64S-NEXT:    .cfi_def_cfa_offset 32
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    .cfi_offset 1, -8
+; LA64F-LP64S-NEXT:    .cfi_offset 22, -16
+; LA64F-LP64S-NEXT:    .cfi_offset 23, -24
+; LA64F-LP64S-NEXT:    move $fp, $a2
+; LA64F-LP64S-NEXT:    move $s0, $a0
+; LA64F-LP64S-NEXT:    move $a0, $a1
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    fst.s $fa0, $sp, 4 # 4-byte Folded Spill
+; LA64F-LP64S-NEXT:    move $a0, $s0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    fld.s $fa1, $sp, 4 # 4-byte Folded Reload
+; LA64F-LP64S-NEXT:    fadd.s $fa0, $fa0, $fa1
+; LA64F-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    fst.s $fa0, $sp, 4 # 4-byte Folded Spill
+; LA64F-LP64S-NEXT:    move $a0, $fp
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    fld.s $fa1, $sp, 4 # 4-byte Folded Reload
+; LA64F-LP64S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64F-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64S-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 32
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: f:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -32
+; LA64F-LP64D-NEXT:    .cfi_def_cfa_offset 32
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    fst.d $fs2, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    .cfi_offset 1, -8
+; LA64F-LP64D-NEXT:    .cfi_offset 56, -16
+; LA64F-LP64D-NEXT:    .cfi_offset 57, -24
+; LA64F-LP64D-NEXT:    .cfi_offset 58, -32
+; LA64F-LP64D-NEXT:    fmov.s $fs0, $fa2
+; LA64F-LP64D-NEXT:    fmov.s $fs1, $fa0
+; LA64F-LP64D-NEXT:    fmov.s $fa0, $fa1
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    fmov.s $fs2, $fa0
+; LA64F-LP64D-NEXT:    fmov.s $fa0, $fs1
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    fadd.s $fa0, $fa0, $fs2
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    fmov.s $fs1, $fa0
+; LA64F-LP64D-NEXT:    fmov.s $fa0, $fs0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    fadd.s $fa0, $fs1, $fa0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    fld.d $fs2, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 32
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: f:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -32
+; LA64D-LP64S-NEXT:    .cfi_def_cfa_offset 32
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    .cfi_offset 1, -8
+; LA64D-LP64S-NEXT:    .cfi_offset 22, -16
+; LA64D-LP64S-NEXT:    .cfi_offset 23, -24
+; LA64D-LP64S-NEXT:    move $fp, $a2
+; LA64D-LP64S-NEXT:    move $s0, $a0
+; LA64D-LP64S-NEXT:    move $a0, $a1
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    fst.s $fa0, $sp, 4 # 4-byte Folded Spill
+; LA64D-LP64S-NEXT:    move $a0, $s0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    fld.s $fa1, $sp, 4 # 4-byte Folded Reload
+; LA64D-LP64S-NEXT:    fadd.s $fa0, $fa0, $fa1
+; LA64D-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    fst.s $fa0, $sp, 4 # 4-byte Folded Spill
+; LA64D-LP64S-NEXT:    move $a0, $fp
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    fld.s $fa1, $sp, 4 # 4-byte Folded Reload
+; LA64D-LP64S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64D-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64S-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 32
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: f:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -32
+; LA64D-LP64D-NEXT:    .cfi_def_cfa_offset 32
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    fst.d $fs2, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    .cfi_offset 1, -8
+; LA64D-LP64D-NEXT:    .cfi_offset 56, -16
+; LA64D-LP64D-NEXT:    .cfi_offset 57, -24
+; LA64D-LP64D-NEXT:    .cfi_offset 58, -32
+; LA64D-LP64D-NEXT:    fmov.s $fs0, $fa2
+; LA64D-LP64D-NEXT:    fmov.s $fs1, $fa0
+; LA64D-LP64D-NEXT:    fmov.s $fa0, $fa1
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    fmov.s $fs2, $fa0
+; LA64D-LP64D-NEXT:    fmov.s $fa0, $fs1
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    fadd.s $fa0, $fa0, $fs2
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    fmov.s $fs1, $fa0
+; LA64D-LP64D-NEXT:    fmov.s $fa0, $fs0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    fadd.s $fa0, $fs1, $fa0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    fld.d $fs2, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 32
+; LA64D-LP64D-NEXT:    ret
+    %d = fadd half %a, %b
+    %e = fadd half %d, %c
+    ret half %e
+}
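
For reference, the `f` test above exercises chained 'half' arithmetic under
every ABI combination. In C terms it corresponds roughly to the sketch below
(assuming clang's `_Float16` support; the function and variable names are
illustrative). Each addition lowers through the __extendhfsf2/__truncsfhf2
libcalls, since there is no fp16 hardware extension:

/* Hypothetical C equivalent of the `f` test above. */
_Float16 f(_Float16 a, _Float16 b, _Float16 c) {
  _Float16 d = a + b; /* two __extendhfsf2 calls, fadd.s, __truncsfhf2 */
  return d + c;       /* the same libcall sequence again for the second add */
}
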
diff --git a/llvm/test/CodeGen/LoongArch/issue97981.ll b/llvm/test/CodeGen/LoongArch/issue97981.ll
new file mode 100644
index 0000000000000..856cd46de8d3d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/issue97981.ll
@@ -0,0 +1,127 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f -target-abi=ilp32s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F-ILP32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f -target-abi=ilp32d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F-ILP32D
+; RUN: llc --mtriple=loongarch32 --mattr=+d -target-abi=ilp32s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D-ILP32S
+; RUN: llc --mtriple=loongarch32 --mattr=+d -target-abi=ilp32d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D-ILP32D
+; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f -target-abi=lp64s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F-LP64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f -target-abi=lp64d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F-LP64D
+; RUN: llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D-LP64S
+; RUN: llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D-LP64D
+
+define half @to_half(i16 %bits) {
+; LA32S-LABEL: to_half:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: to_half:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32S-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: to_half:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: to_half:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    lu12i.w $a1, -16
+; LA32D-ILP32S-NEXT:    or $a0, $a0, $a1
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: to_half:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32D-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: to_half:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    lu12i.w $a1, -16
+; LA64S-NEXT:    or $a0, $a0, $a1
+; LA64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: to_half:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: to_half:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: to_half:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: to_half:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    ret
+    %f = bitcast i16 %bits to half
+    ret half %f
+}
+
+define i16 @from_half(half %f) {
+; LA32S-LABEL: from_half:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: from_half:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: from_half:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: from_half:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: from_half:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: from_half:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: from_half:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: from_half:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: from_half:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: from_half:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64D-NEXT:    ret
+    %bits = bitcast half %f to i16
+    ret i16 %bits
+}
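
The to_half/from_half pair above pins down the bit-level convention:
`lu12i.w $a1, -16` materializes 0xffff0000, and the `or` sets the upper
16 bits of the f32 container so the 'half' payload sits in its lower 16
bits, while `movgr2fr.w`/`movfr2gr.s` move the boxed value between a GPR
and an FPR as the ABI requires (on the pure soft-float LA32S path the raw
bits simply stay in $a0). In host C, the boxing these checks encode looks
roughly like the following sketch (helper names are illustrative; an IEEE
binary32 representation is assumed):

#include <stdint.h>
#include <string.h>

/* 'half' travels in the low 16 bits of an f32 container whose upper 16
   bits are all ones, matching: lu12i.w $a1, -16 ; or $a0, $a0, $a1 */
static inline float box_half_bits(uint16_t bits) {
  uint32_t w = 0xffff0000u | bits;
  float f;
  memcpy(&f, &w, sizeof f); /* movgr2fr.w $fa0, $a0 on the FP ABIs */
  return f;
}

/* The inverse truncates to the low 16 bits, matching:
   movfr2gr.s $a0, $fa0 (only the low half is consumed). */
static inline uint16_t unbox_half_bits(float f) {
  uint32_t w;
  memcpy(&w, &f, sizeof w);
  return (uint16_t)w;
}
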


