[clang] [llvm] [LoongArch] Add support for half-precision floating-point type (PR #141564)

via llvm-commits llvm-commits at lists.llvm.org
Tue May 27 01:44:23 PDT 2025


https://github.com/Ami-zhang created https://github.com/llvm/llvm-project/pull/141564

This PR contains 3 commits:
1. Updated the FP16 implementation to pass arguments via FPRs instead of GPRs.
2. Added support for the _Float16 type and fixed two related issues.
3. Added support for the __bf16 type.
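
For context, a hypothetical example (not taken from the patch) of the kind of
source this series enables: Clang accepts _Float16 on LoongArch, and on
hard-float ABIs the half arguments and the return value now travel in FPRs
rather than GPRs.

  // Hypothetical C++ snippet; with softPromoteHalfType() the arithmetic is
  // promoted to float and the result truncated back to half.
  _Float16 scale(_Float16 x, _Float16 y) {
    return x * y;
  }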

From 41549611b44d94952a9a01b518e75dfcda7a1aea Mon Sep 17 00:00:00 2001
From: Ami-zhang <zhanglimin at loongson.cn>
Date: Thu, 24 Apr 2025 19:43:20 +0800
Subject: [PATCH 1/3] [LoongArch] Custom lower FP_TO_FP16 and FP16_TO_FP to
 correct ABI of libcall

This change passes 'half' values in the lower 16 bits of an f32 value
under the F/D ABIs. LoongArch currently lacks a hardware extension for
the fp16 data type, and the ABI manual now documents that the
half-precision floating-point type follows the FP calling convention.

Previously, we kept the 'half' type in its 16-bit format between
operations. Regardless of whether the F extension was enabled, the
value was passed in the lower 16 bits of a GPR in its 'half' format.

With this patch, depending on the ABI in use, the value is passed
either in an FPR or in a GPR in 'half' format. This keeps the bit
locations consistent with those a future fp16 hardware extension
would use.
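
To make the passing convention concrete, a rough C++ sketch follows (an
illustration only, mirroring the NaN-boxing performed by the
splitValueIntoRegisterParts/joinRegisterPartsIntoValue changes in this
patch) of where the 16-bit payload sits inside the f32 register slot:

  #include <cstdint>

  // Pack: place the half's bits in the low 16 bits and pad the upper half
  // with ones, so the f32 container is a NaN (matching the OR with
  // 0xFFFF0000 in splitValueIntoRegisterParts).
  uint32_t packHalfIntoF32Bits(uint16_t halfBits) {
    return 0xFFFF0000u | halfBits;
  }

  // Unpack: the receiver simply truncates back to the low 16 bits
  // (matching joinRegisterPartsIntoValue).
  uint16_t unpackHalfFromF32Bits(uint32_t f32Bits) {
    return static_cast<uint16_t>(f32Bits);
  }

The all-ones upper bits keep the container a well-formed float NaN,
analogous to how narrower FP values are commonly NaN-boxed when carried
in wider FPRs.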

Co-authored-by: WANG Rui <wangrui at loongson.cn>
---
 .../LoongArch/LoongArchISelLowering.cpp       |  138 +-
 .../Target/LoongArch/LoongArchISelLowering.h  |   24 +
 .../CodeGen/LoongArch/calling-conv-half.ll    | 1626 +++++++++++++++++
 llvm/test/CodeGen/LoongArch/fp16-promote.ll   |  202 +-
 llvm/test/CodeGen/LoongArch/issue97975.ll     |  438 +++++
 llvm/test/CodeGen/LoongArch/issue97981.ll     |  127 ++
 6 files changed, 2481 insertions(+), 74 deletions(-)
 create mode 100644 llvm/test/CodeGen/LoongArch/calling-conv-half.ll
 create mode 100644 llvm/test/CodeGen/LoongArch/issue97975.ll
 create mode 100644 llvm/test/CodeGen/LoongArch/issue97981.ll

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 9f5c94ddea44f..c7b2a1a8ffbf8 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -199,8 +199,10 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
     setOperationAction(ISD::FPOW, MVT::f32, Expand);
     setOperationAction(ISD::FREM, MVT::f32, Expand);
-    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
-    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
+    setOperationAction(ISD::FP16_TO_FP, MVT::f32,
+                       Subtarget.isSoftFPABI() ? LibCall : Custom);
+    setOperationAction(ISD::FP_TO_FP16, MVT::f32,
+                       Subtarget.isSoftFPABI() ? LibCall : Custom);
 
     if (Subtarget.is64Bit())
       setOperationAction(ISD::FRINT, MVT::f32, Legal);
@@ -239,7 +241,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::FPOW, MVT::f64, Expand);
     setOperationAction(ISD::FREM, MVT::f64, Expand);
     setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
-    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
+    setOperationAction(ISD::FP_TO_FP16, MVT::f64,
+                       Subtarget.isSoftFPABI() ? LibCall : Custom);
 
     if (Subtarget.is64Bit())
       setOperationAction(ISD::FRINT, MVT::f64, Legal);
@@ -490,6 +493,10 @@ SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
     return lowerPREFETCH(Op, DAG);
   case ISD::SELECT:
     return lowerSELECT(Op, DAG);
+  case ISD::FP_TO_FP16:
+    return lowerFP_TO_FP16(Op, DAG);
+  case ISD::FP16_TO_FP:
+    return lowerFP16_TO_FP(Op, DAG);
   }
   return SDValue();
 }
@@ -2242,6 +2249,40 @@ SDValue LoongArchTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
   return SDValue();
 }
 
+SDValue LoongArchTargetLowering::lowerFP_TO_FP16(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  // Custom lower to ensure the libcall return is passed in an FPR on hard
+  // float ABIs.
+  SDLoc DL(Op);
+  MakeLibCallOptions CallOptions;
+  SDValue Op0 = Op.getOperand(0);
+  SDValue Chain = SDValue();
+  RTLIB::Libcall LC = RTLIB::getFPROUND(Op0.getValueType(), MVT::f16);
+  SDValue Res;
+  std::tie(Res, Chain) =
+      makeLibCall(DAG, LC, MVT::f32, Op0, CallOptions, DL, Chain);
+  if (Subtarget.is64Bit())
+    return DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Res);
+  return DAG.getBitcast(MVT::i32, Res);
+}
+
+SDValue LoongArchTargetLowering::lowerFP16_TO_FP(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  // Custom lower to ensure the libcall argument is passed in an FPR on hard
+  // float ABIs.
+  SDLoc DL(Op);
+  MakeLibCallOptions CallOptions;
+  SDValue Op0 = Op.getOperand(0);
+  SDValue Chain = SDValue();
+  SDValue Arg = Subtarget.is64Bit() ? DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64,
+                                                  DL, MVT::f32, Op0)
+                                    : DAG.getBitcast(MVT::f32, Op0);
+  SDValue Res;
+  std::tie(Res, Chain) = makeLibCall(DAG, RTLIB::FPEXT_F16_F32, MVT::f32, Arg,
+                                     CallOptions, DL, Chain);
+  return Res;
+}
+
 static bool isConstantOrUndef(const SDValue Op) {
   if (Op->isUndef())
     return true;
@@ -3841,6 +3882,8 @@ void LoongArchTargetLowering::ReplaceNodeResults(
     EVT FVT = EVT::getFloatingPointVT(N->getValueSizeInBits(0));
     if (getTypeAction(*DAG.getContext(), Src.getValueType()) !=
         TargetLowering::TypeSoftenFloat) {
+      if (!isTypeLegal(Src.getValueType()))
+        return;
       if (Src.getValueType() == MVT::f16)
         Src = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
       SDValue Dst = DAG.getNode(LoongArchISD::FTINT, DL, FVT, Src);
@@ -5289,6 +5332,33 @@ performINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
+static SDValue performMOVGR2FR_WCombine(SDNode *N, SelectionDAG &DAG,
+                                        TargetLowering::DAGCombinerInfo &DCI,
+                                        const LoongArchSubtarget &Subtarget) {
+  // If the input to MOVGR2FR_W_LA64 is just MOVFR2GR_S_LA64 then the
+  // conversion is unnecessary and can be replaced with the
+  // MOVFR2GR_S_LA64 operand.
+  SDValue Op0 = N->getOperand(0);
+  if (Op0.getOpcode() == LoongArchISD::MOVFR2GR_S_LA64)
+    return Op0.getOperand(0);
+  return SDValue();
+}
+
+static SDValue performMOVFR2GR_SCombine(SDNode *N, SelectionDAG &DAG,
+                                        TargetLowering::DAGCombinerInfo &DCI,
+                                        const LoongArchSubtarget &Subtarget) {
+  // If the input to MOVFR2GR_S_LA64 is just MOVGR2FR_W_LA64 then the
+  // conversion is unnecessary and can be replaced with the MOVGR2FR_W_LA64
+  // operand.
+  SDValue Op0 = N->getOperand(0);
+  MVT VT = N->getSimpleValueType(0);
+  if (Op0->getOpcode() == LoongArchISD::MOVGR2FR_W_LA64) {
+    assert(Op0.getOperand(0).getValueType() == VT && "Unexpected value type!");
+    return Op0.getOperand(0);
+  }
+  return SDValue();
+}
+
 SDValue LoongArchTargetLowering::PerformDAGCombine(SDNode *N,
                                                    DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -5307,6 +5377,10 @@ SDValue LoongArchTargetLowering::PerformDAGCombine(SDNode *N,
     return performBITREV_WCombine(N, DAG, DCI, Subtarget);
   case ISD::INTRINSIC_WO_CHAIN:
     return performINTRINSIC_WO_CHAINCombine(N, DAG, DCI, Subtarget);
+  case LoongArchISD::MOVGR2FR_W_LA64:
+    return performMOVGR2FR_WCombine(N, DAG, DCI, Subtarget);
+  case LoongArchISD::MOVFR2GR_S_LA64:
+    return performMOVFR2GR_SCombine(N, DAG, DCI, Subtarget);
   }
   return SDValue();
 }
@@ -7633,3 +7707,61 @@ LoongArchTargetLowering::getPreferredVectorAction(MVT VT) const {
 
   return TargetLoweringBase::getPreferredVectorAction(VT);
 }
+
+bool LoongArchTargetLowering::splitValueIntoRegisterParts(
+    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
+    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
+  bool IsABIRegCopy = CC.has_value();
+  EVT ValueVT = Val.getValueType();
+
+  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
+    // Cast the f16 to i16, extend to i32, pad with ones to make a float
+    // NaN, and cast to f32.
+    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
+    Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
+    Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
+                      DAG.getConstant(0xFFFF0000, DL, MVT::i32));
+    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
+    Parts[0] = Val;
+    return true;
+  }
+
+  return false;
+}
+
+SDValue LoongArchTargetLowering::joinRegisterPartsIntoValue(
+    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
+    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
+  bool IsABIRegCopy = CC.has_value();
+
+  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
+    SDValue Val = Parts[0];
+
+    // Cast the f32 to i32, truncate to i16, and cast back to f16.
+    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
+    Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
+    Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
+    return Val;
+  }
+
+  return SDValue();
+}
+
+MVT LoongArchTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
+                                                           CallingConv::ID CC,
+                                                           EVT VT) const {
+  // Use f32 to pass f16.
+  if (VT == MVT::f16 && Subtarget.hasBasicF())
+    return MVT::f32;
+
+  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
+}
+
+unsigned LoongArchTargetLowering::getNumRegistersForCallingConv(
+    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {
+  // Use f32 to pass f16.
+  if (VT == MVT::f16 && Subtarget.hasBasicF())
+    return 1;
+
+  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
+}
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 6bf295984dfc5..8c00ec75db94b 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -361,6 +361,8 @@ class LoongArchTargetLowering : public TargetLowering {
   SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) const;
 
   bool isFPImmLegal(const APFloat &Imm, EVT VT,
                     bool ForCodeSize) const override;
@@ -385,6 +387,28 @@ class LoongArchTargetLowering : public TargetLowering {
       const SmallVectorImpl<CCValAssign> &ArgLocs) const;
 
   bool softPromoteHalfType() const override { return true; }
+
+  bool
+  splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
+                              SDValue *Parts, unsigned NumParts, MVT PartVT,
+                              std::optional<CallingConv::ID> CC) const override;
+
+  SDValue
+  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
+                             const SDValue *Parts, unsigned NumParts,
+                             MVT PartVT, EVT ValueVT,
+                             std::optional<CallingConv::ID> CC) const override;
+
+  /// Return the register type used for a value of the given MVT under the
+  /// given calling convention; f16 is passed as f32 when basic F is available.
+  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
+                                    EVT VT) const override;
+
+  /// Return the number of registers used for a value of the given MVT; a
+  /// single f32 register suffices for f16 when basic F is available.
+  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
+                                         CallingConv::ID CC,
+                                         EVT VT) const override;
 };
 
 } // end namespace llvm
diff --git a/llvm/test/CodeGen/LoongArch/calling-conv-half.ll b/llvm/test/CodeGen/LoongArch/calling-conv-half.ll
new file mode 100644
index 0000000000000..6bdb2a5190380
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/calling-conv-half.ll
@@ -0,0 +1,1626 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f -target-abi=ilp32s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F-ILP32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f -target-abi=ilp32d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F-ILP32D
+; RUN: llc --mtriple=loongarch32 --mattr=+d -target-abi=ilp32s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D-ILP32S
+; RUN: llc --mtriple=loongarch32 --mattr=+d -target-abi=ilp32d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D-ILP32D
+; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f -target-abi=lp64s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F-LP64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f -target-abi=lp64d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F-LP64D
+; RUN: llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D-LP64S
+; RUN: llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D-LP64D
+
+define i32 @callee_half_in_fregs(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, half %i) nounwind {
+; LA32S-LABEL: callee_half_in_fregs:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -16
+; LA32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32S-NEXT:    ld.hu $a1, $sp, 16
+; LA32S-NEXT:    move $fp, $a0
+; LA32S-NEXT:    move $a0, $a1
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    bl __fixsfsi
+; LA32S-NEXT:    add.w $a0, $fp, $a0
+; LA32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 16
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: callee_half_in_fregs:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    ld.hu $a1, $sp, 16
+; LA32F-ILP32S-NEXT:    move $fp, $a0
+; LA32F-ILP32S-NEXT:    move $a0, $a1
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    add.w $a0, $fp, $a0
+; LA32F-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: callee_half_in_fregs:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    move $fp, $a0
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    add.w $a0, $fp, $a0
+; LA32F-ILP32D-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: callee_half_in_fregs:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    ld.hu $a1, $sp, 16
+; LA32D-ILP32S-NEXT:    move $fp, $a0
+; LA32D-ILP32S-NEXT:    move $a0, $a1
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32S-NEXT:    add.w $a0, $fp, $a0
+; LA32D-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: callee_half_in_fregs:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    move $fp, $a0
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    add.w $a0, $fp, $a0
+; LA32D-ILP32D-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: callee_half_in_fregs:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -16
+; LA64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64S-NEXT:    move $fp, $a0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64S-NEXT:    add.w $a0, $fp, $a0
+; LA64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 16
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: callee_half_in_fregs:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    ld.hu $a1, $sp, 16
+; LA64F-LP64S-NEXT:    move $fp, $a0
+; LA64F-LP64S-NEXT:    move $a0, $a1
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64S-NEXT:    add.w $a0, $fp, $a0
+; LA64F-LP64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: callee_half_in_fregs:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    move $fp, $a0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64D-NEXT:    add.w $a0, $fp, $a0
+; LA64F-LP64D-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: callee_half_in_fregs:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    ld.hu $a1, $sp, 16
+; LA64D-LP64S-NEXT:    move $fp, $a0
+; LA64D-LP64S-NEXT:    move $a0, $a1
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64S-NEXT:    add.w $a0, $fp, $a0
+; LA64D-LP64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: callee_half_in_fregs:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    move $fp, $a0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64D-NEXT:    add.w $a0, $fp, $a0
+; LA64D-LP64D-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = fptosi half %i to i32
+  %2 = add i32 %a, %1
+  ret i32 %2
+}
+
+define i32 @caller_half_in_fregs() nounwind {
+; LA32S-LABEL: caller_half_in_fregs:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -16
+; LA32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT:    lu12i.w $t0, 4
+; LA32S-NEXT:    ori $a0, $zero, 1
+; LA32S-NEXT:    ori $a1, $zero, 2
+; LA32S-NEXT:    ori $a2, $zero, 3
+; LA32S-NEXT:    ori $a3, $zero, 4
+; LA32S-NEXT:    ori $a4, $zero, 5
+; LA32S-NEXT:    ori $a5, $zero, 6
+; LA32S-NEXT:    ori $a6, $zero, 7
+; LA32S-NEXT:    ori $a7, $zero, 8
+; LA32S-NEXT:    st.w $t0, $sp, 0
+; LA32S-NEXT:    bl callee_half_in_fregs
+; LA32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 16
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: caller_half_in_fregs:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    lu12i.w $t0, -12
+; LA32F-ILP32S-NEXT:    ori $a0, $zero, 1
+; LA32F-ILP32S-NEXT:    ori $a1, $zero, 2
+; LA32F-ILP32S-NEXT:    ori $a2, $zero, 3
+; LA32F-ILP32S-NEXT:    ori $a3, $zero, 4
+; LA32F-ILP32S-NEXT:    ori $a4, $zero, 5
+; LA32F-ILP32S-NEXT:    ori $a5, $zero, 6
+; LA32F-ILP32S-NEXT:    ori $a6, $zero, 7
+; LA32F-ILP32S-NEXT:    ori $a7, $zero, 8
+; LA32F-ILP32S-NEXT:    st.w $t0, $sp, 0
+; LA32F-ILP32S-NEXT:    bl callee_half_in_fregs
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: caller_half_in_fregs:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; LA32F-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
+; LA32F-ILP32D-NEXT:    ori $a0, $zero, 1
+; LA32F-ILP32D-NEXT:    ori $a1, $zero, 2
+; LA32F-ILP32D-NEXT:    ori $a2, $zero, 3
+; LA32F-ILP32D-NEXT:    ori $a3, $zero, 4
+; LA32F-ILP32D-NEXT:    ori $a4, $zero, 5
+; LA32F-ILP32D-NEXT:    ori $a5, $zero, 6
+; LA32F-ILP32D-NEXT:    ori $a6, $zero, 7
+; LA32F-ILP32D-NEXT:    ori $a7, $zero, 8
+; LA32F-ILP32D-NEXT:    bl callee_half_in_fregs
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: caller_half_in_fregs:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    lu12i.w $t0, -12
+; LA32D-ILP32S-NEXT:    ori $a0, $zero, 1
+; LA32D-ILP32S-NEXT:    ori $a1, $zero, 2
+; LA32D-ILP32S-NEXT:    ori $a2, $zero, 3
+; LA32D-ILP32S-NEXT:    ori $a3, $zero, 4
+; LA32D-ILP32S-NEXT:    ori $a4, $zero, 5
+; LA32D-ILP32S-NEXT:    ori $a5, $zero, 6
+; LA32D-ILP32S-NEXT:    ori $a6, $zero, 7
+; LA32D-ILP32S-NEXT:    ori $a7, $zero, 8
+; LA32D-ILP32S-NEXT:    st.w $t0, $sp, 0
+; LA32D-ILP32S-NEXT:    bl callee_half_in_fregs
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: caller_half_in_fregs:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; LA32D-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
+; LA32D-ILP32D-NEXT:    ori $a0, $zero, 1
+; LA32D-ILP32D-NEXT:    ori $a1, $zero, 2
+; LA32D-ILP32D-NEXT:    ori $a2, $zero, 3
+; LA32D-ILP32D-NEXT:    ori $a3, $zero, 4
+; LA32D-ILP32D-NEXT:    ori $a4, $zero, 5
+; LA32D-ILP32D-NEXT:    ori $a5, $zero, 6
+; LA32D-ILP32D-NEXT:    ori $a6, $zero, 7
+; LA32D-ILP32D-NEXT:    ori $a7, $zero, 8
+; LA32D-ILP32D-NEXT:    bl callee_half_in_fregs
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: caller_half_in_fregs:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -16
+; LA64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; LA64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
+; LA64S-NEXT:    ori $a0, $zero, 1
+; LA64S-NEXT:    ori $a1, $zero, 2
+; LA64S-NEXT:    ori $a2, $zero, 3
+; LA64S-NEXT:    ori $a3, $zero, 4
+; LA64S-NEXT:    ori $a4, $zero, 5
+; LA64S-NEXT:    ori $a5, $zero, 6
+; LA64S-NEXT:    ori $a6, $zero, 7
+; LA64S-NEXT:    ori $a7, $zero, 8
+; LA64S-NEXT:    pcaddu18i $ra, %call36(callee_half_in_fregs)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 16
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: caller_half_in_fregs:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    lu12i.w $t0, -12
+; LA64F-LP64S-NEXT:    lu32i.d $t0, 0
+; LA64F-LP64S-NEXT:    ori $a0, $zero, 1
+; LA64F-LP64S-NEXT:    ori $a1, $zero, 2
+; LA64F-LP64S-NEXT:    ori $a2, $zero, 3
+; LA64F-LP64S-NEXT:    ori $a3, $zero, 4
+; LA64F-LP64S-NEXT:    ori $a4, $zero, 5
+; LA64F-LP64S-NEXT:    ori $a5, $zero, 6
+; LA64F-LP64S-NEXT:    ori $a6, $zero, 7
+; LA64F-LP64S-NEXT:    ori $a7, $zero, 8
+; LA64F-LP64S-NEXT:    st.w $t0, $sp, 0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_in_fregs)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: caller_half_in_fregs:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; LA64F-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
+; LA64F-LP64D-NEXT:    ori $a0, $zero, 1
+; LA64F-LP64D-NEXT:    ori $a1, $zero, 2
+; LA64F-LP64D-NEXT:    ori $a2, $zero, 3
+; LA64F-LP64D-NEXT:    ori $a3, $zero, 4
+; LA64F-LP64D-NEXT:    ori $a4, $zero, 5
+; LA64F-LP64D-NEXT:    ori $a5, $zero, 6
+; LA64F-LP64D-NEXT:    ori $a6, $zero, 7
+; LA64F-LP64D-NEXT:    ori $a7, $zero, 8
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_in_fregs)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: caller_half_in_fregs:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    lu12i.w $t0, -12
+; LA64D-LP64S-NEXT:    lu32i.d $t0, 0
+; LA64D-LP64S-NEXT:    ori $a0, $zero, 1
+; LA64D-LP64S-NEXT:    ori $a1, $zero, 2
+; LA64D-LP64S-NEXT:    ori $a2, $zero, 3
+; LA64D-LP64S-NEXT:    ori $a3, $zero, 4
+; LA64D-LP64S-NEXT:    ori $a4, $zero, 5
+; LA64D-LP64S-NEXT:    ori $a5, $zero, 6
+; LA64D-LP64S-NEXT:    ori $a6, $zero, 7
+; LA64D-LP64S-NEXT:    ori $a7, $zero, 8
+; LA64D-LP64S-NEXT:    st.w $t0, $sp, 0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_in_fregs)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: caller_half_in_fregs:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; LA64D-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
+; LA64D-LP64D-NEXT:    ori $a0, $zero, 1
+; LA64D-LP64D-NEXT:    ori $a1, $zero, 2
+; LA64D-LP64D-NEXT:    ori $a2, $zero, 3
+; LA64D-LP64D-NEXT:    ori $a3, $zero, 4
+; LA64D-LP64D-NEXT:    ori $a4, $zero, 5
+; LA64D-LP64D-NEXT:    ori $a5, $zero, 6
+; LA64D-LP64D-NEXT:    ori $a6, $zero, 7
+; LA64D-LP64D-NEXT:    ori $a7, $zero, 8
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_in_fregs)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = call i32 @callee_half_in_fregs(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, half 2.0)
+  ret i32 %1
+}
+
+define i32 @callee_half_in_gregs(half %a, half %b, half %c, half %d, half %e, half %f, half %g, half %h, half %i, i32 %j) nounwind {
+; LA32S-LABEL: callee_half_in_gregs:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -16
+; LA32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32S-NEXT:    ld.w $fp, $sp, 20
+; LA32S-NEXT:    ld.hu $a0, $sp, 16
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    bl __fixsfsi
+; LA32S-NEXT:    add.w $a0, $fp, $a0
+; LA32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 16
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: callee_half_in_gregs:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    ld.w $fp, $sp, 20
+; LA32F-ILP32S-NEXT:    ld.hu $a0, $sp, 16
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    add.w $a0, $fp, $a0
+; LA32F-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: callee_half_in_gregs:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    move $fp, $a1
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    add.w $a0, $fp, $a0
+; LA32F-ILP32D-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: callee_half_in_gregs:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    ld.w $fp, $sp, 20
+; LA32D-ILP32S-NEXT:    ld.hu $a0, $sp, 16
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32S-NEXT:    add.w $a0, $fp, $a0
+; LA32D-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: callee_half_in_gregs:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    move $fp, $a1
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    add.w $a0, $fp, $a0
+; LA32D-ILP32D-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: callee_half_in_gregs:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -16
+; LA64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64S-NEXT:    move $fp, $a1
+; LA64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64S-NEXT:    add.w $a0, $fp, $a0
+; LA64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 16
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: callee_half_in_gregs:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    ld.w $fp, $sp, 24
+; LA64F-LP64S-NEXT:    ld.hu $a0, $sp, 16
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64S-NEXT:    add.w $a0, $fp, $a0
+; LA64F-LP64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: callee_half_in_gregs:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    move $fp, $a1
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64D-NEXT:    add.w $a0, $fp, $a0
+; LA64F-LP64D-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: callee_half_in_gregs:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    ld.w $fp, $sp, 24
+; LA64D-LP64S-NEXT:    ld.hu $a0, $sp, 16
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64S-NEXT:    add.w $a0, $fp, $a0
+; LA64D-LP64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: callee_half_in_gregs:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    move $fp, $a1
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64D-NEXT:    add.w $a0, $fp, $a0
+; LA64D-LP64D-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = fptosi half %i to i32
+  %2 = add i32 %j, %1
+  ret i32 %2
+}
+
+define i32 @caller_half_in_gregs() nounwind {
+; LA32S-LABEL: caller_half_in_gregs:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -16
+; LA32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT:    ori $a0, $zero, 10
+; LA32S-NEXT:    st.w $a0, $sp, 4
+; LA32S-NEXT:    lu12i.w $a1, 4
+; LA32S-NEXT:    ori $t0, $a1, 2176
+; LA32S-NEXT:    lu12i.w $a0, 3
+; LA32S-NEXT:    ori $a0, $a0, 3072
+; LA32S-NEXT:    ori $a2, $a1, 512
+; LA32S-NEXT:    ori $a3, $a1, 1024
+; LA32S-NEXT:    ori $a4, $a1, 1280
+; LA32S-NEXT:    ori $a5, $a1, 1536
+; LA32S-NEXT:    ori $a6, $a1, 1792
+; LA32S-NEXT:    ori $a7, $a1, 2048
+; LA32S-NEXT:    st.w $t0, $sp, 0
+; LA32S-NEXT:    bl callee_half_in_gregs
+; LA32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 16
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: caller_half_in_gregs:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    ori $a0, $zero, 10
+; LA32F-ILP32S-NEXT:    st.w $a0, $sp, 4
+; LA32F-ILP32S-NEXT:    lu12i.w $a1, -12
+; LA32F-ILP32S-NEXT:    ori $t0, $a1, 2176
+; LA32F-ILP32S-NEXT:    lu12i.w $a0, -13
+; LA32F-ILP32S-NEXT:    ori $a0, $a0, 3072
+; LA32F-ILP32S-NEXT:    ori $a2, $a1, 512
+; LA32F-ILP32S-NEXT:    ori $a3, $a1, 1024
+; LA32F-ILP32S-NEXT:    ori $a4, $a1, 1280
+; LA32F-ILP32S-NEXT:    ori $a5, $a1, 1536
+; LA32F-ILP32S-NEXT:    ori $a6, $a1, 1792
+; LA32F-ILP32S-NEXT:    ori $a7, $a1, 2048
+; LA32F-ILP32S-NEXT:    st.w $t0, $sp, 0
+; LA32F-ILP32S-NEXT:    bl callee_half_in_gregs
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: caller_half_in_gregs:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA32F-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI3_0)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA32F-ILP32D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI3_1)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; LA32F-ILP32D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI3_2)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; LA32F-ILP32D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI3_3)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; LA32F-ILP32D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI3_4)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; LA32F-ILP32D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI3_5)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; LA32F-ILP32D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI3_6)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_7)
+; LA32F-ILP32D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI3_7)
+; LA32F-ILP32D-NEXT:    lu12i.w $a0, -12
+; LA32F-ILP32D-NEXT:    ori $a0, $a0, 2176
+; LA32F-ILP32D-NEXT:    ori $a1, $zero, 10
+; LA32F-ILP32D-NEXT:    bl callee_half_in_gregs
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: caller_half_in_gregs:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    ori $a0, $zero, 10
+; LA32D-ILP32S-NEXT:    st.w $a0, $sp, 4
+; LA32D-ILP32S-NEXT:    lu12i.w $a1, -12
+; LA32D-ILP32S-NEXT:    ori $t0, $a1, 2176
+; LA32D-ILP32S-NEXT:    lu12i.w $a0, -13
+; LA32D-ILP32S-NEXT:    ori $a0, $a0, 3072
+; LA32D-ILP32S-NEXT:    ori $a2, $a1, 512
+; LA32D-ILP32S-NEXT:    ori $a3, $a1, 1024
+; LA32D-ILP32S-NEXT:    ori $a4, $a1, 1280
+; LA32D-ILP32S-NEXT:    ori $a5, $a1, 1536
+; LA32D-ILP32S-NEXT:    ori $a6, $a1, 1792
+; LA32D-ILP32S-NEXT:    ori $a7, $a1, 2048
+; LA32D-ILP32S-NEXT:    st.w $t0, $sp, 0
+; LA32D-ILP32S-NEXT:    bl callee_half_in_gregs
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: caller_half_in_gregs:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA32D-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI3_0)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA32D-ILP32D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI3_1)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; LA32D-ILP32D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI3_2)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; LA32D-ILP32D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI3_3)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; LA32D-ILP32D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI3_4)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; LA32D-ILP32D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI3_5)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; LA32D-ILP32D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI3_6)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_7)
+; LA32D-ILP32D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI3_7)
+; LA32D-ILP32D-NEXT:    lu12i.w $a0, -12
+; LA32D-ILP32D-NEXT:    ori $a0, $a0, 2176
+; LA32D-ILP32D-NEXT:    ori $a1, $zero, 10
+; LA32D-ILP32D-NEXT:    bl callee_half_in_gregs
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: caller_half_in_gregs:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -16
+; LA64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA64S-NEXT:    fld.s $ft0, $a0, %pc_lo12(.LCPI3_0)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI3_1)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; LA64S-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI3_2)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; LA64S-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI3_3)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; LA64S-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI3_4)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; LA64S-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI3_5)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; LA64S-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI3_6)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_7)
+; LA64S-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI3_7)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_8)
+; LA64S-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI3_8)
+; LA64S-NEXT:    movfr2gr.s $a0, $ft0
+; LA64S-NEXT:    ori $a1, $zero, 10
+; LA64S-NEXT:    pcaddu18i $ra, %call36(callee_half_in_gregs)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 16
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: caller_half_in_gregs:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -32
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA64F-LP64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI3_0)
+; LA64F-LP64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA64F-LP64S-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI3_1)
+; LA64F-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64S-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI3_2)
+; LA64F-LP64S-NEXT:    fld.s $fa0, $a1, %pc_lo12(.LCPI3_2)
+; LA64F-LP64S-NEXT:    movfr2gr.s $a1, $fa1
+; LA64F-LP64S-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI3_3)
+; LA64F-LP64S-NEXT:    fld.s $fa1, $a2, %pc_lo12(.LCPI3_3)
+; LA64F-LP64S-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-LP64S-NEXT:    pcalau12i $a3, %pc_hi20(.LCPI3_4)
+; LA64F-LP64S-NEXT:    fld.s $fa0, $a3, %pc_lo12(.LCPI3_4)
+; LA64F-LP64S-NEXT:    movfr2gr.s $a3, $fa1
+; LA64F-LP64S-NEXT:    pcalau12i $a4, %pc_hi20(.LCPI3_5)
+; LA64F-LP64S-NEXT:    fld.s $fa1, $a4, %pc_lo12(.LCPI3_5)
+; LA64F-LP64S-NEXT:    movfr2gr.s $a4, $fa0
+; LA64F-LP64S-NEXT:    pcalau12i $a5, %pc_hi20(.LCPI3_6)
+; LA64F-LP64S-NEXT:    fld.s $fa0, $a5, %pc_lo12(.LCPI3_6)
+; LA64F-LP64S-NEXT:    movfr2gr.s $a5, $fa1
+; LA64F-LP64S-NEXT:    ori $a6, $zero, 10
+; LA64F-LP64S-NEXT:    st.d $a6, $sp, 8
+; LA64F-LP64S-NEXT:    movfr2gr.s $a6, $fa0
+; LA64F-LP64S-NEXT:    pcalau12i $a7, %pc_hi20(.LCPI3_7)
+; LA64F-LP64S-NEXT:    fld.s $fa0, $a7, %pc_lo12(.LCPI3_7)
+; LA64F-LP64S-NEXT:    lu12i.w $a7, -12
+; LA64F-LP64S-NEXT:    ori $t0, $a7, 2176
+; LA64F-LP64S-NEXT:    lu32i.d $t0, 0
+; LA64F-LP64S-NEXT:    movfr2gr.s $a7, $fa0
+; LA64F-LP64S-NEXT:    st.w $t0, $sp, 0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_in_gregs)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 32
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: caller_half_in_gregs:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA64F-LP64D-NEXT:    fld.s $ft0, $a0, %pc_lo12(.LCPI3_0)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA64F-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI3_1)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; LA64F-LP64D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI3_2)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; LA64F-LP64D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI3_3)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; LA64F-LP64D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI3_4)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; LA64F-LP64D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI3_5)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; LA64F-LP64D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI3_6)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_7)
+; LA64F-LP64D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI3_7)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_8)
+; LA64F-LP64D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI3_8)
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $ft0
+; LA64F-LP64D-NEXT:    ori $a1, $zero, 10
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_in_gregs)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: caller_half_in_gregs:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -32
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA64D-LP64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI3_0)
+; LA64D-LP64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA64D-LP64S-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI3_1)
+; LA64D-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64S-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI3_2)
+; LA64D-LP64S-NEXT:    fld.s $fa0, $a1, %pc_lo12(.LCPI3_2)
+; LA64D-LP64S-NEXT:    movfr2gr.s $a1, $fa1
+; LA64D-LP64S-NEXT:    pcalau12i $a2, %pc_hi20(.LCPI3_3)
+; LA64D-LP64S-NEXT:    fld.s $fa1, $a2, %pc_lo12(.LCPI3_3)
+; LA64D-LP64S-NEXT:    movfr2gr.s $a2, $fa0
+; LA64D-LP64S-NEXT:    pcalau12i $a3, %pc_hi20(.LCPI3_4)
+; LA64D-LP64S-NEXT:    fld.s $fa0, $a3, %pc_lo12(.LCPI3_4)
+; LA64D-LP64S-NEXT:    movfr2gr.s $a3, $fa1
+; LA64D-LP64S-NEXT:    pcalau12i $a4, %pc_hi20(.LCPI3_5)
+; LA64D-LP64S-NEXT:    fld.s $fa1, $a4, %pc_lo12(.LCPI3_5)
+; LA64D-LP64S-NEXT:    movfr2gr.s $a4, $fa0
+; LA64D-LP64S-NEXT:    pcalau12i $a5, %pc_hi20(.LCPI3_6)
+; LA64D-LP64S-NEXT:    fld.s $fa0, $a5, %pc_lo12(.LCPI3_6)
+; LA64D-LP64S-NEXT:    movfr2gr.s $a5, $fa1
+; LA64D-LP64S-NEXT:    ori $a6, $zero, 10
+; LA64D-LP64S-NEXT:    st.d $a6, $sp, 8
+; LA64D-LP64S-NEXT:    movfr2gr.s $a6, $fa0
+; LA64D-LP64S-NEXT:    pcalau12i $a7, %pc_hi20(.LCPI3_7)
+; LA64D-LP64S-NEXT:    fld.s $fa0, $a7, %pc_lo12(.LCPI3_7)
+; LA64D-LP64S-NEXT:    lu12i.w $a7, -12
+; LA64D-LP64S-NEXT:    ori $t0, $a7, 2176
+; LA64D-LP64S-NEXT:    lu32i.d $t0, 0
+; LA64D-LP64S-NEXT:    movfr2gr.s $a7, $fa0
+; LA64D-LP64S-NEXT:    st.w $t0, $sp, 0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_in_gregs)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 32
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: caller_half_in_gregs:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA64D-LP64D-NEXT:    fld.s $ft0, $a0, %pc_lo12(.LCPI3_0)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA64D-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI3_1)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; LA64D-LP64D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI3_2)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; LA64D-LP64D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI3_3)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; LA64D-LP64D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI3_4)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; LA64D-LP64D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI3_5)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; LA64D-LP64D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI3_6)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_7)
+; LA64D-LP64D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI3_7)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_8)
+; LA64D-LP64D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI3_8)
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $ft0
+; LA64D-LP64D-NEXT:    ori $a1, $zero, 10
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_in_gregs)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = call i32 @callee_half_in_gregs(half 1.0, half 2.0, half 3.0, half 4.0, half 5.0, half 6.0, half 7.0, half 8.0, half 9.0, i32 10)
+  ret i32 %1
+}
+
+define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, half %i, half %j, half %k, half %l, half %m, half %n, half %o, half %p, half %q) nounwind {
+; LA32S-LABEL: callee_half_on_stack:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -16
+; LA32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32S-NEXT:    ld.hu $a0, $sp, 48
+; LA32S-NEXT:    move $fp, $a7
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    bl __fixsfsi
+; LA32S-NEXT:    add.w $a0, $fp, $a0
+; LA32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 16
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: callee_half_on_stack:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    ld.hu $a0, $sp, 48
+; LA32F-ILP32S-NEXT:    move $fp, $a7
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    add.w $a0, $fp, $a0
+; LA32F-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: callee_half_on_stack:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    ld.hu $a0, $sp, 16
+; LA32F-ILP32D-NEXT:    move $fp, $a7
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    add.w $a0, $fp, $a0
+; LA32F-ILP32D-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: callee_half_on_stack:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    ld.hu $a0, $sp, 48
+; LA32D-ILP32S-NEXT:    move $fp, $a7
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32S-NEXT:    add.w $a0, $fp, $a0
+; LA32D-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: callee_half_on_stack:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    ld.hu $a0, $sp, 16
+; LA32D-ILP32D-NEXT:    move $fp, $a7
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    add.w $a0, $fp, $a0
+; LA32D-ILP32D-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: callee_half_on_stack:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -16
+; LA64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64S-NEXT:    ld.hu $a0, $sp, 16
+; LA64S-NEXT:    move $fp, $a7
+; LA64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64S-NEXT:    add.w $a0, $fp, $a0
+; LA64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 16
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: callee_half_on_stack:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    ld.hu $a0, $sp, 80
+; LA64F-LP64S-NEXT:    move $fp, $a7
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64S-NEXT:    add.w $a0, $fp, $a0
+; LA64F-LP64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: callee_half_on_stack:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    ld.hu $a0, $sp, 16
+; LA64F-LP64D-NEXT:    move $fp, $a7
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64D-NEXT:    add.w $a0, $fp, $a0
+; LA64F-LP64D-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: callee_half_on_stack:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    ld.hu $a0, $sp, 80
+; LA64D-LP64S-NEXT:    move $fp, $a7
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64S-NEXT:    add.w $a0, $fp, $a0
+; LA64D-LP64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: callee_half_on_stack:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    ld.hu $a0, $sp, 16
+; LA64D-LP64D-NEXT:    move $fp, $a7
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64D-NEXT:    add.w $a0, $fp, $a0
+; LA64D-LP64D-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = fptosi half %q to i32
+  %2 = add i32 %h, %1
+  ret i32 %2
+}
+
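+; Note how the caller materializes the half arguments below: without the F
+; extension (LA32S) the raw fp16 bit patterns are built with lu12i.w/ori
+; (e.g. 0x4900 for half 10.0) and all nine halves are stored to the outgoing
+; stack area, while with F/D the upper 16 bits are set to ones (lu12i.w -12
+; yields 0xffff4900) and, under the hard-float ABIs, the first eight halves
+; are loaded from the constant pool into $fa0-$fa7 so that only the ninth
+; goes on the stack.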
+define i32 @caller_half_on_stack() nounwind {
+; LA32S-LABEL: caller_half_on_stack:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -48
+; LA32S-NEXT:    st.w $ra, $sp, 44 # 4-byte Folded Spill
+; LA32S-NEXT:    lu12i.w $a0, 4
+; LA32S-NEXT:    ori $a1, $a0, 3200
+; LA32S-NEXT:    st.w $a1, $sp, 32
+; LA32S-NEXT:    ori $a1, $a0, 3136
+; LA32S-NEXT:    st.w $a1, $sp, 28
+; LA32S-NEXT:    ori $a1, $a0, 3072
+; LA32S-NEXT:    st.w $a1, $sp, 24
+; LA32S-NEXT:    ori $a1, $a0, 2944
+; LA32S-NEXT:    st.w $a1, $sp, 20
+; LA32S-NEXT:    ori $a1, $a0, 2816
+; LA32S-NEXT:    st.w $a1, $sp, 16
+; LA32S-NEXT:    ori $a1, $a0, 2688
+; LA32S-NEXT:    st.w $a1, $sp, 12
+; LA32S-NEXT:    ori $a1, $a0, 2560
+; LA32S-NEXT:    st.w $a1, $sp, 8
+; LA32S-NEXT:    ori $a1, $a0, 2432
+; LA32S-NEXT:    st.w $a1, $sp, 4
+; LA32S-NEXT:    ori $t0, $a0, 2304
+; LA32S-NEXT:    ori $a0, $zero, 1
+; LA32S-NEXT:    ori $a1, $zero, 2
+; LA32S-NEXT:    ori $a2, $zero, 3
+; LA32S-NEXT:    ori $a3, $zero, 4
+; LA32S-NEXT:    ori $a4, $zero, 5
+; LA32S-NEXT:    ori $a5, $zero, 6
+; LA32S-NEXT:    ori $a6, $zero, 7
+; LA32S-NEXT:    ori $a7, $zero, 8
+; LA32S-NEXT:    st.w $t0, $sp, 0
+; LA32S-NEXT:    bl callee_half_on_stack
+; LA32S-NEXT:    ld.w $ra, $sp, 44 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 48
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: caller_half_on_stack:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -48
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 44 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    lu12i.w $a0, -12
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 3200
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 32
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 3136
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 28
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 3072
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 24
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 2944
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 20
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 2816
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 16
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 2688
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 12
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 2560
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 8
+; LA32F-ILP32S-NEXT:    ori $a1, $a0, 2432
+; LA32F-ILP32S-NEXT:    st.w $a1, $sp, 4
+; LA32F-ILP32S-NEXT:    ori $t0, $a0, 2304
+; LA32F-ILP32S-NEXT:    ori $a0, $zero, 1
+; LA32F-ILP32S-NEXT:    ori $a1, $zero, 2
+; LA32F-ILP32S-NEXT:    ori $a2, $zero, 3
+; LA32F-ILP32S-NEXT:    ori $a3, $zero, 4
+; LA32F-ILP32S-NEXT:    ori $a4, $zero, 5
+; LA32F-ILP32S-NEXT:    ori $a5, $zero, 6
+; LA32F-ILP32S-NEXT:    ori $a6, $zero, 7
+; LA32F-ILP32S-NEXT:    ori $a7, $zero, 8
+; LA32F-ILP32S-NEXT:    st.w $t0, $sp, 0
+; LA32F-ILP32S-NEXT:    bl callee_half_on_stack
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 44 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 48
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: caller_half_on_stack:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    lu12i.w $a0, -12
+; LA32F-ILP32D-NEXT:    ori $t0, $a0, 3200
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; LA32F-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_1)
+; LA32F-ILP32D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_2)
+; LA32F-ILP32D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_3)
+; LA32F-ILP32D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_4)
+; LA32F-ILP32D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_5)
+; LA32F-ILP32D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_6)
+; LA32F-ILP32D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_7)
+; LA32F-ILP32D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
+; LA32F-ILP32D-NEXT:    ori $a0, $zero, 1
+; LA32F-ILP32D-NEXT:    ori $a1, $zero, 2
+; LA32F-ILP32D-NEXT:    ori $a2, $zero, 3
+; LA32F-ILP32D-NEXT:    ori $a3, $zero, 4
+; LA32F-ILP32D-NEXT:    ori $a4, $zero, 5
+; LA32F-ILP32D-NEXT:    ori $a5, $zero, 6
+; LA32F-ILP32D-NEXT:    ori $a6, $zero, 7
+; LA32F-ILP32D-NEXT:    ori $a7, $zero, 8
+; LA32F-ILP32D-NEXT:    st.w $t0, $sp, 0
+; LA32F-ILP32D-NEXT:    bl callee_half_on_stack
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: caller_half_on_stack:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -48
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 44 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    lu12i.w $a0, -12
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 3200
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 32
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 3136
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 28
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 3072
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 24
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 2944
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 20
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 2816
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 16
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 2688
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 12
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 2560
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 8
+; LA32D-ILP32S-NEXT:    ori $a1, $a0, 2432
+; LA32D-ILP32S-NEXT:    st.w $a1, $sp, 4
+; LA32D-ILP32S-NEXT:    ori $t0, $a0, 2304
+; LA32D-ILP32S-NEXT:    ori $a0, $zero, 1
+; LA32D-ILP32S-NEXT:    ori $a1, $zero, 2
+; LA32D-ILP32S-NEXT:    ori $a2, $zero, 3
+; LA32D-ILP32S-NEXT:    ori $a3, $zero, 4
+; LA32D-ILP32S-NEXT:    ori $a4, $zero, 5
+; LA32D-ILP32S-NEXT:    ori $a5, $zero, 6
+; LA32D-ILP32S-NEXT:    ori $a6, $zero, 7
+; LA32D-ILP32S-NEXT:    ori $a7, $zero, 8
+; LA32D-ILP32S-NEXT:    st.w $t0, $sp, 0
+; LA32D-ILP32S-NEXT:    bl callee_half_on_stack
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 44 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 48
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: caller_half_on_stack:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    lu12i.w $a0, -12
+; LA32D-ILP32D-NEXT:    ori $t0, $a0, 3200
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; LA32D-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_1)
+; LA32D-ILP32D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_2)
+; LA32D-ILP32D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_3)
+; LA32D-ILP32D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_4)
+; LA32D-ILP32D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_5)
+; LA32D-ILP32D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_6)
+; LA32D-ILP32D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_7)
+; LA32D-ILP32D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
+; LA32D-ILP32D-NEXT:    ori $a0, $zero, 1
+; LA32D-ILP32D-NEXT:    ori $a1, $zero, 2
+; LA32D-ILP32D-NEXT:    ori $a2, $zero, 3
+; LA32D-ILP32D-NEXT:    ori $a3, $zero, 4
+; LA32D-ILP32D-NEXT:    ori $a4, $zero, 5
+; LA32D-ILP32D-NEXT:    ori $a5, $zero, 6
+; LA32D-ILP32D-NEXT:    ori $a6, $zero, 7
+; LA32D-ILP32D-NEXT:    ori $a7, $zero, 8
+; LA32D-ILP32D-NEXT:    st.w $t0, $sp, 0
+; LA32D-ILP32D-NEXT:    bl callee_half_on_stack
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: caller_half_on_stack:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -16
+; LA64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    lu12i.w $a0, -12
+; LA64S-NEXT:    ori $t0, $a0, 3200
+; LA64S-NEXT:    lu32i.d $t0, 0
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; LA64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_1)
+; LA64S-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_2)
+; LA64S-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_3)
+; LA64S-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_4)
+; LA64S-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_5)
+; LA64S-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_6)
+; LA64S-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_7)
+; LA64S-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
+; LA64S-NEXT:    ori $a0, $zero, 1
+; LA64S-NEXT:    ori $a1, $zero, 2
+; LA64S-NEXT:    ori $a2, $zero, 3
+; LA64S-NEXT:    ori $a3, $zero, 4
+; LA64S-NEXT:    ori $a4, $zero, 5
+; LA64S-NEXT:    ori $a5, $zero, 6
+; LA64S-NEXT:    ori $a6, $zero, 7
+; LA64S-NEXT:    ori $a7, $zero, 8
+; LA64S-NEXT:    st.w $t0, $sp, 0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(callee_half_on_stack)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 16
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: caller_half_on_stack:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -80
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    lu12i.w $a0, -12
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 3200
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 64
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 3136
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 56
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 3072
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 48
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 2944
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 40
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 2816
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 32
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 2688
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 24
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 2560
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 16
+; LA64F-LP64S-NEXT:    ori $a1, $a0, 2432
+; LA64F-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64F-LP64S-NEXT:    st.w $a1, $sp, 8
+; LA64F-LP64S-NEXT:    ori $t0, $a0, 2304
+; LA64F-LP64S-NEXT:    lu32i.d $t0, 0
+; LA64F-LP64S-NEXT:    ori $a0, $zero, 1
+; LA64F-LP64S-NEXT:    ori $a1, $zero, 2
+; LA64F-LP64S-NEXT:    ori $a2, $zero, 3
+; LA64F-LP64S-NEXT:    ori $a3, $zero, 4
+; LA64F-LP64S-NEXT:    ori $a4, $zero, 5
+; LA64F-LP64S-NEXT:    ori $a5, $zero, 6
+; LA64F-LP64S-NEXT:    ori $a6, $zero, 7
+; LA64F-LP64S-NEXT:    ori $a7, $zero, 8
+; LA64F-LP64S-NEXT:    st.w $t0, $sp, 0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_on_stack)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 80
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: caller_half_on_stack:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    lu12i.w $a0, -12
+; LA64F-LP64D-NEXT:    ori $t0, $a0, 3200
+; LA64F-LP64D-NEXT:    lu32i.d $t0, 0
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; LA64F-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_1)
+; LA64F-LP64D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_2)
+; LA64F-LP64D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_3)
+; LA64F-LP64D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_4)
+; LA64F-LP64D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_5)
+; LA64F-LP64D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_6)
+; LA64F-LP64D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_7)
+; LA64F-LP64D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
+; LA64F-LP64D-NEXT:    ori $a0, $zero, 1
+; LA64F-LP64D-NEXT:    ori $a1, $zero, 2
+; LA64F-LP64D-NEXT:    ori $a2, $zero, 3
+; LA64F-LP64D-NEXT:    ori $a3, $zero, 4
+; LA64F-LP64D-NEXT:    ori $a4, $zero, 5
+; LA64F-LP64D-NEXT:    ori $a5, $zero, 6
+; LA64F-LP64D-NEXT:    ori $a6, $zero, 7
+; LA64F-LP64D-NEXT:    ori $a7, $zero, 8
+; LA64F-LP64D-NEXT:    st.w $t0, $sp, 0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_on_stack)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: caller_half_on_stack:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -80
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    lu12i.w $a0, -12
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 3200
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 64
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 3136
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 56
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 3072
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 48
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 2944
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 40
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 2816
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 32
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 2688
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 24
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 2560
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 16
+; LA64D-LP64S-NEXT:    ori $a1, $a0, 2432
+; LA64D-LP64S-NEXT:    lu32i.d $a1, 0
+; LA64D-LP64S-NEXT:    st.w $a1, $sp, 8
+; LA64D-LP64S-NEXT:    ori $t0, $a0, 2304
+; LA64D-LP64S-NEXT:    lu32i.d $t0, 0
+; LA64D-LP64S-NEXT:    ori $a0, $zero, 1
+; LA64D-LP64S-NEXT:    ori $a1, $zero, 2
+; LA64D-LP64S-NEXT:    ori $a2, $zero, 3
+; LA64D-LP64S-NEXT:    ori $a3, $zero, 4
+; LA64D-LP64S-NEXT:    ori $a4, $zero, 5
+; LA64D-LP64S-NEXT:    ori $a5, $zero, 6
+; LA64D-LP64S-NEXT:    ori $a6, $zero, 7
+; LA64D-LP64S-NEXT:    ori $a7, $zero, 8
+; LA64D-LP64S-NEXT:    st.w $t0, $sp, 0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_on_stack)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 80
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: caller_half_on_stack:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    lu12i.w $a0, -12
+; LA64D-LP64D-NEXT:    ori $t0, $a0, 3200
+; LA64D-LP64D-NEXT:    lu32i.d $t0, 0
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; LA64D-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_1)
+; LA64D-LP64D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_2)
+; LA64D-LP64D-NEXT:    fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_3)
+; LA64D-LP64D-NEXT:    fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_4)
+; LA64D-LP64D-NEXT:    fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_5)
+; LA64D-LP64D-NEXT:    fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_6)
+; LA64D-LP64D-NEXT:    fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI5_7)
+; LA64D-LP64D-NEXT:    fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
+; LA64D-LP64D-NEXT:    ori $a0, $zero, 1
+; LA64D-LP64D-NEXT:    ori $a1, $zero, 2
+; LA64D-LP64D-NEXT:    ori $a2, $zero, 3
+; LA64D-LP64D-NEXT:    ori $a3, $zero, 4
+; LA64D-LP64D-NEXT:    ori $a4, $zero, 5
+; LA64D-LP64D-NEXT:    ori $a5, $zero, 6
+; LA64D-LP64D-NEXT:    ori $a6, $zero, 7
+; LA64D-LP64D-NEXT:    ori $a7, $zero, 8
+; LA64D-LP64D-NEXT:    st.w $t0, $sp, 0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_on_stack)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = call i32 @callee_half_on_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, half 10.0, half 11.0, half 12.0, half 13.0, half 14.0, half 15.0, half 16.0, half 17.0, half 18.0)
+  ret i32 %1
+}
+
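+; Returning half 1.0 (bit pattern 0x3c00): the GPR ABIs return the pattern in
+; $a0, with the upper bits set to ones once the F extension is available
+; (lu12i.w -13 plus ori 3072 gives 0xffff3c00), while the FPR ABIs return it
+; in $fa0 via a constant-pool load.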
+define half @callee_half_ret() nounwind {
+; LA32S-LABEL: callee_half_ret:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    lu12i.w $a0, 3
+; LA32S-NEXT:    ori $a0, $a0, 3072
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: callee_half_ret:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    lu12i.w $a0, -13
+; LA32F-ILP32S-NEXT:    ori $a0, $a0, 3072
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: callee_half_ret:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA32F-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: callee_half_ret:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    lu12i.w $a0, -13
+; LA32D-ILP32S-NEXT:    ori $a0, $a0, 3072
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: callee_half_ret:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA32D-ILP32D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: callee_half_ret:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: callee_half_ret:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA64F-LP64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA64F-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: callee_half_ret:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA64F-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: callee_half_ret:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA64D-LP64S-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA64D-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: callee_half_ret:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA64D-LP64D-NEXT:    fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA64D-LP64D-NEXT:    ret
+  ret half 1.0
+}
+
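+; The caller widens the returned half with __extendhfsf2 before truncating to
+; i32: pure soft-float (LA32S) masks $a0 to 16 bits with bstrpick.w first,
+; and the soft-float ABIs on FPU-capable targets shuttle the value between
+; $a0 and $fa0 around the libcalls.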
+define i32 @caller_half_ret() nounwind {
+; LA32S-LABEL: caller_half_ret:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -16
+; LA32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT:    bl callee_half_ret
+; LA32S-NEXT:    bstrpick.w $a0, $a0, 15, 0
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    bl __fixsfsi
+; LA32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 16
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: caller_half_ret:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    bl callee_half_ret
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: caller_half_ret:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    bl callee_half_ret
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: caller_half_ret:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    bl callee_half_ret
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: caller_half_ret:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    bl callee_half_ret
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: caller_half_ret:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -16
+; LA64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    pcaddu18i $ra, %call36(callee_half_ret)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 16
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: caller_half_ret:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_ret)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: caller_half_ret:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_ret)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64F-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: caller_half_ret:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(callee_half_ret)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: caller_half_ret:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(callee_half_ret)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-LP64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = call half @callee_half_ret()
+  %2 = fptosi half %1 to i32
+  ret i32 %2
+}
diff --git a/llvm/test/CodeGen/LoongArch/fp16-promote.ll b/llvm/test/CodeGen/LoongArch/fp16-promote.ll
index 6a1610c27937d..c49e9ba99e0f8 100644
--- a/llvm/test/CodeGen/LoongArch/fp16-promote.ll
+++ b/llvm/test/CodeGen/LoongArch/fp16-promote.ll
@@ -22,14 +22,26 @@ define void @test_load_store(ptr %p, ptr %q) nounwind {
 define float @test_fpextend_float(ptr %p) nounwind {
 ; LA32-LABEL: test_fpextend_float:
 ; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ld.hu $a0, $a0, 0
-; LA32-NEXT:    b __extendhfsf2
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    bl __extendhfsf2
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_fpextend_float:
 ; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
 ; LA64-NEXT:    ld.hu $a0, $a0, 0
-; LA64-NEXT:    pcaddu18i $t8, %call36(__extendhfsf2)
-; LA64-NEXT:    jr $t8
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
   %a = load half, ptr %p
   %r = fpext half %a to float
   ret float %r
@@ -41,6 +53,7 @@ define double @test_fpextend_double(ptr %p) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -16
 ; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
 ; LA32-NEXT:    ld.hu $a0, $a0, 0
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fcvt.d.s $fa0, $fa0
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -52,6 +65,7 @@ define double @test_fpextend_double(ptr %p) nounwind {
 ; LA64-NEXT:    addi.d $sp, $sp, -16
 ; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
 ; LA64-NEXT:    ld.hu $a0, $a0, 0
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
 ; LA64-NEXT:    fcvt.d.s $fa0, $fa0
@@ -71,6 +85,7 @@ define void @test_fptrunc_float(float %f, ptr %p) nounwind {
 ; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
 ; LA32-NEXT:    move $fp, $a0
 ; LA32-NEXT:    bl __truncsfhf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
 ; LA32-NEXT:    st.h $a0, $fp, 0
 ; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -85,6 +100,7 @@ define void @test_fptrunc_float(float %f, ptr %p) nounwind {
 ; LA64-NEXT:    move $fp, $a0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
 ; LA64-NEXT:    st.h $a0, $fp, 0
 ; LA64-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
@@ -103,6 +119,7 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
 ; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
 ; LA32-NEXT:    move $fp, $a0
 ; LA32-NEXT:    bl __truncdfhf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
 ; LA32-NEXT:    st.h $a0, $fp, 0
 ; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -117,6 +134,7 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
 ; LA64-NEXT:    move $fp, $a0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncdfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
 ; LA64-NEXT:    st.h $a0, $fp, 0
 ; LA64-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
@@ -130,43 +148,51 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
 define half @test_fadd_reg(half %a, half %b) nounwind {
 ; LA32-LABEL: test_fadd_reg:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
-; LA32-NEXT:    fst.d $fs0, $sp, 0 # 8-byte Folded Spill
-; LA32-NEXT:    move $fp, $a0
-; LA32-NEXT:    move $a0, $a1
-; LA32-NEXT:    bl __extendhfsf2
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
 ; LA32-NEXT:    fmov.s $fs0, $fa0
-; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    fmov.s $fa0, $fa1
+; LA32-NEXT:    bl __extendhfsf2
+; LA32-NEXT:    fmov.s $fs1, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fs0
 ; LA32-NEXT:    bl __extendhfsf2
-; LA32-NEXT:    fadd.s $fa0, $fa0, $fs0
+; LA32-NEXT:    fadd.s $fa0, $fa0, $fs1
 ; LA32-NEXT:    bl __truncsfhf2
-; LA32-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
-; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
+; LA32-NEXT:    lu12i.w $a1, -16
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_fadd_reg:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.d $sp, $sp, -32
 ; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
-; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
-; LA64-NEXT:    fst.d $fs0, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    move $fp, $a0
-; LA64-NEXT:    move $a0, $a1
+; LA64-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    fmov.s $fs0, $fa0
+; LA64-NEXT:    fmov.s $fa0, $fa1
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fmov.s $fs0, $fa0
-; LA64-NEXT:    move $a0, $fp
+; LA64-NEXT:    fmov.s $fs1, $fa0
+; LA64-NEXT:    fmov.s $fa0, $fs0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fadd.s $fa0, $fa0, $fs0
+; LA64-NEXT:    fadd.s $fa0, $fa0, $fs1
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fld.d $fs0, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    lu12i.w $a1, -16
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 32
 ; LA64-NEXT:    ret
@@ -180,20 +206,23 @@ define void @test_fadd_mem(ptr %p, ptr %q) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -32
 ; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
 ; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
-; LA32-NEXT:    fst.d $fs0, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
 ; LA32-NEXT:    move $fp, $a0
-; LA32-NEXT:    ld.hu $s0, $a0, 0
-; LA32-NEXT:    ld.hu $a0, $a1, 0
+; LA32-NEXT:    ld.hu $a0, $a0, 0
+; LA32-NEXT:    ld.hu $a1, $a1, 0
+; LA32-NEXT:    movgr2fr.w $fs0, $a0
+; LA32-NEXT:    movgr2fr.w $fa0, $a1
 ; LA32-NEXT:    bl __extendhfsf2
-; LA32-NEXT:    fmov.s $fs0, $fa0
-; LA32-NEXT:    move $a0, $s0
+; LA32-NEXT:    fmov.s $fs1, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fs0
 ; LA32-NEXT:    bl __extendhfsf2
-; LA32-NEXT:    fadd.s $fa0, $fa0, $fs0
+; LA32-NEXT:    fadd.s $fa0, $fa0, $fs1
 ; LA32-NEXT:    bl __truncsfhf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
 ; LA32-NEXT:    st.h $a0, $fp, 0
-; LA32-NEXT:    fld.d $fs0, $sp, 8 # 8-byte Folded Reload
-; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
 ; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 32
@@ -209,15 +238,17 @@ define void @test_fadd_mem(ptr %p, ptr %q) nounwind {
 ; LA64-NEXT:    move $fp, $a0
 ; LA64-NEXT:    ld.hu $s0, $a0, 0
 ; LA64-NEXT:    ld.hu $a0, $a1, 0
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
 ; LA64-NEXT:    fmov.s $fs0, $fa0
-; LA64-NEXT:    move $a0, $s0
+; LA64-NEXT:    movgr2fr.w $fa0, $s0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
 ; LA64-NEXT:    fadd.s $fa0, $fa0, $fs0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
 ; LA64-NEXT:    st.h $a0, $fp, 0
 ; LA64-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
@@ -235,43 +266,51 @@ define void @test_fadd_mem(ptr %p, ptr %q) nounwind {
 define half @test_fmul_reg(half %a, half %b) nounwind {
 ; LA32-LABEL: test_fmul_reg:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
-; LA32-NEXT:    fst.d $fs0, $sp, 0 # 8-byte Folded Spill
-; LA32-NEXT:    move $fp, $a0
-; LA32-NEXT:    move $a0, $a1
-; LA32-NEXT:    bl __extendhfsf2
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
 ; LA32-NEXT:    fmov.s $fs0, $fa0
-; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    fmov.s $fa0, $fa1
 ; LA32-NEXT:    bl __extendhfsf2
-; LA32-NEXT:    fmul.s $fa0, $fa0, $fs0
+; LA32-NEXT:    fmov.s $fs1, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fs0
+; LA32-NEXT:    bl __extendhfsf2
+; LA32-NEXT:    fmul.s $fa0, $fa0, $fs1
 ; LA32-NEXT:    bl __truncsfhf2
-; LA32-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
-; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
+; LA32-NEXT:    lu12i.w $a1, -16
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: test_fmul_reg:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    addi.d $sp, $sp, -32
 ; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
-; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
-; LA64-NEXT:    fst.d $fs0, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT:    move $fp, $a0
-; LA64-NEXT:    move $a0, $a1
+; LA64-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    fmov.s $fs0, $fa0
+; LA64-NEXT:    fmov.s $fa0, $fa1
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fmov.s $fs0, $fa0
-; LA64-NEXT:    move $a0, $fp
+; LA64-NEXT:    fmov.s $fs1, $fa0
+; LA64-NEXT:    fmov.s $fa0, $fs0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fmul.s $fa0, $fa0, $fs0
+; LA64-NEXT:    fmul.s $fa0, $fa0, $fs1
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    fld.d $fs0, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    lu12i.w $a1, -16
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 32
 ; LA64-NEXT:    ret
@@ -285,20 +324,23 @@ define void @test_fmul_mem(ptr %p, ptr %q) nounwind {
 ; LA32-NEXT:    addi.w $sp, $sp, -32
 ; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
 ; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
-; LA32-NEXT:    fst.d $fs0, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
 ; LA32-NEXT:    move $fp, $a0
-; LA32-NEXT:    ld.hu $s0, $a0, 0
-; LA32-NEXT:    ld.hu $a0, $a1, 0
+; LA32-NEXT:    ld.hu $a0, $a0, 0
+; LA32-NEXT:    ld.hu $a1, $a1, 0
+; LA32-NEXT:    movgr2fr.w $fs0, $a0
+; LA32-NEXT:    movgr2fr.w $fa0, $a1
 ; LA32-NEXT:    bl __extendhfsf2
-; LA32-NEXT:    fmov.s $fs0, $fa0
-; LA32-NEXT:    move $a0, $s0
+; LA32-NEXT:    fmov.s $fs1, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fs0
 ; LA32-NEXT:    bl __extendhfsf2
-; LA32-NEXT:    fmul.s $fa0, $fa0, $fs0
+; LA32-NEXT:    fmul.s $fa0, $fa0, $fs1
 ; LA32-NEXT:    bl __truncsfhf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
 ; LA32-NEXT:    st.h $a0, $fp, 0
-; LA32-NEXT:    fld.d $fs0, $sp, 8 # 8-byte Folded Reload
-; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
 ; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
 ; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 32
@@ -314,15 +356,17 @@ define void @test_fmul_mem(ptr %p, ptr %q) nounwind {
 ; LA64-NEXT:    move $fp, $a0
 ; LA64-NEXT:    ld.hu $s0, $a0, 0
 ; LA64-NEXT:    ld.hu $a0, $a1, 0
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
 ; LA64-NEXT:    fmov.s $fs0, $fa0
-; LA64-NEXT:    move $a0, $s0
+; LA64-NEXT:    movgr2fr.w $fa0, $s0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
 ; LA64-NEXT:    fmul.s $fa0, $fa0, $fs0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
 ; LA64-NEXT:    st.h $a0, $fp, 0
 ; LA64-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
 ; LA64-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
@@ -347,6 +391,10 @@ define half @freeze_half_undef() nounwind {
 ; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fadd.s $fa0, $fa0, $fa0
 ; LA32-NEXT:    bl __truncsfhf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
+; LA32-NEXT:    lu12i.w $a1, -16
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -363,6 +411,10 @@ define half @freeze_half_undef() nounwind {
 ; LA64-NEXT:    fadd.s $fa0, $fa0, $fa0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    lu12i.w $a1, -16
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 16
 ; LA64-NEXT:    ret
@@ -379,6 +431,10 @@ define half @freeze_half_poison(half %maybe.poison) nounwind {
 ; LA32-NEXT:    bl __extendhfsf2
 ; LA32-NEXT:    fadd.s $fa0, $fa0, $fa0
 ; LA32-NEXT:    bl __truncsfhf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
+; LA32-NEXT:    lu12i.w $a1, -16
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
 ; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    ret
@@ -392,6 +448,10 @@ define half @freeze_half_poison(half %maybe.poison) nounwind {
 ; LA64-NEXT:    fadd.s $fa0, $fa0, $fa0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    lu12i.w $a1, -16
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 16
 ; LA64-NEXT:    ret
@@ -418,8 +478,8 @@ define signext i32 @test_half_to_s32(half %a) nounwind {
 ; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64-NEXT:    movfr2gr.d $a0, $fa0
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 16
 ; LA64-NEXT:    ret
@@ -446,8 +506,8 @@ define zeroext i32 @test_half_to_s32_u32(half %a) nounwind {
 ; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
 ; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64-NEXT:    movfr2gr.d $a0, $fa0
 ; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
 ; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 16
diff --git a/llvm/test/CodeGen/LoongArch/issue97975.ll b/llvm/test/CodeGen/LoongArch/issue97975.ll
new file mode 100644
index 0000000000000..2422a7354cb2a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/issue97975.ll
@@ -0,0 +1,438 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f -target-abi=ilp32s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F-ILP32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f -target-abi=ilp32d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F-ILP32D
+; RUN: llc --mtriple=loongarch32 --mattr=+d -target-abi=ilp32s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D-ILP32S
+; RUN: llc --mtriple=loongarch32 --mattr=+d -target-abi=ilp32d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D-ILP32D
+; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f -target-abi=lp64s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F-LP64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f -target-abi=lp64d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F-LP64D
+; RUN: llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D-LP64S
+; RUN: llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D-LP64D
+
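+; Chained half additions, effectively ((%a + %b) + %c); every step
+; round-trips through __extendhfsf2/__truncsfhf2, exercising how the fp16
+; payload moves between GPRs and FPRs under each ABI.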
+define half @f(half %a, half %b, half %c) {
+; LA32S-LABEL: f:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    addi.w $sp, $sp, -16
+; LA32S-NEXT:    .cfi_def_cfa_offset 16
+; LA32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32S-NEXT:    st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32S-NEXT:    st.w $s1, $sp, 0 # 4-byte Folded Spill
+; LA32S-NEXT:    .cfi_offset 1, -4
+; LA32S-NEXT:    .cfi_offset 22, -8
+; LA32S-NEXT:    .cfi_offset 23, -12
+; LA32S-NEXT:    .cfi_offset 24, -16
+; LA32S-NEXT:    move $fp, $a2
+; LA32S-NEXT:    move $s0, $a0
+; LA32S-NEXT:    bstrpick.w $a0, $a1, 15, 0
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    move $s1, $a0
+; LA32S-NEXT:    bstrpick.w $a0, $s0, 15, 0
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    move $a1, $s1
+; LA32S-NEXT:    bl __addsf3
+; LA32S-NEXT:    bl __truncsfhf2
+; LA32S-NEXT:    move $s0, $a0
+; LA32S-NEXT:    bstrpick.w $a0, $fp, 15, 0
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    move $fp, $a0
+; LA32S-NEXT:    bstrpick.w $a0, $s0, 15, 0
+; LA32S-NEXT:    bl __extendhfsf2
+; LA32S-NEXT:    move $a1, $fp
+; LA32S-NEXT:    bl __addsf3
+; LA32S-NEXT:    bl __truncsfhf2
+; LA32S-NEXT:    ld.w $s1, $sp, 0 # 4-byte Folded Reload
+; LA32S-NEXT:    ld.w $s0, $sp, 4 # 4-byte Folded Reload
+; LA32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT:    addi.w $sp, $sp, 16
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: f:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    .cfi_def_cfa_offset 16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    .cfi_offset 1, -4
+; LA32F-ILP32S-NEXT:    .cfi_offset 22, -8
+; LA32F-ILP32S-NEXT:    .cfi_offset 23, -12
+; LA32F-ILP32S-NEXT:    move $fp, $a2
+; LA32F-ILP32S-NEXT:    move $s0, $a0
+; LA32F-ILP32S-NEXT:    move $a0, $a1
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    fst.s $fa0, $sp, 0 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    move $a0, $s0
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    fld.s $fa1, $sp, 0 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    fadd.s $fa0, $fa0, $fa1
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    bl __truncsfhf2
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    fst.s $fa0, $sp, 0 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    move $a0, $fp
+; LA32F-ILP32S-NEXT:    bl __extendhfsf2
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    fld.s $fa1, $sp, 0 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    bl __truncsfhf2
+; LA32F-ILP32S-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32S-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32S-NEXT:    ld.w $s0, $sp, 4 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: f:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    .cfi_def_cfa_offset 16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    fst.s $fs0, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    fst.s $fs1, $sp, 4 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    fst.s $fs2, $sp, 0 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    .cfi_offset 1, -4
+; LA32F-ILP32D-NEXT:    .cfi_offset 56, -8
+; LA32F-ILP32D-NEXT:    .cfi_offset 57, -12
+; LA32F-ILP32D-NEXT:    .cfi_offset 58, -16
+; LA32F-ILP32D-NEXT:    fmov.s $fs0, $fa2
+; LA32F-ILP32D-NEXT:    fmov.s $fs1, $fa0
+; LA32F-ILP32D-NEXT:    fmov.s $fa0, $fa1
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    fmov.s $fs2, $fa0
+; LA32F-ILP32D-NEXT:    fmov.s $fa0, $fs1
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    fadd.s $fa0, $fa0, $fs2
+; LA32F-ILP32D-NEXT:    bl __truncsfhf2
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    fmov.s $fs1, $fa0
+; LA32F-ILP32D-NEXT:    fmov.s $fa0, $fs0
+; LA32F-ILP32D-NEXT:    bl __extendhfsf2
+; LA32F-ILP32D-NEXT:    fadd.s $fa0, $fs1, $fa0
+; LA32F-ILP32D-NEXT:    bl __truncsfhf2
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    fld.s $fs2, $sp, 0 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    fld.s $fs1, $sp, 4 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    fld.s $fs0, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: f:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32S-NEXT:    .cfi_def_cfa_offset 16
+; LA32D-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    .cfi_offset 1, -4
+; LA32D-ILP32S-NEXT:    .cfi_offset 22, -8
+; LA32D-ILP32S-NEXT:    .cfi_offset 23, -12
+; LA32D-ILP32S-NEXT:    move $fp, $a2
+; LA32D-ILP32S-NEXT:    move $s0, $a0
+; LA32D-ILP32S-NEXT:    move $a0, $a1
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    fst.s $fa0, $sp, 0 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    move $a0, $s0
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    fld.s $fa1, $sp, 0 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    fadd.s $fa0, $fa0, $fa1
+; LA32D-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32S-NEXT:    bl __truncsfhf2
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    fst.s $fa0, $sp, 0 # 4-byte Folded Spill
+; LA32D-ILP32S-NEXT:    move $a0, $fp
+; LA32D-ILP32S-NEXT:    bl __extendhfsf2
+; LA32D-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32S-NEXT:    fld.s $fa1, $sp, 0 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA32D-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32S-NEXT:    bl __truncsfhf2
+; LA32D-ILP32S-NEXT:    lu12i.w $a1, -16
+; LA32D-ILP32S-NEXT:    or $a0, $a0, $a1
+; LA32D-ILP32S-NEXT:    ld.w $s0, $sp, 4 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: f:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -32
+; LA32D-ILP32D-NEXT:    .cfi_def_cfa_offset 32
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32D-ILP32D-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA32D-ILP32D-NEXT:    fst.d $fs2, $sp, 0 # 8-byte Folded Spill
+; LA32D-ILP32D-NEXT:    .cfi_offset 1, -4
+; LA32D-ILP32D-NEXT:    .cfi_offset 56, -16
+; LA32D-ILP32D-NEXT:    .cfi_offset 57, -24
+; LA32D-ILP32D-NEXT:    .cfi_offset 58, -32
+; LA32D-ILP32D-NEXT:    fmov.s $fs0, $fa2
+; LA32D-ILP32D-NEXT:    fmov.s $fs1, $fa0
+; LA32D-ILP32D-NEXT:    fmov.s $fa0, $fa1
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    fmov.s $fs2, $fa0
+; LA32D-ILP32D-NEXT:    fmov.s $fa0, $fs1
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    fadd.s $fa0, $fa0, $fs2
+; LA32D-ILP32D-NEXT:    bl __truncsfhf2
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    fmov.s $fs1, $fa0
+; LA32D-ILP32D-NEXT:    fmov.s $fa0, $fs0
+; LA32D-ILP32D-NEXT:    bl __extendhfsf2
+; LA32D-ILP32D-NEXT:    fadd.s $fa0, $fs1, $fa0
+; LA32D-ILP32D-NEXT:    bl __truncsfhf2
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32D-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    fld.d $fs2, $sp, 0 # 8-byte Folded Reload
+; LA32D-ILP32D-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32D-ILP32D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 32
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: f:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    addi.d $sp, $sp, -32
+; LA64S-NEXT:    .cfi_def_cfa_offset 32
+; LA64S-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64S-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64S-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT:    fst.d $fs2, $sp, 0 # 8-byte Folded Spill
+; LA64S-NEXT:    .cfi_offset 1, -8
+; LA64S-NEXT:    .cfi_offset 56, -16
+; LA64S-NEXT:    .cfi_offset 57, -24
+; LA64S-NEXT:    .cfi_offset 58, -32
+; LA64S-NEXT:    fmov.s $fs0, $fa2
+; LA64S-NEXT:    fmov.s $fs1, $fa0
+; LA64S-NEXT:    fmov.s $fa0, $fa1
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    fmov.s $fs2, $fa0
+; LA64S-NEXT:    fmov.s $fa0, $fs1
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    fadd.s $fa0, $fa0, $fs2
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    fmov.s $fs1, $fa0
+; LA64S-NEXT:    fmov.s $fa0, $fs0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    fadd.s $fa0, $fs1, $fa0
+; LA64S-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64S-NEXT:    jirl $ra, $ra, 0
+; LA64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64S-NEXT:    lu12i.w $a1, -16
+; LA64S-NEXT:    or $a0, $a0, $a1
+; LA64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64S-NEXT:    fld.d $fs2, $sp, 0 # 8-byte Folded Reload
+; LA64S-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64S-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64S-NEXT:    addi.d $sp, $sp, 32
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: f:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -32
+; LA64F-LP64S-NEXT:    .cfi_def_cfa_offset 32
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    .cfi_offset 1, -8
+; LA64F-LP64S-NEXT:    .cfi_offset 22, -16
+; LA64F-LP64S-NEXT:    .cfi_offset 23, -24
+; LA64F-LP64S-NEXT:    move $fp, $a2
+; LA64F-LP64S-NEXT:    move $s0, $a0
+; LA64F-LP64S-NEXT:    move $a0, $a1
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    fst.s $fa0, $sp, 4 # 4-byte Folded Spill
+; LA64F-LP64S-NEXT:    move $a0, $s0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    fld.s $fa1, $sp, 4 # 4-byte Folded Reload
+; LA64F-LP64S-NEXT:    fadd.s $fa0, $fa0, $fa1
+; LA64F-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    fst.s $fa0, $sp, 4 # 4-byte Folded Spill
+; LA64F-LP64S-NEXT:    move $a0, $fp
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    fld.s $fa1, $sp, 4 # 4-byte Folded Reload
+; LA64F-LP64S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64F-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64S-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 32
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: f:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -32
+; LA64F-LP64D-NEXT:    .cfi_def_cfa_offset 32
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    fst.d $fs2, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    .cfi_offset 1, -8
+; LA64F-LP64D-NEXT:    .cfi_offset 56, -16
+; LA64F-LP64D-NEXT:    .cfi_offset 57, -24
+; LA64F-LP64D-NEXT:    .cfi_offset 58, -32
+; LA64F-LP64D-NEXT:    fmov.s $fs0, $fa2
+; LA64F-LP64D-NEXT:    fmov.s $fs1, $fa0
+; LA64F-LP64D-NEXT:    fmov.s $fa0, $fa1
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    fmov.s $fs2, $fa0
+; LA64F-LP64D-NEXT:    fmov.s $fa0, $fs1
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    fadd.s $fa0, $fa0, $fs2
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    fmov.s $fs1, $fa0
+; LA64F-LP64D-NEXT:    fmov.s $fa0, $fs0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    fadd.s $fa0, $fs1, $fa0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    fld.d $fs2, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 32
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: f:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -32
+; LA64D-LP64S-NEXT:    .cfi_def_cfa_offset 32
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    .cfi_offset 1, -8
+; LA64D-LP64S-NEXT:    .cfi_offset 22, -16
+; LA64D-LP64S-NEXT:    .cfi_offset 23, -24
+; LA64D-LP64S-NEXT:    move $fp, $a2
+; LA64D-LP64S-NEXT:    move $s0, $a0
+; LA64D-LP64S-NEXT:    move $a0, $a1
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    fst.s $fa0, $sp, 4 # 4-byte Folded Spill
+; LA64D-LP64S-NEXT:    move $a0, $s0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    fld.s $fa1, $sp, 4 # 4-byte Folded Reload
+; LA64D-LP64S-NEXT:    fadd.s $fa0, $fa0, $fa1
+; LA64D-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    fst.s $fa0, $sp, 4 # 4-byte Folded Spill
+; LA64D-LP64S-NEXT:    move $a0, $fp
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    fld.s $fa1, $sp, 4 # 4-byte Folded Reload
+; LA64D-LP64S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64D-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64S-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 32
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: f:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -32
+; LA64D-LP64D-NEXT:    .cfi_def_cfa_offset 32
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    fst.d $fs2, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    .cfi_offset 1, -8
+; LA64D-LP64D-NEXT:    .cfi_offset 56, -16
+; LA64D-LP64D-NEXT:    .cfi_offset 57, -24
+; LA64D-LP64D-NEXT:    .cfi_offset 58, -32
+; LA64D-LP64D-NEXT:    fmov.s $fs0, $fa2
+; LA64D-LP64D-NEXT:    fmov.s $fs1, $fa0
+; LA64D-LP64D-NEXT:    fmov.s $fa0, $fa1
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    fmov.s $fs2, $fa0
+; LA64D-LP64D-NEXT:    fmov.s $fa0, $fs1
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    fadd.s $fa0, $fa0, $fs2
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    fmov.s $fs1, $fa0
+; LA64D-LP64D-NEXT:    fmov.s $fa0, $fs0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    fadd.s $fa0, $fs1, $fa0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    fld.d $fs2, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 32
+; LA64D-LP64D-NEXT:    ret
+    %d = fadd half %a, %b
+    %e = fadd half %d, %c
+    ret half %e
+}
diff --git a/llvm/test/CodeGen/LoongArch/issue97981.ll b/llvm/test/CodeGen/LoongArch/issue97981.ll
new file mode 100644
index 0000000000000..856cd46de8d3d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/issue97981.ll
@@ -0,0 +1,127 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f -target-abi=ilp32s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F-ILP32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f -target-abi=ilp32d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F-ILP32D
+; RUN: llc --mtriple=loongarch32 --mattr=+d -target-abi=ilp32s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D-ILP32S
+; RUN: llc --mtriple=loongarch32 --mattr=+d -target-abi=ilp32d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D-ILP32D
+; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f -target-abi=lp64s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F-LP64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f -target-abi=lp64d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F-LP64D
+; RUN: llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D-LP64S
+; RUN: llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D-LP64D
+
+define half @to_half(i16 %bits) {
+; LA32S-LABEL: to_half:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: to_half:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32S-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: to_half:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: to_half:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    lu12i.w $a1, -16
+; LA32D-ILP32S-NEXT:    or $a0, $a0, $a1
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: to_half:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32D-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: to_half:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    lu12i.w $a1, -16
+; LA64S-NEXT:    or $a0, $a0, $a1
+; LA64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: to_half:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: to_half:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: to_half:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: to_half:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    ret
+    %f = bitcast i16 %bits to half
+    ret half %f
+}
+
+define i16 @from_half(half %f) {
+; LA32S-LABEL: from_half:
+; LA32S:       # %bb.0:
+; LA32S-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: from_half:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: from_half:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32S-LABEL: from_half:
+; LA32D-ILP32S:       # %bb.0:
+; LA32D-ILP32S-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: from_half:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64S-LABEL: from_half:
+; LA64S:       # %bb.0:
+; LA64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64S-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: from_half:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: from_half:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: from_half:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: from_half:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64D-NEXT:    ret
+    %bits = bitcast half %f to i16
+    ret i16 %bits
+}

>From 634e2eb5829729ba4ebbb63dc73a3e237da6f976 Mon Sep 17 00:00:00 2001
From: Ami-zhang <zhanglimin at loongson.cn>
Date: Thu, 17 Apr 2025 15:59:05 +0800
Subject: [PATCH 2/3] [clang][LoongArch] Add support for the _Float16 type

Enable _Float16 for the LoongArch target. Additionally, this change
fixes incorrect ABI lowering of _Float16 for structs containing fp16
members that are eligible for passing via GPR+FPR or FPR+FPR.
Finally, it fixes int16 -> __fp16 conversion codegen so that it uses
generic LLVM IR rather than the llvm.convert.to.fp16 intrinsic.
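
For illustration, a minimal sketch of the struct case (the struct and
function names here are hypothetical; the real coverage lives in the new
abi-lp64d.c tests below). Under lp64d, a two-member fp16 struct that is
now FPR+FPR eligible lowers to a pair of half values instead of an i64
coercion:

    // Hypothetical example, mirroring the f16x2_s test added below.
    struct f16pair { _Float16 a, b; };
    // lp64d lowering: { half, half } @f_f16pair(half %0, half %1)
    struct f16pair f_f16pair(struct f16pair x) { return x; }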
---
 clang/docs/LanguageExtensions.rst             |  1 +
 clang/lib/Basic/Targets/LoongArch.h           |  3 +
 clang/lib/CodeGen/Targets/LoongArch.cpp       |  7 +-
 clang/test/CodeGen/LoongArch/__fp16-convert.c | 30 ++++++++
 clang/test/CodeGen/LoongArch/abi-lp64d.c      | 71 +++++++++++++++++++
 5 files changed, 108 insertions(+), 4 deletions(-)
 create mode 100644 clang/test/CodeGen/LoongArch/__fp16-convert.c

diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
index a40dd4d1a1673..088f01a0199e4 100644
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
@@ -1001,6 +1001,7 @@ to ``float``; see below for more information on this emulation.
   * X86 (if SSE2 is available; natively if AVX512-FP16 is also available)
   * RISC-V (natively if Zfh or Zhinx is available)
   * SystemZ (emulated)
+  * LoongArch
 
 * ``__bf16`` is supported on the following targets (currently never natively):
 
diff --git a/clang/lib/Basic/Targets/LoongArch.h b/clang/lib/Basic/Targets/LoongArch.h
index 4c7b53abfef9b..8a8c978ab89db 100644
--- a/clang/lib/Basic/Targets/LoongArch.h
+++ b/clang/lib/Basic/Targets/LoongArch.h
@@ -53,6 +53,7 @@ class LLVM_LIBRARY_VISIBILITY LoongArchTargetInfo : public TargetInfo {
     LongDoubleAlign = 128;
     LongDoubleFormat = &llvm::APFloat::IEEEquad();
     MCountName = "_mcount";
+    HasFloat16 = true;
     SuitableAlign = 128;
     WCharType = SignedInt;
     WIntType = UnsignedInt;
@@ -98,6 +99,8 @@ class LLVM_LIBRARY_VISIBILITY LoongArchTargetInfo : public TargetInfo {
 
   bool hasBitIntType() const override { return true; }
 
+  bool useFP16ConversionIntrinsics() const override { return false; }
+
   bool handleTargetFeatures(std::vector<std::string> &Features,
                             DiagnosticsEngine &Diags) override;
 
diff --git a/clang/lib/CodeGen/Targets/LoongArch.cpp b/clang/lib/CodeGen/Targets/LoongArch.cpp
index 0f689371a60db..7640f3779816a 100644
--- a/clang/lib/CodeGen/Targets/LoongArch.cpp
+++ b/clang/lib/CodeGen/Targets/LoongArch.cpp
@@ -110,10 +110,9 @@ bool LoongArchABIInfo::detectFARsEligibleStructHelper(
     uint64_t Size = getContext().getTypeSize(Ty);
     if (IsInt && Size > GRLen)
       return false;
-    // Can't be eligible if larger than the FP registers. Half precision isn't
-    // currently supported on LoongArch and the ABI hasn't been confirmed, so
-    // default to the integer ABI in that case.
-    if (IsFloat && (Size > FRLen || Size < 32))
+    // Can't be eligible if larger than the FP registers. Half-precision
+    // handling is specified in the ABI, so don't block those values.
+    if (IsFloat && Size > FRLen)
       return false;
     // Can't be eligible if an integer type was already found (int+int pairs
     // are not eligible).
diff --git a/clang/test/CodeGen/LoongArch/__fp16-convert.c b/clang/test/CodeGen/LoongArch/__fp16-convert.c
new file mode 100644
index 0000000000000..84ef5de960b47
--- /dev/null
+++ b/clang/test/CodeGen/LoongArch/__fp16-convert.c
@@ -0,0 +1,30 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -triple loongarch64 -emit-llvm %s -o - \
+// RUN:   | FileCheck %s
+
+__fp16 y;
+short z;
+// CHECK-LABEL: define dso_local void @bar1(
+// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load half, ptr @y, align 2
+// CHECK-NEXT:    [[CONV:%.*]] = fpext half [[TMP0]] to float
+// CHECK-NEXT:    [[CONV1:%.*]] = fptosi float [[CONV]] to i16
+// CHECK-NEXT:    store i16 [[CONV1]], ptr @z, align 2
+// CHECK-NEXT:    ret void
+//
+void bar1(){
+    z = y;
+}
+// CHECK-LABEL: define dso_local void @bar2(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @z, align 2
+// CHECK-NEXT:    [[CONV:%.*]] = sitofp i16 [[TMP0]] to float
+// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc float [[CONV]] to half
+// CHECK-NEXT:    store half [[CONV1]], ptr @y, align 2
+// CHECK-NEXT:    ret void
+//
+void bar2(){
+    y = z;
+}
diff --git a/clang/test/CodeGen/LoongArch/abi-lp64d.c b/clang/test/CodeGen/LoongArch/abi-lp64d.c
index fc7f1eada586b..9f64cfd662e5f 100644
--- a/clang/test/CodeGen/LoongArch/abi-lp64d.c
+++ b/clang/test/CodeGen/LoongArch/abi-lp64d.c
@@ -48,6 +48,9 @@ unsigned long check_ulong() { return 0; }
 // CHECK-LABEL: define{{.*}} i64 @check_ulonglong()
 unsigned long long check_ulonglong() { return 0; }
 
+// CHECK-LABEL: define{{.*}}  half @check_float16()
+_Float16 check_float16() { return 0; }
+
 // CHECK-LABEL: define{{.*}} float @check_float()
 float check_float() { return 0; }
 
@@ -127,6 +130,14 @@ struct i16x4_s f_i16x4_s(struct i16x4_s x) {
 /// available, the value is passed in a GAR; if no GAR is available, the value
 /// is passed on the stack.
 
+struct f16x1_s {
+  __fp16 a;
+};
+
+struct float16x1_s {
+  _Float16 a;
+};
+
 struct f32x1_s {
   float a;
 };
@@ -135,6 +146,16 @@ struct f64x1_s {
   double a;
 };
 
+// CHECK-LABEL: define{{.*}} half @f_f16x1_s(half %0)
+struct f16x1_s f_f16x1_s(struct f16x1_s x) {
+  return x;
+}
+
+// CHECK-LABEL: define{{.*}} half @f_float16x1_s(half %0)
+struct float16x1_s f_float16x1_s(struct float16x1_s x) {
+  return x;
+}
+
 // CHECK-LABEL: define{{.*}} float @f_f32x1_s(float %0)
 struct f32x1_s f_f32x1_s(struct f32x1_s x) {
   return x;
@@ -151,10 +172,20 @@ struct f64x1_s f_f64x1_s(struct f64x1_s x) {
 /// number of available FAR is less than 2, it’s passed in a GAR, and passed on
 /// the stack if no GAR is available.
 
+struct f16x2_s {
+  __fp16 a;
+  _Float16 b;
+};
+
 struct f32x2_s {
   float a, b;
 };
 
+// CHECK-LABEL: define{{.*}} { half, half } @f_f16x2_s(half %0, half %1)
+struct f16x2_s f_f16x2_s(struct f16x2_s x) {
+  return x;
+}
+
 // CHECK-LABEL: define{{.*}} { float, float } @f_f32x2_s(float %0, float %1)
 struct f32x2_s f_f32x2_s(struct f32x2_s x) {
   return x;
@@ -165,11 +196,21 @@ struct f32x2_s f_f32x2_s(struct f32x2_s x) {
 /// i. Multiple fixed-point members. If there are available GAR, the structure
 /// is passed in a GAR, and passed on the stack if no GAR is available.
 
+struct f16x1_i16x2_s {
+  _Float16 a;
+  int16_t b, c;
+};
+
 struct f32x1_i16x2_s {
   float a;
   int16_t b, c;
 };
 
+// CHECK-LABEL: define{{.*}} i64 @f_f16x1_i16x2_s(i64 %x.coerce)
+struct f16x1_i16x2_s f_f16x1_i16x2_s(struct f16x1_i16x2_s x) {
+  return x;
+}
+
 // CHECK-LABEL: define{{.*}} i64 @f_f32x1_i16x2_s(i64 %x.coerce)
 struct f32x1_i16x2_s f_f32x1_i16x2_s(struct f32x1_i16x2_s x) {
   return x;
@@ -181,11 +222,21 @@ struct f32x1_i16x2_s f_f32x1_i16x2_s(struct f32x1_i16x2_s x) {
 /// but one GAR is available, it’s passed in GAR; If no GAR is available, it’s
 /// passed on the stack.
 
+struct f16x1_i32x1_s {
+  _Float16 a;
+  int32_t b;
+};
+
 struct f32x1_i32x1_s {
   float a;
   int32_t b;
 };
 
+// CHECK-LABEL: define{{.*}} { half, i32 } @f_f16x1_i32x1_s(half %0, i32 %1)
+struct f16x1_i32x1_s f_f16x1_i32x1_s(struct f16x1_i32x1_s x) {
+  return x;
+}
+
 // CHECK-LABEL: define{{.*}} { float, i32 } @f_f32x1_i32x1_s(float %0, i32 %1)
 struct f32x1_i32x1_s f_f32x1_i32x1_s(struct f32x1_i32x1_s x) {
   return x;
@@ -253,6 +304,16 @@ struct f32x4_s f_f32x4_s(struct f32x4_s x) {
   return x;
 }
 
+struct f16x5_s {
+  _Float16 a, b, c, d;
+  __fp16 e;
+};
+
+// CHECK-LABEL: define{{.*}} [2 x i64] @f_f16x5_s([2 x i64] %x.coerce)
+struct f16x5_s f_f16x5_s(struct f16x5_s x) {
+  return x;
+}
+
 /// ii. The structure with two double members is passed in a pair of available
 /// FARs. If no a pair of available FARs, it’s passed in GARs. A structure with
 /// one double member and one float member is same.
@@ -312,6 +373,16 @@ struct f32x2_i32x2_s f_f32x2_i32x2_s(struct f32x2_i32x2_s x) {
   return x;
 }
 
+struct f16x4_i32x2_s {
+  _Float16 a, b, c, d;
+  int32_t e, f;
+};
+
+// CHECK-LABEL: define{{.*}} [2 x i64] @f_f16x4_i32x2_s([2 x i64] %x.coerce)
+struct f16x4_i32x2_s f_f16x4_i32x2_s(struct f16x4_i32x2_s x) {
+  return x;
+}
+
 /// 3. WOA > 2 × GRLEN
 /// a. It’s passed by reference and are replaced in the argument list with the
 /// address. If there is an available GAR, the reference is passed in the GAR,

>From be069b92c390a988a4bc24433502626ed1e46a26 Mon Sep 17 00:00:00 2001
From: Ami-zhang <zhanglimin at loongson.cn>
Date: Mon, 28 Apr 2025 14:48:58 +0800
Subject: [PATCH 3/3] [LoongArch][BF16] Add support for the __bf16 type

The LoongArch psABI recently added support for the __bf16 type, so this
patch enables it in Clang.

bf16 operations are supported by promoting them to float. This patch
ensures that load-extension and truncate-store operations for bf16 are
properly expanded.

It also implements bf16 truncate/extend support on hard-float targets.
The extend operation is implemented with a shift, just as in the standard
legalization; this requires custom lowering of the truncate libcall on
hard-float ABIs (the normal libcall path is used on soft-float ABIs).
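
For reference, a minimal C sketch of the extend-by-shift idea (illustrative
only; the helper name is made up, and the in-tree lowering operates on
SelectionDAG nodes rather than on C code like this):

    #include <stdint.h>
    #include <string.h>

    /* bf16 shares its bit layout with the upper 16 bits of an IEEE f32,
       so extending it is just a 16-bit left shift into an f32 container. */
    static inline float bf16_to_f32(uint16_t bits) {
      uint32_t w = (uint32_t)bits << 16; /* payload into the high half */
      float f;
      memcpy(&f, &w, sizeof f);          /* bit-for-bit reinterpretation */
      return f;
    }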
---
 clang/docs/LanguageExtensions.rst             |    1 +
 clang/lib/Basic/Targets/LoongArch.h           |    5 +
 clang/test/CodeGen/LoongArch/bfloat-abi.c     |  611 ++++++++++
 .../test/CodeGen/LoongArch/bfloat-mangle.cpp  |   19 +
 .../LoongArch/LoongArchISelLowering.cpp       |   54 +-
 .../Target/LoongArch/LoongArchISelLowering.h  |    2 +
 llvm/test/CodeGen/LoongArch/bf16-promote.ll   |  172 +++
 llvm/test/CodeGen/LoongArch/bf16.ll           | 1048 +++++++++++++++++
 8 files changed, 1908 insertions(+), 4 deletions(-)
 create mode 100644 clang/test/CodeGen/LoongArch/bfloat-abi.c
 create mode 100644 clang/test/CodeGen/LoongArch/bfloat-mangle.cpp
 create mode 100644 llvm/test/CodeGen/LoongArch/bf16-promote.ll
 create mode 100644 llvm/test/CodeGen/LoongArch/bf16.ll

diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
index 088f01a0199e4..4fa91b95c45e0 100644
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
@@ -1009,6 +1009,7 @@ to ``float``; see below for more information on this emulation.
   * 64-bit ARM (AArch64)
   * RISC-V
   * X86 (when SSE2 is available)
+  * LoongArch
 
 (For X86, SSE2 is available on 64-bit and all recent 32-bit processors.)
 
diff --git a/clang/lib/Basic/Targets/LoongArch.h b/clang/lib/Basic/Targets/LoongArch.h
index 8a8c978ab89db..7e9affc98ac0f 100644
--- a/clang/lib/Basic/Targets/LoongArch.h
+++ b/clang/lib/Basic/Targets/LoongArch.h
@@ -49,6 +49,9 @@ class LLVM_LIBRARY_VISIBILITY LoongArchTargetInfo : public TargetInfo {
     HasFeatureLD_SEQ_SA = false;
     HasFeatureDiv32 = false;
     HasFeatureSCQ = false;
+    BFloat16Width = 16;
+    BFloat16Align = 16;
+    BFloat16Format = &llvm::APFloat::BFloat();
     LongDoubleWidth = 128;
     LongDoubleAlign = 128;
     LongDoubleFormat = &llvm::APFloat::IEEEquad();
@@ -99,6 +102,8 @@ class LLVM_LIBRARY_VISIBILITY LoongArchTargetInfo : public TargetInfo {
 
   bool hasBitIntType() const override { return true; }
 
+  bool hasBFloat16Type() const override { return true; }
+
   bool useFP16ConversionIntrinsics() const override { return false; }
 
   bool handleTargetFeatures(std::vector<std::string> &Features,
diff --git a/clang/test/CodeGen/LoongArch/bfloat-abi.c b/clang/test/CodeGen/LoongArch/bfloat-abi.c
new file mode 100644
index 0000000000000..9f0e25c17cc74
--- /dev/null
+++ b/clang/test/CodeGen/LoongArch/bfloat-abi.c
@@ -0,0 +1,611 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// RUN: %clang_cc1 -triple loongarch64 -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK-LA64
+// RUN: %clang_cc1 -triple loongarch32 -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK-LA32
+
+struct bfloat1 {
+  __bf16 a;
+};
+
+// CHECK-LA64-LABEL: define dso_local bfloat @h1
+// CHECK-LA64-SAME: (bfloat noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-LA64-NEXT:  entry:
+// CHECK-LA64-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_BFLOAT1:%.*]], align 2
+// CHECK-LA64-NEXT:    [[A_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    store bfloat [[A]], ptr [[A_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT1]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA64-NEXT:    store bfloat [[TMP0]], ptr [[A1]], align 2
+// CHECK-LA64-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { bfloat }, ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA64-NEXT:    [[TMP2:%.*]] = load bfloat, ptr [[TMP1]], align 2
+// CHECK-LA64-NEXT:    ret bfloat [[TMP2]]
+//
+// CHECK-LA32-LABEL: define dso_local bfloat @h1
+// CHECK-LA32-SAME: (bfloat noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-LA32-NEXT:  entry:
+// CHECK-LA32-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_BFLOAT1:%.*]], align 2
+// CHECK-LA32-NEXT:    [[A_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    store bfloat [[A]], ptr [[A_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT1]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA32-NEXT:    store bfloat [[TMP0]], ptr [[A1]], align 2
+// CHECK-LA32-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { bfloat }, ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA32-NEXT:    [[TMP2:%.*]] = load bfloat, ptr [[TMP1]], align 2
+// CHECK-LA32-NEXT:    ret bfloat [[TMP2]]
+//
+struct bfloat1 h1(__bf16 a) {
+  struct bfloat1 x;
+  x.a = a;
+  return x;
+}
+
+struct bfloat2 {
+  __bf16 a;
+  __bf16 b;
+};
+
+// CHECK-LA64-LABEL: define dso_local { bfloat, bfloat } @h2
+// CHECK-LA64-SAME: (bfloat noundef [[A:%.*]], bfloat noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-LA64-NEXT:  entry:
+// CHECK-LA64-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_BFLOAT2:%.*]], align 2
+// CHECK-LA64-NEXT:    [[A_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    store bfloat [[A]], ptr [[A_ADDR]], align 2
+// CHECK-LA64-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT2]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA64-NEXT:    store bfloat [[TMP0]], ptr [[A1]], align 2
+// CHECK-LA64-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT2]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA64-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 2
+// CHECK-LA64-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { bfloat, bfloat }, ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA64-NEXT:    [[TMP3:%.*]] = load bfloat, ptr [[TMP2]], align 2
+// CHECK-LA64-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw { bfloat, bfloat }, ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA64-NEXT:    [[TMP5:%.*]] = load bfloat, ptr [[TMP4]], align 2
+// CHECK-LA64-NEXT:    [[TMP6:%.*]] = insertvalue { bfloat, bfloat } poison, bfloat [[TMP3]], 0
+// CHECK-LA64-NEXT:    [[TMP7:%.*]] = insertvalue { bfloat, bfloat } [[TMP6]], bfloat [[TMP5]], 1
+// CHECK-LA64-NEXT:    ret { bfloat, bfloat } [[TMP7]]
+//
+// CHECK-LA32-LABEL: define dso_local { bfloat, bfloat } @h2
+// CHECK-LA32-SAME: (bfloat noundef [[A:%.*]], bfloat noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-LA32-NEXT:  entry:
+// CHECK-LA32-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_BFLOAT2:%.*]], align 2
+// CHECK-LA32-NEXT:    [[A_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    store bfloat [[A]], ptr [[A_ADDR]], align 2
+// CHECK-LA32-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT2]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA32-NEXT:    store bfloat [[TMP0]], ptr [[A1]], align 2
+// CHECK-LA32-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT2]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA32-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 2
+// CHECK-LA32-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { bfloat, bfloat }, ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA32-NEXT:    [[TMP3:%.*]] = load bfloat, ptr [[TMP2]], align 2
+// CHECK-LA32-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw { bfloat, bfloat }, ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA32-NEXT:    [[TMP5:%.*]] = load bfloat, ptr [[TMP4]], align 2
+// CHECK-LA32-NEXT:    [[TMP6:%.*]] = insertvalue { bfloat, bfloat } poison, bfloat [[TMP3]], 0
+// CHECK-LA32-NEXT:    [[TMP7:%.*]] = insertvalue { bfloat, bfloat } [[TMP6]], bfloat [[TMP5]], 1
+// CHECK-LA32-NEXT:    ret { bfloat, bfloat } [[TMP7]]
+//
+struct bfloat2 h2(__bf16 a, __bf16 b) {
+  struct bfloat2 x;
+  x.a = a;
+  x.b = b;
+  return x;
+}
+
+struct bfloat3 {
+  __bf16 a;
+  __bf16 b;
+  __bf16 c;
+};
+
+// CHECK-LA64-LABEL: define dso_local i64 @h3
+// CHECK-LA64-SAME: (bfloat noundef [[A:%.*]], bfloat noundef [[B:%.*]], bfloat noundef [[C:%.*]]) #[[ATTR0]] {
+// CHECK-LA64-NEXT:  entry:
+// CHECK-LA64-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_BFLOAT3:%.*]], align 2
+// CHECK-LA64-NEXT:    [[A_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[C_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[RETVAL_COERCE:%.*]] = alloca i64, align 8
+// CHECK-LA64-NEXT:    store bfloat [[A]], ptr [[A_ADDR]], align 2
+// CHECK-LA64-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    store bfloat [[C]], ptr [[C_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT3]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA64-NEXT:    store bfloat [[TMP0]], ptr [[A1]], align 2
+// CHECK-LA64-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT3]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA64-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 2
+// CHECK-LA64-NEXT:    [[TMP2:%.*]] = load bfloat, ptr [[C_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[C3:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT3]], ptr [[RETVAL]], i32 0, i32 2
+// CHECK-LA64-NEXT:    store bfloat [[TMP2]], ptr [[C3]], align 2
+// CHECK-LA64-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL_COERCE]], ptr align 2 [[RETVAL]], i64 6, i1 false)
+// CHECK-LA64-NEXT:    [[TMP3:%.*]] = load i64, ptr [[RETVAL_COERCE]], align 8
+// CHECK-LA64-NEXT:    ret i64 [[TMP3]]
+//
+// CHECK-LA32-LABEL: define dso_local [2 x i32] @h3
+// CHECK-LA32-SAME: (bfloat noundef [[A:%.*]], bfloat noundef [[B:%.*]], bfloat noundef [[C:%.*]]) #[[ATTR0]] {
+// CHECK-LA32-NEXT:  entry:
+// CHECK-LA32-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_BFLOAT3:%.*]], align 2
+// CHECK-LA32-NEXT:    [[A_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[C_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[RETVAL_COERCE:%.*]] = alloca [2 x i32], align 4
+// CHECK-LA32-NEXT:    store bfloat [[A]], ptr [[A_ADDR]], align 2
+// CHECK-LA32-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    store bfloat [[C]], ptr [[C_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT3]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA32-NEXT:    store bfloat [[TMP0]], ptr [[A1]], align 2
+// CHECK-LA32-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT3]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA32-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 2
+// CHECK-LA32-NEXT:    [[TMP2:%.*]] = load bfloat, ptr [[C_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[C3:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT3]], ptr [[RETVAL]], i32 0, i32 2
+// CHECK-LA32-NEXT:    store bfloat [[TMP2]], ptr [[C3]], align 2
+// CHECK-LA32-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[RETVAL_COERCE]], ptr align 2 [[RETVAL]], i32 6, i1 false)
+// CHECK-LA32-NEXT:    [[TMP3:%.*]] = load [2 x i32], ptr [[RETVAL_COERCE]], align 4
+// CHECK-LA32-NEXT:    ret [2 x i32] [[TMP3]]
+//
+struct bfloat3 h3(__bf16 a, __bf16 b, __bf16 c) {
+  struct bfloat3 x;
+  x.a = a;
+  x.b = b;
+  x.c = c;
+  return x;
+}
+
+struct bfloat4 {
+  __bf16 a;
+  __bf16 b;
+  __bf16 c;
+  __bf16 d;
+};
+
+// CHECK-LA64-LABEL: define dso_local i64 @h4
+// CHECK-LA64-SAME: (bfloat noundef [[A:%.*]], bfloat noundef [[B:%.*]], bfloat noundef [[C:%.*]], bfloat noundef [[D:%.*]]) #[[ATTR0]] {
+// CHECK-LA64-NEXT:  entry:
+// CHECK-LA64-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_BFLOAT4:%.*]], align 2
+// CHECK-LA64-NEXT:    [[A_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[C_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[D_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    store bfloat [[A]], ptr [[A_ADDR]], align 2
+// CHECK-LA64-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    store bfloat [[C]], ptr [[C_ADDR]], align 2
+// CHECK-LA64-NEXT:    store bfloat [[D]], ptr [[D_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT4]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA64-NEXT:    store bfloat [[TMP0]], ptr [[A1]], align 2
+// CHECK-LA64-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT4]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA64-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 2
+// CHECK-LA64-NEXT:    [[TMP2:%.*]] = load bfloat, ptr [[C_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[C3:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT4]], ptr [[RETVAL]], i32 0, i32 2
+// CHECK-LA64-NEXT:    store bfloat [[TMP2]], ptr [[C3]], align 2
+// CHECK-LA64-NEXT:    [[TMP3:%.*]] = load bfloat, ptr [[D_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[D4:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT4]], ptr [[RETVAL]], i32 0, i32 3
+// CHECK-LA64-NEXT:    store bfloat [[TMP3]], ptr [[D4]], align 2
+// CHECK-LA64-NEXT:    [[TMP4:%.*]] = load i64, ptr [[RETVAL]], align 2
+// CHECK-LA64-NEXT:    ret i64 [[TMP4]]
+//
+// CHECK-LA32-LABEL: define dso_local [2 x i32] @h4
+// CHECK-LA32-SAME: (bfloat noundef [[A:%.*]], bfloat noundef [[B:%.*]], bfloat noundef [[C:%.*]], bfloat noundef [[D:%.*]]) #[[ATTR0]] {
+// CHECK-LA32-NEXT:  entry:
+// CHECK-LA32-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_BFLOAT4:%.*]], align 2
+// CHECK-LA32-NEXT:    [[A_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[C_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[D_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    store bfloat [[A]], ptr [[A_ADDR]], align 2
+// CHECK-LA32-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    store bfloat [[C]], ptr [[C_ADDR]], align 2
+// CHECK-LA32-NEXT:    store bfloat [[D]], ptr [[D_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT4]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA32-NEXT:    store bfloat [[TMP0]], ptr [[A1]], align 2
+// CHECK-LA32-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT4]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA32-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 2
+// CHECK-LA32-NEXT:    [[TMP2:%.*]] = load bfloat, ptr [[C_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[C3:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT4]], ptr [[RETVAL]], i32 0, i32 2
+// CHECK-LA32-NEXT:    store bfloat [[TMP2]], ptr [[C3]], align 2
+// CHECK-LA32-NEXT:    [[TMP3:%.*]] = load bfloat, ptr [[D_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[D4:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT4]], ptr [[RETVAL]], i32 0, i32 3
+// CHECK-LA32-NEXT:    store bfloat [[TMP3]], ptr [[D4]], align 2
+// CHECK-LA32-NEXT:    [[TMP4:%.*]] = load [2 x i32], ptr [[RETVAL]], align 2
+// CHECK-LA32-NEXT:    ret [2 x i32] [[TMP4]]
+//
+struct bfloat4 h4(__bf16 a, __bf16 b, __bf16 c, __bf16 d) {
+  struct bfloat4 x;
+  x.a = a;
+  x.b = b;
+  x.c = c;
+  x.d = d;
+  return x;
+}
+
+struct floatbfloat {
+  float a;
+  __bf16 b;
+};
+
+// CHECK-LA64-LABEL: define dso_local { float, bfloat } @fh
+// CHECK-LA64-SAME: (float noundef [[A:%.*]], bfloat noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-LA64-NEXT:  entry:
+// CHECK-LA64-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOATBFLOAT:%.*]], align 4
+// CHECK-LA64-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-LA64-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4
+// CHECK-LA64-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4
+// CHECK-LA64-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA64-NEXT:    store float [[TMP0]], ptr [[A1]], align 4
+// CHECK-LA64-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA64-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 4
+// CHECK-LA64-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { float, bfloat }, ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA64-NEXT:    [[TMP3:%.*]] = load float, ptr [[TMP2]], align 4
+// CHECK-LA64-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw { float, bfloat }, ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA64-NEXT:    [[TMP5:%.*]] = load bfloat, ptr [[TMP4]], align 4
+// CHECK-LA64-NEXT:    [[TMP6:%.*]] = insertvalue { float, bfloat } poison, float [[TMP3]], 0
+// CHECK-LA64-NEXT:    [[TMP7:%.*]] = insertvalue { float, bfloat } [[TMP6]], bfloat [[TMP5]], 1
+// CHECK-LA64-NEXT:    ret { float, bfloat } [[TMP7]]
+//
+// CHECK-LA32-LABEL: define dso_local { float, bfloat } @fh
+// CHECK-LA32-SAME: (float noundef [[A:%.*]], bfloat noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-LA32-NEXT:  entry:
+// CHECK-LA32-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOATBFLOAT:%.*]], align 4
+// CHECK-LA32-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-LA32-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4
+// CHECK-LA32-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4
+// CHECK-LA32-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA32-NEXT:    store float [[TMP0]], ptr [[A1]], align 4
+// CHECK-LA32-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA32-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 4
+// CHECK-LA32-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { float, bfloat }, ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA32-NEXT:    [[TMP3:%.*]] = load float, ptr [[TMP2]], align 4
+// CHECK-LA32-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw { float, bfloat }, ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA32-NEXT:    [[TMP5:%.*]] = load bfloat, ptr [[TMP4]], align 4
+// CHECK-LA32-NEXT:    [[TMP6:%.*]] = insertvalue { float, bfloat } poison, float [[TMP3]], 0
+// CHECK-LA32-NEXT:    [[TMP7:%.*]] = insertvalue { float, bfloat } [[TMP6]], bfloat [[TMP5]], 1
+// CHECK-LA32-NEXT:    ret { float, bfloat } [[TMP7]]
+//
+struct floatbfloat fh(float a, __bf16 b) {
+  struct floatbfloat x;
+  x.a = a;
+  x.b = b;
+  return x;
+}
+
+struct floatbfloat2 {
+  float a;
+  __bf16 b;
+  __bf16 c;
+};
+
+// CHECK-LA64-LABEL: define dso_local i64 @fh2
+// CHECK-LA64-SAME: (float noundef [[A:%.*]], bfloat noundef [[B:%.*]], bfloat noundef [[C:%.*]]) #[[ATTR0]] {
+// CHECK-LA64-NEXT:  entry:
+// CHECK-LA64-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOATBFLOAT2:%.*]], align 4
+// CHECK-LA64-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-LA64-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[C_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4
+// CHECK-LA64-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    store bfloat [[C]], ptr [[C_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4
+// CHECK-LA64-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT2]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA64-NEXT:    store float [[TMP0]], ptr [[A1]], align 4
+// CHECK-LA64-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT2]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA64-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 4
+// CHECK-LA64-NEXT:    [[TMP2:%.*]] = load bfloat, ptr [[C_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[C3:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT2]], ptr [[RETVAL]], i32 0, i32 2
+// CHECK-LA64-NEXT:    store bfloat [[TMP2]], ptr [[C3]], align 2
+// CHECK-LA64-NEXT:    [[TMP3:%.*]] = load i64, ptr [[RETVAL]], align 4
+// CHECK-LA64-NEXT:    ret i64 [[TMP3]]
+//
+// CHECK-LA32-LABEL: define dso_local [2 x i32] @fh2
+// CHECK-LA32-SAME: (float noundef [[A:%.*]], bfloat noundef [[B:%.*]], bfloat noundef [[C:%.*]]) #[[ATTR0]] {
+// CHECK-LA32-NEXT:  entry:
+// CHECK-LA32-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOATBFLOAT2:%.*]], align 4
+// CHECK-LA32-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-LA32-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[C_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4
+// CHECK-LA32-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    store bfloat [[C]], ptr [[C_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4
+// CHECK-LA32-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT2]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA32-NEXT:    store float [[TMP0]], ptr [[A1]], align 4
+// CHECK-LA32-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT2]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA32-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 4
+// CHECK-LA32-NEXT:    [[TMP2:%.*]] = load bfloat, ptr [[C_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[C3:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT2]], ptr [[RETVAL]], i32 0, i32 2
+// CHECK-LA32-NEXT:    store bfloat [[TMP2]], ptr [[C3]], align 2
+// CHECK-LA32-NEXT:    [[TMP3:%.*]] = load [2 x i32], ptr [[RETVAL]], align 4
+// CHECK-LA32-NEXT:    ret [2 x i32] [[TMP3]]
+//
+struct floatbfloat2 fh2(float a, __bf16 b, __bf16 c) {
+  struct floatbfloat2 x;
+  x.a = a;
+  x.b = b;
+  x.c = c;
+  return x;
+}
+
+struct bfloatfloat {
+  __bf16 a;
+  float b;
+};
+
+// CHECK-LA64-LABEL: define dso_local { bfloat, float } @hf
+// CHECK-LA64-SAME: (bfloat noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-LA64-NEXT:  entry:
+// CHECK-LA64-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_BFLOATFLOAT:%.*]], align 4
+// CHECK-LA64-NEXT:    [[A_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-LA64-NEXT:    store bfloat [[A]], ptr [[A_ADDR]], align 2
+// CHECK-LA64-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4
+// CHECK-LA64-NEXT:    [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOATFLOAT]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA64-NEXT:    store bfloat [[TMP0]], ptr [[A1]], align 4
+// CHECK-LA64-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4
+// CHECK-LA64-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOATFLOAT]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA64-NEXT:    store float [[TMP1]], ptr [[B2]], align 4
+// CHECK-LA64-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { bfloat, float }, ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA64-NEXT:    [[TMP3:%.*]] = load bfloat, ptr [[TMP2]], align 4
+// CHECK-LA64-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw { bfloat, float }, ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA64-NEXT:    [[TMP5:%.*]] = load float, ptr [[TMP4]], align 4
+// CHECK-LA64-NEXT:    [[TMP6:%.*]] = insertvalue { bfloat, float } poison, bfloat [[TMP3]], 0
+// CHECK-LA64-NEXT:    [[TMP7:%.*]] = insertvalue { bfloat, float } [[TMP6]], float [[TMP5]], 1
+// CHECK-LA64-NEXT:    ret { bfloat, float } [[TMP7]]
+//
+// CHECK-LA32-LABEL: define dso_local { bfloat, float } @hf
+// CHECK-LA32-SAME: (bfloat noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-LA32-NEXT:  entry:
+// CHECK-LA32-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_BFLOATFLOAT:%.*]], align 4
+// CHECK-LA32-NEXT:    [[A_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-LA32-NEXT:    store bfloat [[A]], ptr [[A_ADDR]], align 2
+// CHECK-LA32-NEXT:    store float [[B]], ptr [[B_ADDR]], align 4
+// CHECK-LA32-NEXT:    [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOATFLOAT]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA32-NEXT:    store bfloat [[TMP0]], ptr [[A1]], align 4
+// CHECK-LA32-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR]], align 4
+// CHECK-LA32-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOATFLOAT]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA32-NEXT:    store float [[TMP1]], ptr [[B2]], align 4
+// CHECK-LA32-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { bfloat, float }, ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA32-NEXT:    [[TMP3:%.*]] = load bfloat, ptr [[TMP2]], align 4
+// CHECK-LA32-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw { bfloat, float }, ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA32-NEXT:    [[TMP5:%.*]] = load float, ptr [[TMP4]], align 4
+// CHECK-LA32-NEXT:    [[TMP6:%.*]] = insertvalue { bfloat, float } poison, bfloat [[TMP3]], 0
+// CHECK-LA32-NEXT:    [[TMP7:%.*]] = insertvalue { bfloat, float } [[TMP6]], float [[TMP5]], 1
+// CHECK-LA32-NEXT:    ret { bfloat, float } [[TMP7]]
+//
+struct bfloatfloat hf(__bf16 a, float b) {
+  struct bfloatfloat x;
+  x.a = a;
+  x.b = b;
+  return x;
+}
+
+struct bfloat2float {
+  __bf16 a;
+  __bf16 b;
+  float c;
+};
+
+// CHECK-LA64-LABEL: define dso_local i64 @h2f
+// CHECK-LA64-SAME: (bfloat noundef [[A:%.*]], bfloat noundef [[B:%.*]], float noundef [[C:%.*]]) #[[ATTR0]] {
+// CHECK-LA64-NEXT:  entry:
+// CHECK-LA64-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_BFLOAT2FLOAT:%.*]], align 4
+// CHECK-LA64-NEXT:    [[A_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[C_ADDR:%.*]] = alloca float, align 4
+// CHECK-LA64-NEXT:    store bfloat [[A]], ptr [[A_ADDR]], align 2
+// CHECK-LA64-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    store float [[C]], ptr [[C_ADDR]], align 4
+// CHECK-LA64-NEXT:    [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT2FLOAT]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA64-NEXT:    store bfloat [[TMP0]], ptr [[A1]], align 4
+// CHECK-LA64-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT2FLOAT]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA64-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 2
+// CHECK-LA64-NEXT:    [[TMP2:%.*]] = load float, ptr [[C_ADDR]], align 4
+// CHECK-LA64-NEXT:    [[C3:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT2FLOAT]], ptr [[RETVAL]], i32 0, i32 2
+// CHECK-LA64-NEXT:    store float [[TMP2]], ptr [[C3]], align 4
+// CHECK-LA64-NEXT:    [[TMP3:%.*]] = load i64, ptr [[RETVAL]], align 4
+// CHECK-LA64-NEXT:    ret i64 [[TMP3]]
+//
+// CHECK-LA32-LABEL: define dso_local [2 x i32] @h2f
+// CHECK-LA32-SAME: (bfloat noundef [[A:%.*]], bfloat noundef [[B:%.*]], float noundef [[C:%.*]]) #[[ATTR0]] {
+// CHECK-LA32-NEXT:  entry:
+// CHECK-LA32-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_BFLOAT2FLOAT:%.*]], align 4
+// CHECK-LA32-NEXT:    [[A_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[C_ADDR:%.*]] = alloca float, align 4
+// CHECK-LA32-NEXT:    store bfloat [[A]], ptr [[A_ADDR]], align 2
+// CHECK-LA32-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    store float [[C]], ptr [[C_ADDR]], align 4
+// CHECK-LA32-NEXT:    [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT2FLOAT]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA32-NEXT:    store bfloat [[TMP0]], ptr [[A1]], align 4
+// CHECK-LA32-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT2FLOAT]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA32-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 2
+// CHECK-LA32-NEXT:    [[TMP2:%.*]] = load float, ptr [[C_ADDR]], align 4
+// CHECK-LA32-NEXT:    [[C3:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT2FLOAT]], ptr [[RETVAL]], i32 0, i32 2
+// CHECK-LA32-NEXT:    store float [[TMP2]], ptr [[C3]], align 4
+// CHECK-LA32-NEXT:    [[TMP3:%.*]] = load [2 x i32], ptr [[RETVAL]], align 4
+// CHECK-LA32-NEXT:    ret [2 x i32] [[TMP3]]
+//
+struct bfloat2float h2f(__bf16 a, __bf16 b, float c) {
+  struct bfloat2float x;
+  x.a = a;
+  x.b = b;
+  x.c = c;
+  return x;
+}
+
+struct floatbfloat3 {
+  float a;
+  __bf16 b;
+  __bf16 c;
+  __bf16 d;
+};
+
+// CHECK-LA64-LABEL: define dso_local [2 x i64] @fh3
+// CHECK-LA64-SAME: (float noundef [[A:%.*]], bfloat noundef [[B:%.*]], bfloat noundef [[C:%.*]], bfloat noundef [[D:%.*]]) #[[ATTR0]] {
+// CHECK-LA64-NEXT:  entry:
+// CHECK-LA64-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOATBFLOAT3:%.*]], align 4
+// CHECK-LA64-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-LA64-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[C_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[D_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[RETVAL_COERCE:%.*]] = alloca [2 x i64], align 8
+// CHECK-LA64-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4
+// CHECK-LA64-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    store bfloat [[C]], ptr [[C_ADDR]], align 2
+// CHECK-LA64-NEXT:    store bfloat [[D]], ptr [[D_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4
+// CHECK-LA64-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT3]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA64-NEXT:    store float [[TMP0]], ptr [[A1]], align 4
+// CHECK-LA64-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT3]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA64-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 4
+// CHECK-LA64-NEXT:    [[TMP2:%.*]] = load bfloat, ptr [[C_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[C3:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT3]], ptr [[RETVAL]], i32 0, i32 2
+// CHECK-LA64-NEXT:    store bfloat [[TMP2]], ptr [[C3]], align 2
+// CHECK-LA64-NEXT:    [[TMP3:%.*]] = load bfloat, ptr [[D_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[D4:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT3]], ptr [[RETVAL]], i32 0, i32 3
+// CHECK-LA64-NEXT:    store bfloat [[TMP3]], ptr [[D4]], align 4
+// CHECK-LA64-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL_COERCE]], ptr align 4 [[RETVAL]], i64 12, i1 false)
+// CHECK-LA64-NEXT:    [[TMP4:%.*]] = load [2 x i64], ptr [[RETVAL_COERCE]], align 8
+// CHECK-LA64-NEXT:    ret [2 x i64] [[TMP4]]
+//
+// CHECK-LA32-LABEL: define dso_local void @fh3
+// CHECK-LA32-SAME: (ptr dead_on_unwind noalias writable sret([[STRUCT_FLOATBFLOAT3:%.*]]) align 4 [[AGG_RESULT:%.*]], float noundef [[A:%.*]], bfloat noundef [[B:%.*]], bfloat noundef [[C:%.*]], bfloat noundef [[D:%.*]]) #[[ATTR0]] {
+// CHECK-LA32-NEXT:  entry:
+// CHECK-LA32-NEXT:    [[RESULT_PTR:%.*]] = alloca ptr, align 4
+// CHECK-LA32-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-LA32-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[C_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[D_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 4
+// CHECK-LA32-NEXT:    store float [[A]], ptr [[A_ADDR]], align 4
+// CHECK-LA32-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    store bfloat [[C]], ptr [[C_ADDR]], align 2
+// CHECK-LA32-NEXT:    store bfloat [[D]], ptr [[D_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4
+// CHECK-LA32-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT3]], ptr [[AGG_RESULT]], i32 0, i32 0
+// CHECK-LA32-NEXT:    store float [[TMP0]], ptr [[A1]], align 4
+// CHECK-LA32-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT3]], ptr [[AGG_RESULT]], i32 0, i32 1
+// CHECK-LA32-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 4
+// CHECK-LA32-NEXT:    [[TMP2:%.*]] = load bfloat, ptr [[C_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[C3:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT3]], ptr [[AGG_RESULT]], i32 0, i32 2
+// CHECK-LA32-NEXT:    store bfloat [[TMP2]], ptr [[C3]], align 2
+// CHECK-LA32-NEXT:    [[TMP3:%.*]] = load bfloat, ptr [[D_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[D4:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOATBFLOAT3]], ptr [[AGG_RESULT]], i32 0, i32 3
+// CHECK-LA32-NEXT:    store bfloat [[TMP3]], ptr [[D4]], align 4
+// CHECK-LA32-NEXT:    ret void
+//
+struct floatbfloat3 fh3(float a, __bf16 b, __bf16 c, __bf16 d) {
+  struct floatbfloat3 x;
+  x.a = a;
+  x.b = b;
+  x.c = c;
+  x.d = d;
+  return x;
+}
+
+struct bfloat5 {
+  __bf16 a;
+  __bf16 b;
+  __bf16 c;
+  __bf16 d;
+  __bf16 e;
+};
+
+// CHECK-LA64-LABEL: define dso_local [2 x i64] @h5
+// CHECK-LA64-SAME: (bfloat noundef [[A:%.*]], bfloat noundef [[B:%.*]], bfloat noundef [[C:%.*]], bfloat noundef [[D:%.*]], bfloat noundef [[E:%.*]]) #[[ATTR0]] {
+// CHECK-LA64-NEXT:  entry:
+// CHECK-LA64-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_BFLOAT5:%.*]], align 2
+// CHECK-LA64-NEXT:    [[A_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[C_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[D_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[E_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    [[RETVAL_COERCE:%.*]] = alloca [2 x i64], align 8
+// CHECK-LA64-NEXT:    store bfloat [[A]], ptr [[A_ADDR]], align 2
+// CHECK-LA64-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    store bfloat [[C]], ptr [[C_ADDR]], align 2
+// CHECK-LA64-NEXT:    store bfloat [[D]], ptr [[D_ADDR]], align 2
+// CHECK-LA64-NEXT:    store bfloat [[E]], ptr [[E_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT5]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-LA64-NEXT:    store bfloat [[TMP0]], ptr [[A1]], align 2
+// CHECK-LA64-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT5]], ptr [[RETVAL]], i32 0, i32 1
+// CHECK-LA64-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 2
+// CHECK-LA64-NEXT:    [[TMP2:%.*]] = load bfloat, ptr [[C_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[C3:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT5]], ptr [[RETVAL]], i32 0, i32 2
+// CHECK-LA64-NEXT:    store bfloat [[TMP2]], ptr [[C3]], align 2
+// CHECK-LA64-NEXT:    [[TMP3:%.*]] = load bfloat, ptr [[D_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[D4:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT5]], ptr [[RETVAL]], i32 0, i32 3
+// CHECK-LA64-NEXT:    store bfloat [[TMP3]], ptr [[D4]], align 2
+// CHECK-LA64-NEXT:    [[TMP4:%.*]] = load bfloat, ptr [[E_ADDR]], align 2
+// CHECK-LA64-NEXT:    [[E5:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT5]], ptr [[RETVAL]], i32 0, i32 4
+// CHECK-LA64-NEXT:    store bfloat [[TMP4]], ptr [[E5]], align 2
+// CHECK-LA64-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL_COERCE]], ptr align 2 [[RETVAL]], i64 10, i1 false)
+// CHECK-LA64-NEXT:    [[TMP5:%.*]] = load [2 x i64], ptr [[RETVAL_COERCE]], align 8
+// CHECK-LA64-NEXT:    ret [2 x i64] [[TMP5]]
+//
+// CHECK-LA32-LABEL: define dso_local void @h5
+// CHECK-LA32-SAME: (ptr dead_on_unwind noalias writable sret([[STRUCT_BFLOAT5:%.*]]) align 2 [[AGG_RESULT:%.*]], bfloat noundef [[A:%.*]], bfloat noundef [[B:%.*]], bfloat noundef [[C:%.*]], bfloat noundef [[D:%.*]], bfloat noundef [[E:%.*]]) #[[ATTR0]] {
+// CHECK-LA32-NEXT:  entry:
+// CHECK-LA32-NEXT:    [[RESULT_PTR:%.*]] = alloca ptr, align 4
+// CHECK-LA32-NEXT:    [[A_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[C_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[D_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    [[E_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 4
+// CHECK-LA32-NEXT:    store bfloat [[A]], ptr [[A_ADDR]], align 2
+// CHECK-LA32-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    store bfloat [[C]], ptr [[C_ADDR]], align 2
+// CHECK-LA32-NEXT:    store bfloat [[D]], ptr [[D_ADDR]], align 2
+// CHECK-LA32-NEXT:    store bfloat [[E]], ptr [[E_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT5]], ptr [[AGG_RESULT]], i32 0, i32 0
+// CHECK-LA32-NEXT:    store bfloat [[TMP0]], ptr [[A1]], align 2
+// CHECK-LA32-NEXT:    [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT5]], ptr [[AGG_RESULT]], i32 0, i32 1
+// CHECK-LA32-NEXT:    store bfloat [[TMP1]], ptr [[B2]], align 2
+// CHECK-LA32-NEXT:    [[TMP2:%.*]] = load bfloat, ptr [[C_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[C3:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT5]], ptr [[AGG_RESULT]], i32 0, i32 2
+// CHECK-LA32-NEXT:    store bfloat [[TMP2]], ptr [[C3]], align 2
+// CHECK-LA32-NEXT:    [[TMP3:%.*]] = load bfloat, ptr [[D_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[D4:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT5]], ptr [[AGG_RESULT]], i32 0, i32 3
+// CHECK-LA32-NEXT:    store bfloat [[TMP3]], ptr [[D4]], align 2
+// CHECK-LA32-NEXT:    [[TMP4:%.*]] = load bfloat, ptr [[E_ADDR]], align 2
+// CHECK-LA32-NEXT:    [[E5:%.*]] = getelementptr inbounds nuw [[STRUCT_BFLOAT5]], ptr [[AGG_RESULT]], i32 0, i32 4
+// CHECK-LA32-NEXT:    store bfloat [[TMP4]], ptr [[E5]], align 2
+// CHECK-LA32-NEXT:    ret void
+//
+struct bfloat5 h5(__bf16 a, __bf16 b, __bf16 c, __bf16 d, __bf16 e) {
+  struct bfloat5 x;
+  x.a = a;
+  x.b = b;
+  x.c = c;
+  x.d = d;
+  x.e = e;
+  return x;
+}
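
The LA64/LA32 difference in the `h5` checks above is a size question: the LoongArch psABI returns small aggregates (up to 2*GRLEN bytes) in a pair of GARs, so the 10-byte `struct bfloat5` fits in `[2 x i64]` on LA64 but is returned indirectly via `sret` on LA32. A minimal sketch, assuming a compiler with `__bf16` support (the names here are illustrative, not from the patch):

    // Sketch: 5 x 2-byte bfloat members -> sizeof == 10 bytes.
    // 10 <= 16 (2*GRLEN on LA64) -> returned in two GARs as [2 x i64];
    // 10 >  8  (2*GRLEN on LA32) -> returned indirectly through sret.
    #include <cstdio>

    struct bfloat5 { __bf16 a, b, c, d, e; };

    int main() {
      std::printf("sizeof(bfloat5) = %zu\n", sizeof(bfloat5));
      return 0;
    }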
diff --git a/clang/test/CodeGen/LoongArch/bfloat-mangle.cpp b/clang/test/CodeGen/LoongArch/bfloat-mangle.cpp
new file mode 100644
index 0000000000000..07b91b65d0f7d
--- /dev/null
+++ b/clang/test/CodeGen/LoongArch/bfloat-mangle.cpp
@@ -0,0 +1,19 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// RUN: %clang_cc1 -triple loongarch64 -emit-llvm -o - %s | FileCheck %s --check-prefixes=CHECK-LA64
+// RUN: %clang_cc1 -triple loongarch32 -emit-llvm -o - %s | FileCheck %s --check-prefixes=CHECK-LA32
+
+// CHECK-LA64-LABEL: define dso_local void @_Z3fooDF16b
+// CHECK-LA64-SAME: (bfloat noundef [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-LA64-NEXT:  entry:
+// CHECK-LA64-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA64-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA64-NEXT:    ret void
+//
+// CHECK-LA32-LABEL: define dso_local void @_Z3fooDF16b
+// CHECK-LA32-SAME: (bfloat noundef [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-LA32-NEXT:  entry:
+// CHECK-LA32-NEXT:    [[B_ADDR:%.*]] = alloca bfloat, align 2
+// CHECK-LA32-NEXT:    store bfloat [[B]], ptr [[B_ADDR]], align 2
+// CHECK-LA32-NEXT:    ret void
+//
+void foo(__bf16 b) {}
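
For reference, `DF16b` in the labels above is the Itanium C++ ABI mangling for the bf16 type. A quick out-of-tree way to see the same symbol, as a sketch (the file name and function name are hypothetical):

    // Sketch: verify the mangling manually (assumes a clang with __bf16 support):
    //   clang++ --target=loongarch64-linux-gnu -c mangle.cpp && llvm-nm mangle.o
    // Expected symbol: _Z3barDF16b
    __bf16 bar(__bf16 x) { return x; }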
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index c7b2a1a8ffbf8..c1f593e7f0a74 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -182,6 +182,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
   if (Subtarget.hasBasicF()) {
     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::bf16, Expand);
+    setTruncStoreAction(MVT::f32, MVT::bf16, Expand);
     setCondCodeAction(FPCCToExpand, MVT::f32, Expand);
 
     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
@@ -203,6 +205,9 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
                        Subtarget.isSoftFPABI() ? LibCall : Custom);
     setOperationAction(ISD::FP_TO_FP16, MVT::f32,
                        Subtarget.isSoftFPABI() ? LibCall : Custom);
+    setOperationAction(ISD::BF16_TO_FP, MVT::f32, Custom);
+    setOperationAction(ISD::FP_TO_BF16, MVT::f32,
+                       Subtarget.isSoftFPABI() ? LibCall : Custom);
 
     if (Subtarget.is64Bit())
       setOperationAction(ISD::FRINT, MVT::f32, Legal);
@@ -221,6 +226,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
   if (Subtarget.hasBasicD()) {
     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
+    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::bf16, Expand);
+    setTruncStoreAction(MVT::f64, MVT::bf16, Expand);
     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
     setCondCodeAction(FPCCToExpand, MVT::f64, Expand);
@@ -243,6 +250,9 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
     setOperationAction(ISD::FP_TO_FP16, MVT::f64,
                        Subtarget.isSoftFPABI() ? LibCall : Custom);
+    setOperationAction(ISD::BF16_TO_FP, MVT::f64, Custom);
+    setOperationAction(ISD::FP_TO_BF16, MVT::f64,
+                       Subtarget.isSoftFPABI() ? LibCall : Custom);
 
     if (Subtarget.is64Bit())
       setOperationAction(ISD::FRINT, MVT::f64, Legal);
@@ -497,6 +507,10 @@ SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
     return lowerFP_TO_FP16(Op, DAG);
   case ISD::FP16_TO_FP:
     return lowerFP16_TO_FP(Op, DAG);
+  case ISD::FP_TO_BF16:
+    return lowerFP_TO_BF16(Op, DAG);
+  case ISD::BF16_TO_FP:
+    return lowerBF16_TO_FP(Op, DAG);
   }
   return SDValue();
 }
@@ -2283,6 +2297,36 @@ SDValue LoongArchTargetLowering::lowerFP16_TO_FP(SDValue Op,
   return Res;
 }
 
+SDValue LoongArchTargetLowering::lowerFP_TO_BF16(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  assert(Subtarget.hasBasicF() && "Unexpected custom legalization");
+  SDLoc DL(Op);
+  MakeLibCallOptions CallOptions;
+  RTLIB::Libcall LC =
+      RTLIB::getFPROUND(Op.getOperand(0).getValueType(), MVT::bf16);
+  SDValue Res =
+      makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
+  if (Subtarget.is64Bit())
+    return DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Res);
+  return DAG.getBitcast(MVT::i32, Res);
+}
+
+SDValue LoongArchTargetLowering::lowerBF16_TO_FP(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  assert(Subtarget.hasBasicF() && "Unexpected custom legalization");
+  MVT VT = Op.getSimpleValueType();
+  SDLoc DL(Op);
+  Op = DAG.getNode(
+      ISD::SHL, DL, Op.getOperand(0).getValueType(), Op.getOperand(0),
+      DAG.getShiftAmountConstant(16, Op.getOperand(0).getValueType(), DL));
+  SDValue Res = Subtarget.is64Bit() ? DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64,
+                                                  DL, MVT::f32, Op)
+                                    : DAG.getBitcast(MVT::f32, Op);
+  if (VT != MVT::f32)
+    return DAG.getNode(ISD::FP_EXTEND, DL, VT, Res);
+  return Res;
+}
+
 static bool isConstantOrUndef(const SDValue Op) {
   if (Op->isUndef())
     return true;
@@ -7714,8 +7758,9 @@ bool LoongArchTargetLowering::splitValueIntoRegisterParts(
   bool IsABIRegCopy = CC.has_value();
   EVT ValueVT = Val.getValueType();
 
-  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
-    // Cast the f16 to i16, extend to i32, pad with ones to make a float
+  if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
+      PartVT == MVT::f32) {
+    // Cast the [b]f16 to i16, extend to i32, pad with ones to make a float
     // nan, and cast to f32.
     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
@@ -7734,10 +7779,11 @@ SDValue LoongArchTargetLowering::joinRegisterPartsIntoValue(
     MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
   bool IsABIRegCopy = CC.has_value();
 
-  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
+  if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
+      PartVT == MVT::f32) {
     SDValue Val = Parts[0];
 
-    // Cast the f32 to i32, truncate to i16, and cast back to f16.
+    // Cast the f32 to i32, truncate to i16, and cast back to [b]f16.
     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
     Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
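
The `lowerBF16_TO_FP` hunk above exploits the fact that bf16 is the high half of an IEEE-754 binary32, so extension is a plain 16-bit shift plus a bitcast and needs no rounding; only the opposite direction (`FP_TO_BF16`) must round and therefore stays a `__truncsfbf2` libcall. A minimal C++ sketch of the same bit trick (illustration only; `bf16_to_float` is not a name from the patch):

    #include <cstdint>
    #include <cstring>

    // Mirrors the DAG sequence: ISD::SHL by 16, then a bitcast to f32.
    static float bf16_to_float(uint16_t b) {
      uint32_t bits = static_cast<uint32_t>(b) << 16; // payload into the high half
      float f;
      std::memcpy(&f, &bits, sizeof(f));              // DAG.getBitcast(MVT::f32, ...)
      return f;
    }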
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 8c00ec75db94b..158d5471e555a 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -363,6 +363,8 @@ class LoongArchTargetLowering : public TargetLowering {
   SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerFP_TO_BF16(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerBF16_TO_FP(SDValue Op, SelectionDAG &DAG) const;
 
   bool isFPImmLegal(const APFloat &Imm, EVT VT,
                     bool ForCodeSize) const override;
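
The `splitValueIntoRegisterParts` change pads the upper 16 bits with ones so an f32 register carrying a [b]f16 value is always a NaN; the `lu12i.w $aN, -16` / `or` pairs in the tests below materialize exactly that mask (0xFFFF0000). A hedged one-liner of the boxed pattern (`nan_box_half` is an illustrative name, not from the patch):

    #include <cstdint>

    // Exponent bits 30..23 are all ones and mantissa bits 22..16 are set,
    // so the result is a binary32 NaN for every 16-bit payload.
    static uint32_t nan_box_half(uint16_t payload) {
      return 0xFFFF0000u | payload; // upper half all ones, payload in the low half
    }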
diff --git a/llvm/test/CodeGen/LoongArch/bf16-promote.ll b/llvm/test/CodeGen/LoongArch/bf16-promote.ll
new file mode 100644
index 0000000000000..42651eb53acea
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/bf16-promote.ll
@@ -0,0 +1,172 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=loongarch64 -mattr=+d -target-abi=lp64d < %s | FileCheck --check-prefixes=CHECK,LA64 %s
+; RUN: llc -mtriple=loongarch32 -mattr=+d -target-abi=ilp32d < %s | FileCheck --check-prefixes=CHECK,LA32 %s
+
+define void @test_load_store(ptr %p, ptr %q) nounwind {
+; CHECK-LABEL: test_load_store:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.h $a0, $a0, 0
+; CHECK-NEXT:    st.h $a0, $a1, 0
+; CHECK-NEXT:    ret
+  %a = load bfloat, ptr %p
+  store bfloat %a, ptr %q
+  ret void
+}
+
+define float @test_fpextend_float(ptr %p) nounwind {
+; LA64-LABEL: test_fpextend_float:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.hu $a0, $a0, 0
+; LA64-NEXT:    slli.d $a0, $a0, 16
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    ret
+;
+; LA32-LABEL: test_fpextend_float:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.hu $a0, $a0, 0
+; LA32-NEXT:    slli.w $a0, $a0, 16
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    ret
+  %a = load bfloat, ptr %p
+  %r = fpext bfloat %a to float
+  ret float %r
+}
+
+define double @test_fpextend_double(ptr %p) nounwind {
+; LA64-LABEL: test_fpextend_double:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.hu $a0, $a0, 0
+; LA64-NEXT:    slli.d $a0, $a0, 16
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    fcvt.d.s $fa0, $fa0
+; LA64-NEXT:    ret
+;
+; LA32-LABEL: test_fpextend_double:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.hu $a0, $a0, 0
+; LA32-NEXT:    slli.w $a0, $a0, 16
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    fcvt.d.s $fa0, $fa0
+; LA32-NEXT:    ret
+  %a = load bfloat, ptr %p
+  %r = fpext bfloat %a to double
+  ret double %r
+}
+
+define void @test_fptrunc_float(float %f, ptr %p) nounwind {
+; LA64-LABEL: test_fptrunc_float:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT:    move $fp, $a0
+; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    st.h $a0, $fp, 0
+; LA64-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+;
+; LA32-LABEL: test_fptrunc_float:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    bl __truncsfbf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
+; LA32-NEXT:    st.h $a0, $fp, 0
+; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+  %a = fptrunc float %f to bfloat
+  store bfloat %a, ptr %p
+  ret void
+}
+
+define void @test_fptrunc_double(double %d, ptr %p) nounwind {
+; LA64-LABEL: test_fptrunc_double:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT:    move $fp, $a0
+; LA64-NEXT:    pcaddu18i $ra, %call36(__truncdfbf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    st.h $a0, $fp, 0
+; LA64-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+;
+; LA32-LABEL: test_fptrunc_double:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    bl __truncdfbf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
+; LA32-NEXT:    st.h $a0, $fp, 0
+; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+  %a = fptrunc double %d to bfloat
+  store bfloat %a, ptr %p
+  ret void
+}
+
+define void @test_fadd(ptr %p, ptr %q) nounwind {
+; LA64-LABEL: test_fadd:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT:    ld.hu $a1, $a1, 0
+; LA64-NEXT:    move $fp, $a0
+; LA64-NEXT:    ld.hu $a0, $a0, 0
+; LA64-NEXT:    slli.d $a1, $a1, 16
+; LA64-NEXT:    movgr2fr.w $fa0, $a1
+; LA64-NEXT:    slli.d $a0, $a0, 16
+; LA64-NEXT:    movgr2fr.w $fa1, $a0
+; LA64-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    st.h $a0, $fp, 0
+; LA64-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+;
+; LA32-LABEL: test_fadd:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT:    ld.hu $a1, $a1, 0
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    ld.hu $a0, $a0, 0
+; LA32-NEXT:    slli.w $a1, $a1, 16
+; LA32-NEXT:    movgr2fr.w $fa0, $a1
+; LA32-NEXT:    slli.w $a0, $a0, 16
+; LA32-NEXT:    movgr2fr.w $fa1, $a0
+; LA32-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA32-NEXT:    bl __truncsfbf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
+; LA32-NEXT:    st.h $a0, $fp, 0
+; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+  %a = load bfloat, ptr %p
+  %b = load bfloat, ptr %q
+  %r = fadd bfloat %a, %b
+  store bfloat %r, ptr %p
+  ret void
+}
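
bf16-promote.ll above shows the asymmetry: loads, stores, and extensions lower to plain instructions, while every truncation to bfloat calls `__truncsfbf2`, because that direction must round. A simplified round-to-nearest-even sketch of what the compiler-rt helper does (illustrative only; the real `__truncsfbf2` also canonicalizes NaNs, which this omits):

    #include <cstdint>
    #include <cstring>

    static uint16_t float_to_bf16(float f) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      bits += 0x7FFFu + ((bits >> 16) & 1u); // round to nearest, ties to even
      return static_cast<uint16_t>(bits >> 16);
    }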
diff --git a/llvm/test/CodeGen/LoongArch/bf16.ll b/llvm/test/CodeGen/LoongArch/bf16.ll
new file mode 100644
index 0000000000000..e580bcc69f52b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/bf16.ll
@@ -0,0 +1,1048 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+;; For `double` parameters and return values, compiling on loongarch32 with `-mattr=+d` and
+;; `-target-abi=ilp32s` is an unsupported combination: it fails with the error
+;; 'Passing f64 with GPR on LA32 is undefined', so that configuration is currently skipped here.
+; RUN: llc -mtriple=loongarch32 -verify-machineinstrs < %s | FileCheck %s -check-prefix=LA32
+; RUN: llc -mtriple=loongarch64 -verify-machineinstrs < %s | FileCheck %s -check-prefix=LA64
+; RUN: llc -mtriple=loongarch32 -mattr=+f -target-abi=ilp32s -verify-machineinstrs < %s | FileCheck %s -check-prefix=LA32F-ILP32S
+; RUN: llc -mtriple=loongarch32 -mattr=+f -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s -check-prefix=LA32F-ILP32D
+; RUN: llc -mtriple=loongarch32 -mattr=+d -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s -check-prefix=LA32D-ILP32D
+; RUN: llc -mtriple=loongarch64 -mattr=+f -target-abi=lp64s -verify-machineinstrs < %s | FileCheck %s -check-prefix=LA64F-LP64S
+; RUN: llc -mtriple=loongarch64 -mattr=+f -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s -check-prefix=LA64F-LP64D
+; RUN: llc -mtriple=loongarch64 -mattr=+d -target-abi=lp64s -verify-machineinstrs < %s | FileCheck %s -check-prefix=LA64D-LP64S
+; RUN: llc -mtriple=loongarch64 -mattr=+d -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s -check-prefix=LA64D-LP64D
+
+define bfloat @float_to_bfloat(float %a) nounwind {
+; LA32-LABEL: float_to_bfloat:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    bl __truncsfbf2
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: float_to_bfloat:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    lu12i.w $a1, -16
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: float_to_bfloat:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    bl __truncsfbf2
+; LA32F-ILP32S-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32S-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: float_to_bfloat:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    bl __truncsfbf2
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: float_to_bfloat:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    bl __truncsfbf2
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32D-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: float_to_bfloat:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: float_to_bfloat:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: float_to_bfloat:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: float_to_bfloat:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = fptrunc float %a to bfloat
+  ret bfloat %1
+}
+
+define bfloat @double_to_bfloat(double %a) nounwind {
+; LA32-LABEL: double_to_bfloat:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    bl __truncdfbf2
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: double_to_bfloat:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    pcaddu18i $ra, %call36(__truncdfbf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    lu12i.w $a1, -16
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: double_to_bfloat:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    bl __truncdfbf2
+; LA32F-ILP32S-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32S-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: double_to_bfloat:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    bl __truncdfbf2
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: double_to_bfloat:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    bl __truncdfbf2
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32D-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: double_to_bfloat:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncdfbf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: double_to_bfloat:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncdfbf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: double_to_bfloat:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncdfbf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: double_to_bfloat:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncdfbf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = fptrunc double %a to bfloat
+  ret bfloat %1
+}
+
+define float @bfloat_to_float(bfloat %a) nounwind {
+; LA32-LABEL: bfloat_to_float:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a0, $a0, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bfloat_to_float:
+; LA64:       # %bb.0:
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    slli.d $a0, $a0, 16
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: bfloat_to_float:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    slli.w $a0, $a0, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: bfloat_to_float:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    slli.w $a0, $a0, 16
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: bfloat_to_float:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    slli.w $a0, $a0, 16
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: bfloat_to_float:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    slli.d $a0, $a0, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: bfloat_to_float:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64D-NEXT:    slli.d $a0, $a0, 16
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: bfloat_to_float:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    slli.d $a0, $a0, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: bfloat_to_float:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64D-NEXT:    slli.d $a0, $a0, 16
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    ret
+  %1 = fpext bfloat %a to float
+  ret float %1
+}
+
+define double @bfloat_to_double(bfloat %a) nounwind {
+; LA32-LABEL: bfloat_to_double:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    slli.w $a0, $a0, 16
+; LA32-NEXT:    bl __extendsfdf2
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bfloat_to_double:
+; LA64:       # %bb.0:
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    slli.d $a0, $a0, 16
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    fcvt.d.s $fa0, $fa0
+; LA64-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: bfloat_to_double:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    slli.w $a0, $a0, 16
+; LA32F-ILP32S-NEXT:    bl __extendsfdf2
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: bfloat_to_double:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    slli.w $a0, $a0, 16
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    bl __extendsfdf2
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: bfloat_to_double:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    slli.w $a0, $a0, 16
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    fcvt.d.s $fa0, $fa0
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: bfloat_to_double:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    slli.d $a0, $a0, 16
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    fcvt.d.s $fa0, $fa0
+; LA64F-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: bfloat_to_double:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64D-NEXT:    slli.d $a0, $a0, 16
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    fcvt.d.s $fa0, $fa0
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: bfloat_to_double:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    slli.d $a0, $a0, 16
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    fcvt.d.s $fa0, $fa0
+; LA64D-LP64S-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: bfloat_to_double:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64D-NEXT:    slli.d $a0, $a0, 16
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    fcvt.d.s $fa0, $fa0
+; LA64D-LP64D-NEXT:    ret
+  %1 = fpext bfloat %a to double
+  ret double %1
+}
+
+define bfloat @i16_to_bfloat(i16 %a) nounwind {
+; LA32-LABEL: i16_to_bfloat:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: i16_to_bfloat:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a1, -16
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: i16_to_bfloat:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32S-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: i16_to_bfloat:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: i16_to_bfloat:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32D-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: i16_to_bfloat:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: i16_to_bfloat:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: i16_to_bfloat:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: i16_to_bfloat:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    ret
+  %1 = bitcast i16 %a to bfloat
+  ret bfloat %1
+}
+
+define i16 @bfloat_to_i16(bfloat %a) nounwind {
+; LA32-LABEL: bfloat_to_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bfloat_to_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: bfloat_to_i16:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: bfloat_to_i16:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: bfloat_to_i16:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: bfloat_to_i16:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: bfloat_to_i16:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: bfloat_to_i16:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: bfloat_to_i16:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64D-NEXT:    ret
+  %1 = bitcast bfloat %a to i16
+  ret i16 %1
+}
+
+define bfloat @bfloat_add(bfloat %a, bfloat %b) nounwind {
+; LA32-LABEL: bfloat_add:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    slli.w $a0, $a0, 16
+; LA32-NEXT:    slli.w $a1, $a1, 16
+; LA32-NEXT:    bl __addsf3
+; LA32-NEXT:    bl __truncsfbf2
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bfloat_add:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    movfr2gr.s $a1, $fa1
+; LA64-NEXT:    slli.d $a1, $a1, 16
+; LA64-NEXT:    movgr2fr.w $fa0, $a1
+; LA64-NEXT:    slli.d $a0, $a0, 16
+; LA64-NEXT:    movgr2fr.w $fa1, $a0
+; LA64-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    lu12i.w $a1, -16
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: bfloat_add:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    slli.w $a1, $a1, 16
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a1
+; LA32F-ILP32S-NEXT:    slli.w $a0, $a0, 16
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa1, $a0
+; LA32F-ILP32S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    bl __truncsfbf2
+; LA32F-ILP32S-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32S-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: bfloat_add:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a1, $fa1
+; LA32F-ILP32D-NEXT:    slli.w $a1, $a1, 16
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a1
+; LA32F-ILP32D-NEXT:    slli.w $a0, $a0, 16
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa1, $a0
+; LA32F-ILP32D-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA32F-ILP32D-NEXT:    bl __truncsfbf2
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: bfloat_add:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a1, $fa1
+; LA32D-ILP32D-NEXT:    slli.w $a1, $a1, 16
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a1
+; LA32D-ILP32D-NEXT:    slli.w $a0, $a0, 16
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa1, $a0
+; LA32D-ILP32D-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA32D-ILP32D-NEXT:    bl __truncsfbf2
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32D-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: bfloat_add:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    slli.d $a1, $a1, 16
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a1
+; LA64F-LP64S-NEXT:    slli.d $a0, $a0, 16
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa1, $a0
+; LA64F-LP64S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64F-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: bfloat_add:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64D-NEXT:    movfr2gr.s $a1, $fa1
+; LA64F-LP64D-NEXT:    slli.d $a1, $a1, 16
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a1
+; LA64F-LP64D-NEXT:    slli.d $a0, $a0, 16
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa1, $a0
+; LA64F-LP64D-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: bfloat_add:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    slli.d $a1, $a1, 16
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a1
+; LA64D-LP64S-NEXT:    slli.d $a0, $a0, 16
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa1, $a0
+; LA64D-LP64S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64D-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: bfloat_add:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64D-NEXT:    movfr2gr.s $a1, $fa1
+; LA64D-LP64D-NEXT:    slli.d $a1, $a1, 16
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a1
+; LA64D-LP64D-NEXT:    slli.d $a0, $a0, 16
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa1, $a0
+; LA64D-LP64D-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = fadd bfloat %a, %b
+  ret bfloat %1
+}
+
+define bfloat @bfloat_load(ptr %a) nounwind {
+; LA32-LABEL: bfloat_load:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ld.h $a1, $a0, 0
+; LA32-NEXT:    ld.h $a2, $a0, 6
+; LA32-NEXT:    slli.w $a0, $a1, 16
+; LA32-NEXT:    slli.w $a1, $a2, 16
+; LA32-NEXT:    bl __addsf3
+; LA32-NEXT:    bl __truncsfbf2
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bfloat_load:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ld.hu $a1, $a0, 6
+; LA64-NEXT:    ld.hu $a0, $a0, 0
+; LA64-NEXT:    slli.d $a1, $a1, 16
+; LA64-NEXT:    movgr2fr.w $fa0, $a1
+; LA64-NEXT:    slli.d $a0, $a0, 16
+; LA64-NEXT:    movgr2fr.w $fa1, $a0
+; LA64-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    lu12i.w $a1, -16
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: bfloat_load:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    ld.hu $a1, $a0, 6
+; LA32F-ILP32S-NEXT:    ld.hu $a0, $a0, 0
+; LA32F-ILP32S-NEXT:    slli.w $a1, $a1, 16
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a1
+; LA32F-ILP32S-NEXT:    slli.w $a0, $a0, 16
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa1, $a0
+; LA32F-ILP32S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    bl __truncsfbf2
+; LA32F-ILP32S-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32S-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: bfloat_load:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    ld.hu $a1, $a0, 6
+; LA32F-ILP32D-NEXT:    ld.hu $a0, $a0, 0
+; LA32F-ILP32D-NEXT:    slli.w $a1, $a1, 16
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a1
+; LA32F-ILP32D-NEXT:    slli.w $a0, $a0, 16
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa1, $a0
+; LA32F-ILP32D-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA32F-ILP32D-NEXT:    bl __truncsfbf2
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32F-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: bfloat_load:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    ld.hu $a1, $a0, 6
+; LA32D-ILP32D-NEXT:    ld.hu $a0, $a0, 0
+; LA32D-ILP32D-NEXT:    slli.w $a1, $a1, 16
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a1
+; LA32D-ILP32D-NEXT:    slli.w $a0, $a0, 16
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa1, $a0
+; LA32D-ILP32D-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA32D-ILP32D-NEXT:    bl __truncsfbf2
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    lu12i.w $a1, -16
+; LA32D-ILP32D-NEXT:    or $a0, $a0, $a1
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: bfloat_load:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    ld.hu $a1, $a0, 6
+; LA64F-LP64S-NEXT:    ld.hu $a0, $a0, 0
+; LA64F-LP64S-NEXT:    slli.d $a1, $a1, 16
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a1
+; LA64F-LP64S-NEXT:    slli.d $a0, $a0, 16
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa1, $a0
+; LA64F-LP64S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64F-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: bfloat_load:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    ld.hu $a1, $a0, 6
+; LA64F-LP64D-NEXT:    ld.hu $a0, $a0, 0
+; LA64F-LP64D-NEXT:    slli.d $a1, $a1, 16
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a1
+; LA64F-LP64D-NEXT:    slli.d $a0, $a0, 16
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa1, $a0
+; LA64F-LP64D-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64F-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: bfloat_load:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    ld.hu $a1, $a0, 6
+; LA64D-LP64S-NEXT:    ld.hu $a0, $a0, 0
+; LA64D-LP64S-NEXT:    slli.d $a1, $a1, 16
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a1
+; LA64D-LP64S-NEXT:    slli.d $a0, $a0, 16
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa1, $a0
+; LA64D-LP64S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64D-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64S-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: bfloat_load:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    ld.hu $a1, $a0, 6
+; LA64D-LP64D-NEXT:    ld.hu $a0, $a0, 0
+; LA64D-LP64D-NEXT:    slli.d $a1, $a1, 16
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a1
+; LA64D-LP64D-NEXT:    slli.d $a0, $a0, 16
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa1, $a0
+; LA64D-LP64D-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64D-NEXT:    lu12i.w $a1, -16
+; LA64D-LP64D-NEXT:    or $a0, $a0, $a1
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = load bfloat, ptr %a
+  %2 = getelementptr bfloat, ptr %a, i32 3
+  %3 = load bfloat, ptr %2
+  %4 = fadd bfloat %1, %3
+  ret bfloat %4
+}
+
+define void @bfloat_store(ptr %a, bfloat %b, bfloat %c) nounwind {
+; LA32-LABEL: bfloat_store:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    slli.w $a0, $a1, 16
+; LA32-NEXT:    slli.w $a1, $a2, 16
+; LA32-NEXT:    bl __addsf3
+; LA32-NEXT:    bl __truncsfbf2
+; LA32-NEXT:    st.h $a0, $fp, 0
+; LA32-NEXT:    st.h $a0, $fp, 16
+; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: bfloat_store:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT:    move $fp, $a0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    movfr2gr.s $a1, $fa1
+; LA64-NEXT:    slli.d $a1, $a1, 16
+; LA64-NEXT:    movgr2fr.w $fa0, $a1
+; LA64-NEXT:    slli.d $a0, $a0, 16
+; LA64-NEXT:    movgr2fr.w $fa1, $a0
+; LA64-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    st.h $a0, $fp, 0
+; LA64-NEXT:    st.h $a0, $fp, 16
+; LA64-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+;
+; LA32F-ILP32S-LABEL: bfloat_store:
+; LA32F-ILP32S:       # %bb.0:
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32S-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32S-NEXT:    move $fp, $a0
+; LA32F-ILP32S-NEXT:    slli.w $a0, $a2, 16
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-ILP32S-NEXT:    slli.w $a0, $a1, 16
+; LA32F-ILP32S-NEXT:    movgr2fr.w $fa1, $a0
+; LA32F-ILP32S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA32F-ILP32S-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32S-NEXT:    bl __truncsfbf2
+; LA32F-ILP32S-NEXT:    st.h $a0, $fp, 0
+; LA32F-ILP32S-NEXT:    st.h $a0, $fp, 16
+; LA32F-ILP32S-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32S-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32S-NEXT:    ret
+;
+; LA32F-ILP32D-LABEL: bfloat_store:
+; LA32F-ILP32D:       # %bb.0:
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32F-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-ILP32D-NEXT:    move $fp, $a0
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a1, $fa1
+; LA32F-ILP32D-NEXT:    slli.w $a1, $a1, 16
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa0, $a1
+; LA32F-ILP32D-NEXT:    slli.w $a0, $a0, 16
+; LA32F-ILP32D-NEXT:    movgr2fr.w $fa1, $a0
+; LA32F-ILP32D-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA32F-ILP32D-NEXT:    bl __truncsfbf2
+; LA32F-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-ILP32D-NEXT:    st.h $a0, $fp, 0
+; LA32F-ILP32D-NEXT:    st.h $a0, $fp, 16
+; LA32F-ILP32D-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32F-ILP32D-NEXT:    ret
+;
+; LA32D-ILP32D-LABEL: bfloat_store:
+; LA32D-ILP32D:       # %bb.0:
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-ILP32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-ILP32D-NEXT:    move $fp, $a0
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a1, $fa1
+; LA32D-ILP32D-NEXT:    slli.w $a1, $a1, 16
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa0, $a1
+; LA32D-ILP32D-NEXT:    slli.w $a0, $a0, 16
+; LA32D-ILP32D-NEXT:    movgr2fr.w $fa1, $a0
+; LA32D-ILP32D-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA32D-ILP32D-NEXT:    bl __truncsfbf2
+; LA32D-ILP32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-ILP32D-NEXT:    st.h $a0, $fp, 0
+; LA32D-ILP32D-NEXT:    st.h $a0, $fp, 16
+; LA32D-ILP32D-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-ILP32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-ILP32D-NEXT:    ret
+;
+; LA64F-LP64S-LABEL: bfloat_store:
+; LA64F-LP64S:       # %bb.0:
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64S-NEXT:    move $fp, $a0
+; LA64F-LP64S-NEXT:    slli.d $a0, $a2, 16
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-LP64S-NEXT:    slli.d $a0, $a1, 16
+; LA64F-LP64S-NEXT:    movgr2fr.w $fa1, $a0
+; LA64F-LP64S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64F-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64F-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64S-NEXT:    st.h $a0, $fp, 0
+; LA64F-LP64S-NEXT:    st.h $a0, $fp, 16
+; LA64F-LP64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64S-NEXT:    ret
+;
+; LA64F-LP64D-LABEL: bfloat_store:
+; LA64F-LP64D:       # %bb.0:
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64F-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-LP64D-NEXT:    move $fp, $a0
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64D-NEXT:    movfr2gr.s $a1, $fa1
+; LA64F-LP64D-NEXT:    slli.d $a1, $a1, 16
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa0, $a1
+; LA64F-LP64D-NEXT:    slli.d $a0, $a0, 16
+; LA64F-LP64D-NEXT:    movgr2fr.w $fa1, $a0
+; LA64F-LP64D-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64F-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64F-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64F-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-LP64D-NEXT:    st.h $a0, $fp, 0
+; LA64F-LP64D-NEXT:    st.h $a0, $fp, 16
+; LA64F-LP64D-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64F-LP64D-NEXT:    ret
+;
+; LA64D-LP64S-LABEL: bfloat_store:
+; LA64D-LP64S:       # %bb.0:
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64S-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64S-NEXT:    move $fp, $a0
+; LA64D-LP64S-NEXT:    slli.d $a0, $a2, 16
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-LP64S-NEXT:    slli.d $a0, $a1, 16
+; LA64D-LP64S-NEXT:    movgr2fr.w $fa1, $a0
+; LA64D-LP64S-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64D-LP64S-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64S-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64D-LP64S-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64S-NEXT:    st.h $a0, $fp, 0
+; LA64D-LP64S-NEXT:    st.h $a0, $fp, 16
+; LA64D-LP64S-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64S-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64S-NEXT:    ret
+;
+; LA64D-LP64D-LABEL: bfloat_store:
+; LA64D-LP64D:       # %bb.0:
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, -16
+; LA64D-LP64D-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-LP64D-NEXT:    move $fp, $a0
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64D-NEXT:    movfr2gr.s $a1, $fa1
+; LA64D-LP64D-NEXT:    slli.d $a1, $a1, 16
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa0, $a1
+; LA64D-LP64D-NEXT:    slli.d $a0, $a0, 16
+; LA64D-LP64D-NEXT:    movgr2fr.w $fa1, $a0
+; LA64D-LP64D-NEXT:    fadd.s $fa0, $fa1, $fa0
+; LA64D-LP64D-NEXT:    pcaddu18i $ra, %call36(__truncsfbf2)
+; LA64D-LP64D-NEXT:    jirl $ra, $ra, 0
+; LA64D-LP64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-LP64D-NEXT:    st.h $a0, $fp, 0
+; LA64D-LP64D-NEXT:    st.h $a0, $fp, 16
+; LA64D-LP64D-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-LP64D-NEXT:    addi.d $sp, $sp, 16
+; LA64D-LP64D-NEXT:    ret
+  %1 = fadd bfloat %b, %c
+  store bfloat %1, ptr %a
+  %2 = getelementptr bfloat, ptr %a, i32 8
+  store bfloat %1, ptr %2
+  ret void
+}
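
For readers following the bf16 lowering in the checks above: since bfloat is
the high half of an IEEE f32, extending bf16 to f32 is just a 16-bit left
shift of the raw bits -- that is what the `slli.{w,d} ..., 16` +
`movgr2fr.w` pairs do -- and the `lu12i.w $a1, -16` / `or` sequence in the
return path ORs 0xFFFF0000 over the 16-bit result, i.e. the usual
NaN-boxing of a narrow FP value held in a wider FPR. A minimal C sketch of
the extension half (bf16_to_f32 is a hypothetical helper name for
illustration, not something this patch adds):

    #include <stdint.h>
    #include <string.h>

    /* bf16 -> f32: a bfloat's bit pattern is the top 16 bits of the
       corresponding f32, so extension is a plain shift.  This mirrors
       the slli + movgr2fr.w pairs in the generated code above. */
    static float bf16_to_f32(uint16_t b) {
      uint32_t bits = (uint32_t)b << 16;
      float f;
      memcpy(&f, &bits, sizeof f); /* bit-cast without aliasing UB */
      return f;
    }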

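The opposite direction is not open-coded: every f32 -> bf16 truncation in
these tests goes through the __truncsfbf2 libcall (`bl __truncsfbf2` on
LA32, `pcaddu18i`/`jirl` with `%call36` on LA64), since correct rounding
cannot be done with a bare shift. For normal values the libcall amounts to
round-to-nearest-even on the high 16 bits; a sketch of that behavior under
that assumption, deliberately ignoring NaN payload handling:

    #include <stdint.h>
    #include <string.h>

    /* f32 -> bf16 with round-to-nearest-even, as __truncsfbf2 does for
       normal values.  NaNs are not handled: adding the bias to a NaN
       bit pattern can carry into the exponent and produce infinity. */
    static uint16_t f32_to_bf16(float f) {
      uint32_t bits;
      memcpy(&bits, &f, sizeof bits);
      uint32_t lsb = (bits >> 16) & 1; /* tie-breaking bit */
      bits += 0x7FFFu + lsb;           /* round to nearest, ties to even */
      return (uint16_t)(bits >> 16);
    }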

