[llvm] [LoongArch] Pass 'half' in the lower 16 bits of an f32 value when F extension is enabled (PR #109368)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 19 20:58:29 PDT 2024
https://github.com/heiher updated https://github.com/llvm/llvm-project/pull/109368
From 235cada74b1abbe872af8fd40e4a3f1989d3c551 Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Thu, 19 Sep 2024 13:59:08 +0800
Subject: [PATCH 1/2] [LoongArch] Pass 'half' in the lower 16 bits of an f32
value when F extension is enabled
LoongArch currently lacks a hardware extension for the fp16 data type, and the
ABI documentation does not explicitly define how fp16 should be handled. Future
revisions of the LoongArch specification will include conventions that address
fp16 requirements.
Previously, the 'half' type was kept in its 16-bit format between operations and,
regardless of whether the F extension was enabled, the value was passed in the
lower 16 bits of a GPR in its 'half' format.
With this patch, depending on the ABI in use, the value is passed in the lower
16 bits of either an FPR or a GPR, still in 'half' format. This keeps the bit
location consistent with a future fp16 hardware extension.
---
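Note (illustrative, not part of the commit message): a minimal sketch of the
new convention, assuming the hard-float ABI. For an IR function such as

  define float @f(half %x) nounwind {
    %y = fpext half %x to float
    ret float %y
  }

the hypothetical 'half' argument %x is now expected in the lower 16 bits of
$fa0, with the upper 16 bits padded with ones (see the OR with 0xFFFF0000 in
splitValueIntoRegisterParts below), so e.g. half 1.0 (bits 0x3C00) would
arrive as the f32 bit pattern 0xFFFF3C00. On soft-float ABIs the value stays
in the low 16 bits of a GPR.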
.../LoongArch/LoongArchISelLowering.cpp | 146 ++-
.../Target/LoongArch/LoongArchISelLowering.h | 24 +
.../CodeGen/LoongArch/calling-conv-half.ll | 924 ++++++++++++++++++
llvm/test/CodeGen/LoongArch/fp16-promote.ll | 192 ++--
4 files changed, 1214 insertions(+), 72 deletions(-)
create mode 100644 llvm/test/CodeGen/LoongArch/calling-conv-half.ll
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index bfafb331752108..78473e4bc4287a 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -181,8 +181,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
setOperationAction(ISD::FPOW, MVT::f32, Expand);
setOperationAction(ISD::FREM, MVT::f32, Expand);
- setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
- setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
+ setOperationAction(ISD::FP16_TO_FP, MVT::f32, Custom);
+ setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);
if (Subtarget.is64Bit())
setOperationAction(ISD::FRINT, MVT::f32, Legal);
@@ -219,7 +219,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FPOW, MVT::f64, Expand);
setOperationAction(ISD::FREM, MVT::f64, Expand);
setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
- setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
+ setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
if (Subtarget.is64Bit())
setOperationAction(ISD::FRINT, MVT::f64, Legal);
@@ -427,6 +427,10 @@ SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
return lowerBUILD_VECTOR(Op, DAG);
case ISD::VECTOR_SHUFFLE:
return lowerVECTOR_SHUFFLE(Op, DAG);
+ case ISD::FP_TO_FP16:
+ return lowerFP_TO_FP16(Op, DAG);
+ case ISD::FP16_TO_FP:
+ return lowerFP16_TO_FP(Op, DAG);
}
return SDValue();
}
@@ -1354,6 +1358,40 @@ SDValue LoongArchTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
return SDValue();
}
+SDValue LoongArchTargetLowering::lowerFP_TO_FP16(SDValue Op,
+ SelectionDAG &DAG) const {
+ // Custom lower to ensure the libcall return is passed in an FPR on hard
+ // float ABIs.
+ SDLoc DL(Op);
+ MakeLibCallOptions CallOptions;
+ SDValue Op0 = Op.getOperand(0);
+ SDValue Chain = SDValue();
+ RTLIB::Libcall LC = RTLIB::getFPROUND(Op0.getValueType(), MVT::f16);
+ SDValue Res;
+ std::tie(Res, Chain) =
+ makeLibCall(DAG, LC, MVT::f32, Op0, CallOptions, DL, Chain);
+ if (Subtarget.is64Bit())
+ return DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Res);
+ return DAG.getBitcast(MVT::i32, Res);
+}
+
+SDValue LoongArchTargetLowering::lowerFP16_TO_FP(SDValue Op,
+ SelectionDAG &DAG) const {
+ // Custom lower to ensure the libcall argument is passed in an FPR on hard
+ // float ABIs.
+ SDLoc DL(Op);
+ MakeLibCallOptions CallOptions;
+ SDValue Op0 = Op.getOperand(0);
+ SDValue Chain = SDValue();
+ SDValue Arg = Subtarget.is64Bit() ? DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64,
+ DL, MVT::f32, Op0)
+ : DAG.getBitcast(MVT::f32, Op0);
+ SDValue Res;
+ std::tie(Res, Chain) = makeLibCall(DAG, RTLIB::FPEXT_F16_F32, MVT::f32, Arg,
+ CallOptions, DL, Chain);
+ return Res;
+}
+
static bool isConstantOrUndef(const SDValue Op) {
if (Op->isUndef())
return true;
@@ -1656,16 +1694,19 @@ SDValue LoongArchTargetLowering::lowerFP_TO_SINT(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
+ SDValue Op0 = Op.getOperand(0);
+
+ if (Op0.getValueType() == MVT::f16)
+ Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0);
if (Op.getValueSizeInBits() > 32 && Subtarget.hasBasicF() &&
!Subtarget.hasBasicD()) {
- SDValue Dst =
- DAG.getNode(LoongArchISD::FTINT, DL, MVT::f32, Op.getOperand(0));
+ SDValue Dst = DAG.getNode(LoongArchISD::FTINT, DL, MVT::f32, Op0);
return DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Dst);
}
EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits());
- SDValue Trunc = DAG.getNode(LoongArchISD::FTINT, DL, FPTy, Op.getOperand(0));
+ SDValue Trunc = DAG.getNode(LoongArchISD::FTINT, DL, FPTy, Op0);
return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Trunc);
}
@@ -2848,6 +2889,10 @@ void LoongArchTargetLowering::ReplaceNodeResults(
EVT FVT = EVT::getFloatingPointVT(N->getValueSizeInBits(0));
if (getTypeAction(*DAG.getContext(), Src.getValueType()) !=
TargetLowering::TypeSoftenFloat) {
+ if (!isTypeLegal(Src.getValueType()))
+ return;
+ if (Src.getValueType() == MVT::f16)
+ Src = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
SDValue Dst = DAG.getNode(LoongArchISD::FTINT, DL, FVT, Src);
Results.push_back(DAG.getNode(ISD::BITCAST, DL, VT, Dst));
return;
@@ -4229,6 +4274,33 @@ performINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+static SDValue performMOVGR2FR_WCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const LoongArchSubtarget &Subtarget) {
+  // If the input to MOVGR2FR_W_LA64 is just MOVFR2GR_S_LA64 then the
+ // conversion is unnecessary and can be replaced with the
+ // MOVFR2GR_S_LA64 operand.
+ SDValue Op0 = N->getOperand(0);
+ if (Op0.getOpcode() == LoongArchISD::MOVFR2GR_S_LA64)
+ return Op0.getOperand(0);
+ return SDValue();
+}
+
+static SDValue performMOVFR2GR_SCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const LoongArchSubtarget &Subtarget) {
+ // If the input to MOVFR2GR_S_LA64 is just MOVGR2FR_W_LA64 then the
+ // conversion is unnecessary and can be replaced with the MOVGR2FR_W_LA64
+ // operand.
+ SDValue Op0 = N->getOperand(0);
+ MVT VT = N->getSimpleValueType(0);
+ if (Op0->getOpcode() == LoongArchISD::MOVGR2FR_W_LA64) {
+ assert(Op0.getOperand(0).getValueType() == VT && "Unexpected value type!");
+ return Op0.getOperand(0);
+ }
+ return SDValue();
+}
+
SDValue LoongArchTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
@@ -4247,6 +4319,10 @@ SDValue LoongArchTargetLowering::PerformDAGCombine(SDNode *N,
return performBITREV_WCombine(N, DAG, DCI, Subtarget);
case ISD::INTRINSIC_WO_CHAIN:
return performINTRINSIC_WO_CHAINCombine(N, DAG, DCI, Subtarget);
+ case LoongArchISD::MOVGR2FR_W_LA64:
+ return performMOVGR2FR_WCombine(N, DAG, DCI, Subtarget);
+ case LoongArchISD::MOVFR2GR_S_LA64:
+ return performMOVFR2GR_SCombine(N, DAG, DCI, Subtarget);
}
return SDValue();
}
@@ -6260,3 +6336,61 @@ bool LoongArchTargetLowering::shouldAlignPointerArgs(CallInst *CI,
return true;
}
+
+bool LoongArchTargetLowering::splitValueIntoRegisterParts(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
+ unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
+ bool IsABIRegCopy = CC.has_value();
+ EVT ValueVT = Val.getValueType();
+
+ if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
+ // Cast the f16 to i16, extend to i32, pad with ones to make a float
+  // NaN, and cast to f32.
+ Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
+ Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
+ Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
+ DAG.getConstant(0xFFFF0000, DL, MVT::i32));
+ Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
+ Parts[0] = Val;
+ return true;
+ }
+
+ return false;
+}
+
+SDValue LoongArchTargetLowering::joinRegisterPartsIntoValue(
+ SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
+ MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
+ bool IsABIRegCopy = CC.has_value();
+
+ if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
+ SDValue Val = Parts[0];
+
+ // Cast the f32 to i32, truncate to i16, and cast back to f16.
+ Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
+ Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
+ Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
+ return Val;
+ }
+
+ return SDValue();
+}
+
+MVT LoongArchTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
+ CallingConv::ID CC,
+ EVT VT) const {
+ // Use f32 to pass f16.
+ if (VT == MVT::f16 && Subtarget.hasBasicF())
+ return MVT::f32;
+
+ return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
+}
+
+unsigned LoongArchTargetLowering::getNumRegistersForCallingConv(
+ LLVMContext &Context, CallingConv::ID CC, EVT VT) const {
+ // Use f32 to pass f16.
+ if (VT == MVT::f16 && Subtarget.hasBasicF())
+ return 1;
+
+ return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
+}
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 6177884bd19501..5636f0d8b3d601 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -315,6 +315,8 @@ class LoongArchTargetLowering : public TargetLowering {
SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) const;
bool isFPImmLegal(const APFloat &Imm, EVT VT,
bool ForCodeSize) const override;
@@ -339,6 +341,28 @@ class LoongArchTargetLowering : public TargetLowering {
const SmallVectorImpl<CCValAssign> &ArgLocs) const;
bool softPromoteHalfType() const override { return true; }
+
+ bool
+ splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
+ SDValue *Parts, unsigned NumParts, MVT PartVT,
+ std::optional<CallingConv::ID> CC) const override;
+
+ SDValue
+ joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
+ const SDValue *Parts, unsigned NumParts,
+ MVT PartVT, EVT ValueVT,
+ std::optional<CallingConv::ID> CC) const override;
+
+  /// Return the register type used to pass a given MVT in the calling
+  /// convention, ensuring 'half' is passed as f32 when the F extension is
+  /// available.
+ MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
+ EVT VT) const override;
+
+  /// Return the number of registers used to pass a given MVT in the calling
+  /// convention, ensuring 'half' occupies a single f32 register when the F
+  /// extension is available.
+ unsigned getNumRegistersForCallingConv(LLVMContext &Context,
+ CallingConv::ID CC,
+ EVT VT) const override;
};
} // end namespace llvm
diff --git a/llvm/test/CodeGen/LoongArch/calling-conv-half.ll b/llvm/test/CodeGen/LoongArch/calling-conv-half.ll
new file mode 100644
index 00000000000000..1f825fe5b62200
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/calling-conv-half.ll
@@ -0,0 +1,924 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F
+; RUN: llc --mtriple=loongarch32 --mattr=+d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D
+; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F
+; RUN: llc --mtriple=loongarch64 --mattr=+d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D
+
+define i32 @callee_half_in_fregs(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, half %i) nounwind {
+; LA32S-LABEL: callee_half_in_fregs:
+; LA32S: # %bb.0:
+; LA32S-NEXT: addi.w $sp, $sp, -16
+; LA32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32S-NEXT: ld.hu $a1, $sp, 16
+; LA32S-NEXT: move $fp, $a0
+; LA32S-NEXT: move $a0, $a1
+; LA32S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32S-NEXT: bl %plt(__fixsfsi)
+; LA32S-NEXT: add.w $a0, $fp, $a0
+; LA32S-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT: addi.w $sp, $sp, 16
+; LA32S-NEXT: ret
+;
+; LA32F-LABEL: callee_half_in_fregs:
+; LA32F: # %bb.0:
+; LA32F-NEXT: addi.w $sp, $sp, -16
+; LA32F-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-NEXT: move $fp, $a0
+; LA32F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32F-NEXT: ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT: movfr2gr.s $a0, $fa0
+; LA32F-NEXT: add.w $a0, $fp, $a0
+; LA32F-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT: addi.w $sp, $sp, 16
+; LA32F-NEXT: ret
+;
+; LA32D-LABEL: callee_half_in_fregs:
+; LA32D: # %bb.0:
+; LA32D-NEXT: addi.w $sp, $sp, -16
+; LA32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-NEXT: move $fp, $a0
+; LA32D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32D-NEXT: ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT: movfr2gr.s $a0, $fa0
+; LA32D-NEXT: add.w $a0, $fp, $a0
+; LA32D-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT: addi.w $sp, $sp, 16
+; LA32D-NEXT: ret
+;
+; LA64S-LABEL: callee_half_in_fregs:
+; LA64S: # %bb.0:
+; LA64S-NEXT: addi.d $sp, $sp, -16
+; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64S-NEXT: ld.hu $a1, $sp, 16
+; LA64S-NEXT: move $fp, $a0
+; LA64S-NEXT: move $a0, $a1
+; LA64S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64S-NEXT: bl %plt(__fixsfdi)
+; LA64S-NEXT: add.w $a0, $fp, $a0
+; LA64S-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT: addi.d $sp, $sp, 16
+; LA64S-NEXT: ret
+;
+; LA64F-LABEL: callee_half_in_fregs:
+; LA64F: # %bb.0:
+; LA64F-NEXT: addi.d $sp, $sp, -16
+; LA64F-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-NEXT: move $fp, $a0
+; LA64F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64F-NEXT: ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT: movfr2gr.s $a0, $fa0
+; LA64F-NEXT: add.w $a0, $fp, $a0
+; LA64F-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-NEXT: addi.d $sp, $sp, 16
+; LA64F-NEXT: ret
+;
+; LA64D-LABEL: callee_half_in_fregs:
+; LA64D: # %bb.0:
+; LA64D-NEXT: addi.d $sp, $sp, -16
+; LA64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-NEXT: move $fp, $a0
+; LA64D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64D-NEXT: ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT: movfr2gr.d $a0, $fa0
+; LA64D-NEXT: add.w $a0, $fp, $a0
+; LA64D-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-NEXT: addi.d $sp, $sp, 16
+; LA64D-NEXT: ret
+ %1 = fptosi half %i to i32
+ %2 = add i32 %a, %1
+ ret i32 %2
+}
+
+define i32 @caller_half_in_fregs() nounwind {
+; LA32S-LABEL: caller_half_in_fregs:
+; LA32S: # %bb.0:
+; LA32S-NEXT: addi.w $sp, $sp, -16
+; LA32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT: lu12i.w $t0, 4
+; LA32S-NEXT: ori $a0, $zero, 1
+; LA32S-NEXT: ori $a1, $zero, 2
+; LA32S-NEXT: ori $a2, $zero, 3
+; LA32S-NEXT: ori $a3, $zero, 4
+; LA32S-NEXT: ori $a4, $zero, 5
+; LA32S-NEXT: ori $a5, $zero, 6
+; LA32S-NEXT: ori $a6, $zero, 7
+; LA32S-NEXT: ori $a7, $zero, 8
+; LA32S-NEXT: st.w $t0, $sp, 0
+; LA32S-NEXT: bl %plt(callee_half_in_fregs)
+; LA32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT: addi.w $sp, $sp, 16
+; LA32S-NEXT: ret
+;
+; LA32F-LABEL: caller_half_in_fregs:
+; LA32F: # %bb.0:
+; LA32F-NEXT: addi.w $sp, $sp, -16
+; LA32F-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; LA32F-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
+; LA32F-NEXT: ori $a0, $zero, 1
+; LA32F-NEXT: ori $a1, $zero, 2
+; LA32F-NEXT: ori $a2, $zero, 3
+; LA32F-NEXT: ori $a3, $zero, 4
+; LA32F-NEXT: ori $a4, $zero, 5
+; LA32F-NEXT: ori $a5, $zero, 6
+; LA32F-NEXT: ori $a6, $zero, 7
+; LA32F-NEXT: ori $a7, $zero, 8
+; LA32F-NEXT: bl %plt(callee_half_in_fregs)
+; LA32F-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT: addi.w $sp, $sp, 16
+; LA32F-NEXT: ret
+;
+; LA32D-LABEL: caller_half_in_fregs:
+; LA32D: # %bb.0:
+; LA32D-NEXT: addi.w $sp, $sp, -16
+; LA32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; LA32D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
+; LA32D-NEXT: ori $a0, $zero, 1
+; LA32D-NEXT: ori $a1, $zero, 2
+; LA32D-NEXT: ori $a2, $zero, 3
+; LA32D-NEXT: ori $a3, $zero, 4
+; LA32D-NEXT: ori $a4, $zero, 5
+; LA32D-NEXT: ori $a5, $zero, 6
+; LA32D-NEXT: ori $a6, $zero, 7
+; LA32D-NEXT: ori $a7, $zero, 8
+; LA32D-NEXT: bl %plt(callee_half_in_fregs)
+; LA32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT: addi.w $sp, $sp, 16
+; LA32D-NEXT: ret
+;
+; LA64S-LABEL: caller_half_in_fregs:
+; LA64S: # %bb.0:
+; LA64S-NEXT: addi.d $sp, $sp, -16
+; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT: lu12i.w $t0, 4
+; LA64S-NEXT: ori $a0, $zero, 1
+; LA64S-NEXT: ori $a1, $zero, 2
+; LA64S-NEXT: ori $a2, $zero, 3
+; LA64S-NEXT: ori $a3, $zero, 4
+; LA64S-NEXT: ori $a4, $zero, 5
+; LA64S-NEXT: ori $a5, $zero, 6
+; LA64S-NEXT: ori $a6, $zero, 7
+; LA64S-NEXT: ori $a7, $zero, 8
+; LA64S-NEXT: st.d $t0, $sp, 0
+; LA64S-NEXT: bl %plt(callee_half_in_fregs)
+; LA64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT: addi.d $sp, $sp, 16
+; LA64S-NEXT: ret
+;
+; LA64F-LABEL: caller_half_in_fregs:
+; LA64F: # %bb.0:
+; LA64F-NEXT: addi.d $sp, $sp, -16
+; LA64F-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; LA64F-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
+; LA64F-NEXT: ori $a0, $zero, 1
+; LA64F-NEXT: ori $a1, $zero, 2
+; LA64F-NEXT: ori $a2, $zero, 3
+; LA64F-NEXT: ori $a3, $zero, 4
+; LA64F-NEXT: ori $a4, $zero, 5
+; LA64F-NEXT: ori $a5, $zero, 6
+; LA64F-NEXT: ori $a6, $zero, 7
+; LA64F-NEXT: ori $a7, $zero, 8
+; LA64F-NEXT: bl %plt(callee_half_in_fregs)
+; LA64F-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-NEXT: addi.d $sp, $sp, 16
+; LA64F-NEXT: ret
+;
+; LA64D-LABEL: caller_half_in_fregs:
+; LA64D: # %bb.0:
+; LA64D-NEXT: addi.d $sp, $sp, -16
+; LA64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0)
+; LA64D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
+; LA64D-NEXT: ori $a0, $zero, 1
+; LA64D-NEXT: ori $a1, $zero, 2
+; LA64D-NEXT: ori $a2, $zero, 3
+; LA64D-NEXT: ori $a3, $zero, 4
+; LA64D-NEXT: ori $a4, $zero, 5
+; LA64D-NEXT: ori $a5, $zero, 6
+; LA64D-NEXT: ori $a6, $zero, 7
+; LA64D-NEXT: ori $a7, $zero, 8
+; LA64D-NEXT: bl %plt(callee_half_in_fregs)
+; LA64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-NEXT: addi.d $sp, $sp, 16
+; LA64D-NEXT: ret
+ %1 = call i32 @callee_half_in_fregs(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, half 2.0)
+ ret i32 %1
+}
+
+define i32 @callee_half_in_gregs(half %a, half %b, half %c, half %d, half %e, half %f, half %g, half %h, half %i, i32 %j) nounwind {
+; LA32S-LABEL: callee_half_in_gregs:
+; LA32S: # %bb.0:
+; LA32S-NEXT: addi.w $sp, $sp, -16
+; LA32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32S-NEXT: ld.w $fp, $sp, 20
+; LA32S-NEXT: ld.hu $a0, $sp, 16
+; LA32S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32S-NEXT: bl %plt(__fixsfsi)
+; LA32S-NEXT: add.w $a0, $fp, $a0
+; LA32S-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT: addi.w $sp, $sp, 16
+; LA32S-NEXT: ret
+;
+; LA32F-LABEL: callee_half_in_gregs:
+; LA32F: # %bb.0:
+; LA32F-NEXT: addi.w $sp, $sp, -16
+; LA32F-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-NEXT: move $fp, $a1
+; LA32F-NEXT: movgr2fr.w $fa0, $a0
+; LA32F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32F-NEXT: ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT: movfr2gr.s $a0, $fa0
+; LA32F-NEXT: add.w $a0, $fp, $a0
+; LA32F-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT: addi.w $sp, $sp, 16
+; LA32F-NEXT: ret
+;
+; LA32D-LABEL: callee_half_in_gregs:
+; LA32D: # %bb.0:
+; LA32D-NEXT: addi.w $sp, $sp, -16
+; LA32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-NEXT: move $fp, $a1
+; LA32D-NEXT: movgr2fr.w $fa0, $a0
+; LA32D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32D-NEXT: ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT: movfr2gr.s $a0, $fa0
+; LA32D-NEXT: add.w $a0, $fp, $a0
+; LA32D-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT: addi.w $sp, $sp, 16
+; LA32D-NEXT: ret
+;
+; LA64S-LABEL: callee_half_in_gregs:
+; LA64S: # %bb.0:
+; LA64S-NEXT: addi.d $sp, $sp, -16
+; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64S-NEXT: ld.w $fp, $sp, 24
+; LA64S-NEXT: ld.hu $a0, $sp, 16
+; LA64S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64S-NEXT: bl %plt(__fixsfdi)
+; LA64S-NEXT: add.w $a0, $fp, $a0
+; LA64S-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT: addi.d $sp, $sp, 16
+; LA64S-NEXT: ret
+;
+; LA64F-LABEL: callee_half_in_gregs:
+; LA64F: # %bb.0:
+; LA64F-NEXT: addi.d $sp, $sp, -16
+; LA64F-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-NEXT: move $fp, $a1
+; LA64F-NEXT: movgr2fr.w $fa0, $a0
+; LA64F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64F-NEXT: ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT: movfr2gr.s $a0, $fa0
+; LA64F-NEXT: add.w $a0, $fp, $a0
+; LA64F-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-NEXT: addi.d $sp, $sp, 16
+; LA64F-NEXT: ret
+;
+; LA64D-LABEL: callee_half_in_gregs:
+; LA64D: # %bb.0:
+; LA64D-NEXT: addi.d $sp, $sp, -16
+; LA64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-NEXT: move $fp, $a1
+; LA64D-NEXT: movgr2fr.w $fa0, $a0
+; LA64D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64D-NEXT: ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT: movfr2gr.d $a0, $fa0
+; LA64D-NEXT: add.w $a0, $fp, $a0
+; LA64D-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-NEXT: addi.d $sp, $sp, 16
+; LA64D-NEXT: ret
+ %1 = fptosi half %i to i32
+ %2 = add i32 %j, %1
+ ret i32 %2
+}
+
+define i32 @caller_half_in_gregs() nounwind {
+; LA32S-LABEL: caller_half_in_gregs:
+; LA32S: # %bb.0:
+; LA32S-NEXT: addi.w $sp, $sp, -16
+; LA32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT: ori $a0, $zero, 10
+; LA32S-NEXT: st.w $a0, $sp, 4
+; LA32S-NEXT: lu12i.w $a1, 4
+; LA32S-NEXT: ori $t0, $a1, 2176
+; LA32S-NEXT: lu12i.w $a0, 3
+; LA32S-NEXT: ori $a0, $a0, 3072
+; LA32S-NEXT: ori $a2, $a1, 512
+; LA32S-NEXT: ori $a3, $a1, 1024
+; LA32S-NEXT: ori $a4, $a1, 1280
+; LA32S-NEXT: ori $a5, $a1, 1536
+; LA32S-NEXT: ori $a6, $a1, 1792
+; LA32S-NEXT: ori $a7, $a1, 2048
+; LA32S-NEXT: st.w $t0, $sp, 0
+; LA32S-NEXT: bl %plt(callee_half_in_gregs)
+; LA32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT: addi.w $sp, $sp, 16
+; LA32S-NEXT: ret
+;
+; LA32F-LABEL: caller_half_in_gregs:
+; LA32F: # %bb.0:
+; LA32F-NEXT: addi.w $sp, $sp, -16
+; LA32F-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA32F-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI3_0)
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA32F-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI3_1)
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; LA32F-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI3_2)
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; LA32F-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI3_3)
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; LA32F-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI3_4)
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; LA32F-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI3_5)
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; LA32F-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI3_6)
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_7)
+; LA32F-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI3_7)
+; LA32F-NEXT: lu12i.w $a0, -12
+; LA32F-NEXT: ori $a0, $a0, 2176
+; LA32F-NEXT: ori $a1, $zero, 10
+; LA32F-NEXT: bl %plt(callee_half_in_gregs)
+; LA32F-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT: addi.w $sp, $sp, 16
+; LA32F-NEXT: ret
+;
+; LA32D-LABEL: caller_half_in_gregs:
+; LA32D: # %bb.0:
+; LA32D-NEXT: addi.w $sp, $sp, -16
+; LA32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA32D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI3_0)
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA32D-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI3_1)
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; LA32D-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI3_2)
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; LA32D-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI3_3)
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; LA32D-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI3_4)
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; LA32D-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI3_5)
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; LA32D-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI3_6)
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_7)
+; LA32D-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI3_7)
+; LA32D-NEXT: lu12i.w $a0, -12
+; LA32D-NEXT: ori $a0, $a0, 2176
+; LA32D-NEXT: ori $a1, $zero, 10
+; LA32D-NEXT: bl %plt(callee_half_in_gregs)
+; LA32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT: addi.w $sp, $sp, 16
+; LA32D-NEXT: ret
+;
+; LA64S-LABEL: caller_half_in_gregs:
+; LA64S: # %bb.0:
+; LA64S-NEXT: addi.d $sp, $sp, -32
+; LA64S-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64S-NEXT: ori $a0, $zero, 10
+; LA64S-NEXT: st.d $a0, $sp, 8
+; LA64S-NEXT: lu12i.w $a1, 4
+; LA64S-NEXT: ori $t0, $a1, 2176
+; LA64S-NEXT: lu12i.w $a0, 3
+; LA64S-NEXT: ori $a0, $a0, 3072
+; LA64S-NEXT: ori $a2, $a1, 512
+; LA64S-NEXT: ori $a3, $a1, 1024
+; LA64S-NEXT: ori $a4, $a1, 1280
+; LA64S-NEXT: ori $a5, $a1, 1536
+; LA64S-NEXT: ori $a6, $a1, 1792
+; LA64S-NEXT: ori $a7, $a1, 2048
+; LA64S-NEXT: st.d $t0, $sp, 0
+; LA64S-NEXT: bl %plt(callee_half_in_gregs)
+; LA64S-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64S-NEXT: addi.d $sp, $sp, 32
+; LA64S-NEXT: ret
+;
+; LA64F-LABEL: caller_half_in_gregs:
+; LA64F: # %bb.0:
+; LA64F-NEXT: addi.d $sp, $sp, -16
+; LA64F-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA64F-NEXT: fld.s $ft0, $a0, %pc_lo12(.LCPI3_0)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA64F-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI3_1)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; LA64F-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI3_2)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; LA64F-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI3_3)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; LA64F-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI3_4)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; LA64F-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI3_5)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; LA64F-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI3_6)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_7)
+; LA64F-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI3_7)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_8)
+; LA64F-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI3_8)
+; LA64F-NEXT: movfr2gr.s $a0, $ft0
+; LA64F-NEXT: ori $a1, $zero, 10
+; LA64F-NEXT: bl %plt(callee_half_in_gregs)
+; LA64F-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-NEXT: addi.d $sp, $sp, 16
+; LA64F-NEXT: ret
+;
+; LA64D-LABEL: caller_half_in_gregs:
+; LA64D: # %bb.0:
+; LA64D-NEXT: addi.d $sp, $sp, -16
+; LA64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; LA64D-NEXT: fld.s $ft0, $a0, %pc_lo12(.LCPI3_0)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; LA64D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI3_1)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; LA64D-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI3_2)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; LA64D-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI3_3)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; LA64D-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI3_4)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; LA64D-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI3_5)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; LA64D-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI3_6)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_7)
+; LA64D-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI3_7)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_8)
+; LA64D-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI3_8)
+; LA64D-NEXT: movfr2gr.s $a0, $ft0
+; LA64D-NEXT: ori $a1, $zero, 10
+; LA64D-NEXT: bl %plt(callee_half_in_gregs)
+; LA64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-NEXT: addi.d $sp, $sp, 16
+; LA64D-NEXT: ret
+ %1 = call i32 @callee_half_in_gregs(half 1.0, half 2.0, half 3.0, half 4.0, half 5.0, half 6.0, half 7.0, half 8.0, half 9.0, i32 10)
+ ret i32 %1
+}
+
+define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, half %i, half %j, half %k, half %l, half %m, half %n, half %o, half %p, half %q) nounwind {
+; LA32S-LABEL: callee_half_on_stack:
+; LA32S: # %bb.0:
+; LA32S-NEXT: addi.w $sp, $sp, -16
+; LA32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32S-NEXT: ld.hu $a0, $sp, 48
+; LA32S-NEXT: move $fp, $a7
+; LA32S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32S-NEXT: bl %plt(__fixsfsi)
+; LA32S-NEXT: add.w $a0, $fp, $a0
+; LA32S-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT: addi.w $sp, $sp, 16
+; LA32S-NEXT: ret
+;
+; LA32F-LABEL: callee_half_on_stack:
+; LA32F: # %bb.0:
+; LA32F-NEXT: addi.w $sp, $sp, -16
+; LA32F-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32F-NEXT: ld.hu $a0, $sp, 16
+; LA32F-NEXT: move $fp, $a7
+; LA32F-NEXT: movgr2fr.w $fa0, $a0
+; LA32F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32F-NEXT: ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT: movfr2gr.s $a0, $fa0
+; LA32F-NEXT: add.w $a0, $fp, $a0
+; LA32F-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32F-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT: addi.w $sp, $sp, 16
+; LA32F-NEXT: ret
+;
+; LA32D-LABEL: callee_half_on_stack:
+; LA32D: # %bb.0:
+; LA32D-NEXT: addi.w $sp, $sp, -16
+; LA32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32D-NEXT: ld.hu $a0, $sp, 16
+; LA32D-NEXT: move $fp, $a7
+; LA32D-NEXT: movgr2fr.w $fa0, $a0
+; LA32D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32D-NEXT: ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT: movfr2gr.s $a0, $fa0
+; LA32D-NEXT: add.w $a0, $fp, $a0
+; LA32D-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT: addi.w $sp, $sp, 16
+; LA32D-NEXT: ret
+;
+; LA64S-LABEL: callee_half_on_stack:
+; LA64S: # %bb.0:
+; LA64S-NEXT: addi.d $sp, $sp, -16
+; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64S-NEXT: ld.hu $a0, $sp, 80
+; LA64S-NEXT: move $fp, $a7
+; LA64S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64S-NEXT: bl %plt(__fixsfdi)
+; LA64S-NEXT: add.w $a0, $fp, $a0
+; LA64S-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT: addi.d $sp, $sp, 16
+; LA64S-NEXT: ret
+;
+; LA64F-LABEL: callee_half_on_stack:
+; LA64F: # %bb.0:
+; LA64F-NEXT: addi.d $sp, $sp, -16
+; LA64F-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64F-NEXT: ld.hu $a0, $sp, 16
+; LA64F-NEXT: move $fp, $a7
+; LA64F-NEXT: movgr2fr.w $fa0, $a0
+; LA64F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64F-NEXT: ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT: movfr2gr.s $a0, $fa0
+; LA64F-NEXT: add.w $a0, $fp, $a0
+; LA64F-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64F-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-NEXT: addi.d $sp, $sp, 16
+; LA64F-NEXT: ret
+;
+; LA64D-LABEL: callee_half_on_stack:
+; LA64D: # %bb.0:
+; LA64D-NEXT: addi.d $sp, $sp, -16
+; LA64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64D-NEXT: ld.hu $a0, $sp, 16
+; LA64D-NEXT: move $fp, $a7
+; LA64D-NEXT: movgr2fr.w $fa0, $a0
+; LA64D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64D-NEXT: ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT: movfr2gr.d $a0, $fa0
+; LA64D-NEXT: add.w $a0, $fp, $a0
+; LA64D-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-NEXT: addi.d $sp, $sp, 16
+; LA64D-NEXT: ret
+ %1 = fptosi half %q to i32
+ %2 = add i32 %h, %1
+ ret i32 %2
+}
+
+define i32 @caller_half_on_stack() nounwind {
+; LA32S-LABEL: caller_half_on_stack:
+; LA32S: # %bb.0:
+; LA32S-NEXT: addi.w $sp, $sp, -48
+; LA32S-NEXT: st.w $ra, $sp, 44 # 4-byte Folded Spill
+; LA32S-NEXT: lu12i.w $a0, 4
+; LA32S-NEXT: ori $a1, $a0, 3200
+; LA32S-NEXT: st.w $a1, $sp, 32
+; LA32S-NEXT: ori $a1, $a0, 3136
+; LA32S-NEXT: st.w $a1, $sp, 28
+; LA32S-NEXT: ori $a1, $a0, 3072
+; LA32S-NEXT: st.w $a1, $sp, 24
+; LA32S-NEXT: ori $a1, $a0, 2944
+; LA32S-NEXT: st.w $a1, $sp, 20
+; LA32S-NEXT: ori $a1, $a0, 2816
+; LA32S-NEXT: st.w $a1, $sp, 16
+; LA32S-NEXT: ori $a1, $a0, 2688
+; LA32S-NEXT: st.w $a1, $sp, 12
+; LA32S-NEXT: ori $a1, $a0, 2560
+; LA32S-NEXT: st.w $a1, $sp, 8
+; LA32S-NEXT: ori $a1, $a0, 2432
+; LA32S-NEXT: st.w $a1, $sp, 4
+; LA32S-NEXT: ori $t0, $a0, 2304
+; LA32S-NEXT: ori $a0, $zero, 1
+; LA32S-NEXT: ori $a1, $zero, 2
+; LA32S-NEXT: ori $a2, $zero, 3
+; LA32S-NEXT: ori $a3, $zero, 4
+; LA32S-NEXT: ori $a4, $zero, 5
+; LA32S-NEXT: ori $a5, $zero, 6
+; LA32S-NEXT: ori $a6, $zero, 7
+; LA32S-NEXT: ori $a7, $zero, 8
+; LA32S-NEXT: st.w $t0, $sp, 0
+; LA32S-NEXT: bl %plt(callee_half_on_stack)
+; LA32S-NEXT: ld.w $ra, $sp, 44 # 4-byte Folded Reload
+; LA32S-NEXT: addi.w $sp, $sp, 48
+; LA32S-NEXT: ret
+;
+; LA32F-LABEL: caller_half_on_stack:
+; LA32F: # %bb.0:
+; LA32F-NEXT: addi.w $sp, $sp, -16
+; LA32F-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT: lu12i.w $a0, -12
+; LA32F-NEXT: ori $t0, $a0, 3200
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; LA32F-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_1)
+; LA32F-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_2)
+; LA32F-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_3)
+; LA32F-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_4)
+; LA32F-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_5)
+; LA32F-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_6)
+; LA32F-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_7)
+; LA32F-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
+; LA32F-NEXT: ori $a0, $zero, 1
+; LA32F-NEXT: ori $a1, $zero, 2
+; LA32F-NEXT: ori $a2, $zero, 3
+; LA32F-NEXT: ori $a3, $zero, 4
+; LA32F-NEXT: ori $a4, $zero, 5
+; LA32F-NEXT: ori $a5, $zero, 6
+; LA32F-NEXT: ori $a6, $zero, 7
+; LA32F-NEXT: ori $a7, $zero, 8
+; LA32F-NEXT: st.w $t0, $sp, 0
+; LA32F-NEXT: bl %plt(callee_half_on_stack)
+; LA32F-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT: addi.w $sp, $sp, 16
+; LA32F-NEXT: ret
+;
+; LA32D-LABEL: caller_half_on_stack:
+; LA32D: # %bb.0:
+; LA32D-NEXT: addi.w $sp, $sp, -16
+; LA32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT: lu12i.w $a0, -12
+; LA32D-NEXT: ori $t0, $a0, 3200
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; LA32D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_1)
+; LA32D-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_2)
+; LA32D-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_3)
+; LA32D-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_4)
+; LA32D-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_5)
+; LA32D-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_6)
+; LA32D-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_7)
+; LA32D-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
+; LA32D-NEXT: ori $a0, $zero, 1
+; LA32D-NEXT: ori $a1, $zero, 2
+; LA32D-NEXT: ori $a2, $zero, 3
+; LA32D-NEXT: ori $a3, $zero, 4
+; LA32D-NEXT: ori $a4, $zero, 5
+; LA32D-NEXT: ori $a5, $zero, 6
+; LA32D-NEXT: ori $a6, $zero, 7
+; LA32D-NEXT: ori $a7, $zero, 8
+; LA32D-NEXT: st.w $t0, $sp, 0
+; LA32D-NEXT: bl %plt(callee_half_on_stack)
+; LA32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT: addi.w $sp, $sp, 16
+; LA32D-NEXT: ret
+;
+; LA64S-LABEL: caller_half_on_stack:
+; LA64S: # %bb.0:
+; LA64S-NEXT: addi.d $sp, $sp, -80
+; LA64S-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
+; LA64S-NEXT: lu12i.w $a0, 4
+; LA64S-NEXT: ori $a1, $a0, 3200
+; LA64S-NEXT: st.d $a1, $sp, 64
+; LA64S-NEXT: ori $a1, $a0, 3136
+; LA64S-NEXT: st.d $a1, $sp, 56
+; LA64S-NEXT: ori $a1, $a0, 3072
+; LA64S-NEXT: st.d $a1, $sp, 48
+; LA64S-NEXT: ori $a1, $a0, 2944
+; LA64S-NEXT: st.d $a1, $sp, 40
+; LA64S-NEXT: ori $a1, $a0, 2816
+; LA64S-NEXT: st.d $a1, $sp, 32
+; LA64S-NEXT: ori $a1, $a0, 2688
+; LA64S-NEXT: st.d $a1, $sp, 24
+; LA64S-NEXT: ori $a1, $a0, 2560
+; LA64S-NEXT: st.d $a1, $sp, 16
+; LA64S-NEXT: ori $a1, $a0, 2432
+; LA64S-NEXT: st.d $a1, $sp, 8
+; LA64S-NEXT: ori $t0, $a0, 2304
+; LA64S-NEXT: ori $a0, $zero, 1
+; LA64S-NEXT: ori $a1, $zero, 2
+; LA64S-NEXT: ori $a2, $zero, 3
+; LA64S-NEXT: ori $a3, $zero, 4
+; LA64S-NEXT: ori $a4, $zero, 5
+; LA64S-NEXT: ori $a5, $zero, 6
+; LA64S-NEXT: ori $a6, $zero, 7
+; LA64S-NEXT: ori $a7, $zero, 8
+; LA64S-NEXT: st.d $t0, $sp, 0
+; LA64S-NEXT: bl %plt(callee_half_on_stack)
+; LA64S-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; LA64S-NEXT: addi.d $sp, $sp, 80
+; LA64S-NEXT: ret
+;
+; LA64F-LABEL: caller_half_on_stack:
+; LA64F: # %bb.0:
+; LA64F-NEXT: addi.d $sp, $sp, -16
+; LA64F-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-NEXT: lu12i.w $a0, -12
+; LA64F-NEXT: ori $t0, $a0, 3200
+; LA64F-NEXT: lu32i.d $t0, 0
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; LA64F-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_1)
+; LA64F-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_2)
+; LA64F-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_3)
+; LA64F-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_4)
+; LA64F-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_5)
+; LA64F-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_6)
+; LA64F-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_7)
+; LA64F-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
+; LA64F-NEXT: ori $a0, $zero, 1
+; LA64F-NEXT: ori $a1, $zero, 2
+; LA64F-NEXT: ori $a2, $zero, 3
+; LA64F-NEXT: ori $a3, $zero, 4
+; LA64F-NEXT: ori $a4, $zero, 5
+; LA64F-NEXT: ori $a5, $zero, 6
+; LA64F-NEXT: ori $a6, $zero, 7
+; LA64F-NEXT: ori $a7, $zero, 8
+; LA64F-NEXT: st.w $t0, $sp, 0
+; LA64F-NEXT: bl %plt(callee_half_on_stack)
+; LA64F-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-NEXT: addi.d $sp, $sp, 16
+; LA64F-NEXT: ret
+;
+; LA64D-LABEL: caller_half_on_stack:
+; LA64D: # %bb.0:
+; LA64D-NEXT: addi.d $sp, $sp, -16
+; LA64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-NEXT: lu12i.w $a0, -12
+; LA64D-NEXT: ori $t0, $a0, 3200
+; LA64D-NEXT: lu32i.d $t0, 0
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0)
+; LA64D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_1)
+; LA64D-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_2)
+; LA64D-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_3)
+; LA64D-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_4)
+; LA64D-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_5)
+; LA64D-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_6)
+; LA64D-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_7)
+; LA64D-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
+; LA64D-NEXT: ori $a0, $zero, 1
+; LA64D-NEXT: ori $a1, $zero, 2
+; LA64D-NEXT: ori $a2, $zero, 3
+; LA64D-NEXT: ori $a3, $zero, 4
+; LA64D-NEXT: ori $a4, $zero, 5
+; LA64D-NEXT: ori $a5, $zero, 6
+; LA64D-NEXT: ori $a6, $zero, 7
+; LA64D-NEXT: ori $a7, $zero, 8
+; LA64D-NEXT: st.w $t0, $sp, 0
+; LA64D-NEXT: bl %plt(callee_half_on_stack)
+; LA64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-NEXT: addi.d $sp, $sp, 16
+; LA64D-NEXT: ret
+ %1 = call i32 @callee_half_on_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, half 10.0, half 11.0, half 12.0, half 13.0, half 14.0, half 15.0, half 16.0, half 17.0, half 18.0)
+ ret i32 %1
+}
+
+define half @callee_half_ret() nounwind {
+; LA32S-LABEL: callee_half_ret:
+; LA32S: # %bb.0:
+; LA32S-NEXT: lu12i.w $a0, 3
+; LA32S-NEXT: ori $a0, $a0, 3072
+; LA32S-NEXT: ret
+;
+; LA32F-LABEL: callee_half_ret:
+; LA32F: # %bb.0:
+; LA32F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA32F-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA32F-NEXT: ret
+;
+; LA32D-LABEL: callee_half_ret:
+; LA32D: # %bb.0:
+; LA32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA32D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA32D-NEXT: ret
+;
+; LA64S-LABEL: callee_half_ret:
+; LA64S: # %bb.0:
+; LA64S-NEXT: lu12i.w $a0, 3
+; LA64S-NEXT: ori $a0, $a0, 3072
+; LA64S-NEXT: ret
+;
+; LA64F-LABEL: callee_half_ret:
+; LA64F: # %bb.0:
+; LA64F-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA64F-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA64F-NEXT: ret
+;
+; LA64D-LABEL: callee_half_ret:
+; LA64D: # %bb.0:
+; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; LA64D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
+; LA64D-NEXT: ret
+ ret half 1.0
+}
+
+define i32 @caller_half_ret() nounwind {
+; LA32S-LABEL: caller_half_ret:
+; LA32S: # %bb.0:
+; LA32S-NEXT: addi.w $sp, $sp, -16
+; LA32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT: bl %plt(callee_half_ret)
+; LA32S-NEXT: bstrpick.w $a0, $a0, 15, 0
+; LA32S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32S-NEXT: bl %plt(__fixsfsi)
+; LA32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT: addi.w $sp, $sp, 16
+; LA32S-NEXT: ret
+;
+; LA32F-LABEL: caller_half_ret:
+; LA32F: # %bb.0:
+; LA32F-NEXT: addi.w $sp, $sp, -16
+; LA32F-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT: bl %plt(callee_half_ret)
+; LA32F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32F-NEXT: ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT: movfr2gr.s $a0, $fa0
+; LA32F-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT: addi.w $sp, $sp, 16
+; LA32F-NEXT: ret
+;
+; LA32D-LABEL: caller_half_ret:
+; LA32D: # %bb.0:
+; LA32D-NEXT: addi.w $sp, $sp, -16
+; LA32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT: bl %plt(callee_half_ret)
+; LA32D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32D-NEXT: ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT: movfr2gr.s $a0, $fa0
+; LA32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT: addi.w $sp, $sp, 16
+; LA32D-NEXT: ret
+;
+; LA64S-LABEL: caller_half_ret:
+; LA64S: # %bb.0:
+; LA64S-NEXT: addi.d $sp, $sp, -16
+; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT: bl %plt(callee_half_ret)
+; LA64S-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64S-NEXT: bl %plt(__fixsfdi)
+; LA64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT: addi.d $sp, $sp, 16
+; LA64S-NEXT: ret
+;
+; LA64F-LABEL: caller_half_ret:
+; LA64F: # %bb.0:
+; LA64F-NEXT: addi.d $sp, $sp, -16
+; LA64F-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-NEXT: bl %plt(callee_half_ret)
+; LA64F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64F-NEXT: ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT: movfr2gr.s $a0, $fa0
+; LA64F-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-NEXT: addi.d $sp, $sp, 16
+; LA64F-NEXT: ret
+;
+; LA64D-LABEL: caller_half_ret:
+; LA64D: # %bb.0:
+; LA64D-NEXT: addi.d $sp, $sp, -16
+; LA64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64D-NEXT: bl %plt(callee_half_ret)
+; LA64D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64D-NEXT: ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT: movfr2gr.d $a0, $fa0
+; LA64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64D-NEXT: addi.d $sp, $sp, 16
+; LA64D-NEXT: ret
+ %1 = call half @callee_half_ret()
+ %2 = fptosi half %1 to i32
+ ret i32 %2
+}
diff --git a/llvm/test/CodeGen/LoongArch/fp16-promote.ll b/llvm/test/CodeGen/LoongArch/fp16-promote.ll
index 03965ac81f3763..9125156c59bebc 100644
--- a/llvm/test/CodeGen/LoongArch/fp16-promote.ll
+++ b/llvm/test/CodeGen/LoongArch/fp16-promote.ll
@@ -22,13 +22,25 @@ define void @test_load_store(ptr %p, ptr %q) nounwind {
define float @test_fpextend_float(ptr %p) nounwind {
; LA32-LABEL: test_fpextend_float:
; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: ld.hu $a0, $a0, 0
-; LA32-NEXT: b %plt(__gnu_h2f_ieee)
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
;
; LA64-LABEL: test_fpextend_float:
; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -16
+; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: ld.hu $a0, $a0, 0
-; LA64-NEXT: b %plt(__gnu_h2f_ieee)
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 16
+; LA64-NEXT: ret
%a = load half, ptr %p
%r = fpext half %a to float
ret float %r
@@ -40,6 +52,7 @@ define double @test_fpextend_double(ptr %p) nounwind {
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: ld.hu $a0, $a0, 0
+; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: fcvt.d.s $fa0, $fa0
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -51,6 +64,7 @@ define double @test_fpextend_double(ptr %p) nounwind {
; LA64-NEXT: addi.d $sp, $sp, -16
; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: ld.hu $a0, $a0, 0
+; LA64-NEXT: movgr2fr.w $fa0, $a0
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fcvt.d.s $fa0, $fa0
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
@@ -69,6 +83,7 @@ define void @test_fptrunc_float(float %f, ptr %p) nounwind {
; LA32-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32-NEXT: move $fp, $a0
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT: movfr2gr.s $a0, $fa0
; LA32-NEXT: st.h $a0, $fp, 0
; LA32-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -82,6 +97,7 @@ define void @test_fptrunc_float(float %f, ptr %p) nounwind {
; LA64-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
; LA64-NEXT: move $fp, $a0
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT: movfr2gr.s $a0, $fa0
; LA64-NEXT: st.h $a0, $fp, 0
; LA64-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
@@ -100,6 +116,7 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
; LA32-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32-NEXT: move $fp, $a0
; LA32-NEXT: bl %plt(__truncdfhf2)
+; LA32-NEXT: movfr2gr.s $a0, $fa0
; LA32-NEXT: st.h $a0, $fp, 0
; LA32-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
@@ -113,6 +130,7 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
; LA64-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
; LA64-NEXT: move $fp, $a0
; LA64-NEXT: bl %plt(__truncdfhf2)
+; LA64-NEXT: movfr2gr.s $a0, $fa0
; LA64-NEXT: st.h $a0, $fp, 0
; LA64-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
@@ -126,40 +144,48 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
define half @test_fadd_reg(half %a, half %b) nounwind {
; LA32-LABEL: test_fadd_reg:
; LA32: # %bb.0:
-; LA32-NEXT: addi.w $sp, $sp, -16
-; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
-; LA32-NEXT: fst.d $fs0, $sp, 0 # 8-byte Folded Spill
-; LA32-NEXT: move $fp, $a0
-; LA32-NEXT: move $a0, $a1
-; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT: addi.w $sp, $sp, -32
+; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT: fmov.s $fs0, $fa0
-; LA32-NEXT: move $a0, $fp
+; LA32-NEXT: fmov.s $fa0, $fa1
+; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT: fmov.s $fs1, $fa0
+; LA32-NEXT: fmov.s $fa0, $fs0
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
-; LA32-NEXT: fadd.s $fa0, $fa0, $fs0
+; LA32-NEXT: fadd.s $fa0, $fa0, $fs1
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
-; LA32-NEXT: fld.d $fs0, $sp, 0 # 8-byte Folded Reload
-; LA32-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
-; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: movfr2gr.s $a0, $fa0
+; LA32-NEXT: lu12i.w $a1, -16
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 32
; LA32-NEXT: ret
;
; LA64-LABEL: test_fadd_reg:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -32
; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
-; LA64-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
-; LA64-NEXT: fst.d $fs0, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT: move $fp, $a0
-; LA64-NEXT: move $a0, $a1
-; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: fmov.s $fs0, $fa0
-; LA64-NEXT: move $a0, $fp
+; LA64-NEXT: fmov.s $fa0, $fa1
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
-; LA64-NEXT: fadd.s $fa0, $fa0, $fs0
+; LA64-NEXT: fmov.s $fs1, $fa0
+; LA64-NEXT: fmov.s $fa0, $fs0
+; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT: fadd.s $fa0, $fa0, $fs1
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
-; LA64-NEXT: fld.d $fs0, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT: movfr2gr.s $a0, $fa0
+; LA64-NEXT: lu12i.w $a1, -16
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 32
; LA64-NEXT: ret
@@ -173,20 +199,23 @@ define void @test_fadd_mem(ptr %p, ptr %q) nounwind {
; LA32-NEXT: addi.w $sp, $sp, -32
; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 24 # 4-byte Folded Spill
-; LA32-NEXT: st.w $s0, $sp, 20 # 4-byte Folded Spill
-; LA32-NEXT: fst.d $fs0, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT: move $fp, $a0
-; LA32-NEXT: ld.hu $s0, $a0, 0
-; LA32-NEXT: ld.hu $a0, $a1, 0
+; LA32-NEXT: ld.hu $a0, $a0, 0
+; LA32-NEXT: ld.hu $a1, $a1, 0
+; LA32-NEXT: movgr2fr.w $fs0, $a0
+; LA32-NEXT: movgr2fr.w $fa0, $a1
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
-; LA32-NEXT: fmov.s $fs0, $fa0
-; LA32-NEXT: move $a0, $s0
+; LA32-NEXT: fmov.s $fs1, $fa0
+; LA32-NEXT: fmov.s $fa0, $fs0
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
-; LA32-NEXT: fadd.s $fa0, $fa0, $fs0
+; LA32-NEXT: fadd.s $fa0, $fa0, $fs1
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT: movfr2gr.s $a0, $fa0
; LA32-NEXT: st.h $a0, $fp, 0
-; LA32-NEXT: fld.d $fs0, $sp, 8 # 8-byte Folded Reload
-; LA32-NEXT: ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 24 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 32
@@ -202,12 +231,14 @@ define void @test_fadd_mem(ptr %p, ptr %q) nounwind {
; LA64-NEXT: move $fp, $a0
; LA64-NEXT: ld.hu $s0, $a0, 0
; LA64-NEXT: ld.hu $a0, $a1, 0
+; LA64-NEXT: movgr2fr.w $fa0, $a0
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fmov.s $fs0, $fa0
-; LA64-NEXT: move $a0, $s0
+; LA64-NEXT: movgr2fr.w $fa0, $s0
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fadd.s $fa0, $fa0, $fs0
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT: movfr2gr.s $a0, $fa0
; LA64-NEXT: st.h $a0, $fp, 0
; LA64-NEXT: fld.d $fs0, $sp, 0 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
@@ -225,40 +256,48 @@ define void @test_fadd_mem(ptr %p, ptr %q) nounwind {
define half @test_fmul_reg(half %a, half %b) nounwind {
; LA32-LABEL: test_fmul_reg:
; LA32: # %bb.0:
-; LA32-NEXT: addi.w $sp, $sp, -16
-; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
-; LA32-NEXT: fst.d $fs0, $sp, 0 # 8-byte Folded Spill
-; LA32-NEXT: move $fp, $a0
-; LA32-NEXT: move $a0, $a1
-; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT: addi.w $sp, $sp, -32
+; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT: fmov.s $fs0, $fa0
-; LA32-NEXT: move $a0, $fp
+; LA32-NEXT: fmov.s $fa0, $fa1
+; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT: fmov.s $fs1, $fa0
+; LA32-NEXT: fmov.s $fa0, $fs0
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
-; LA32-NEXT: fmul.s $fa0, $fa0, $fs0
+; LA32-NEXT: fmul.s $fa0, $fa0, $fs1
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
-; LA32-NEXT: fld.d $fs0, $sp, 0 # 8-byte Folded Reload
-; LA32-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
-; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: movfr2gr.s $a0, $fa0
+; LA32-NEXT: lu12i.w $a1, -16
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 32
; LA32-NEXT: ret
;
; LA64-LABEL: test_fmul_reg:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -32
; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
-; LA64-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
-; LA64-NEXT: fst.d $fs0, $sp, 8 # 8-byte Folded Spill
-; LA64-NEXT: move $fp, $a0
-; LA64-NEXT: move $a0, $a1
-; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: fmov.s $fs0, $fa0
-; LA64-NEXT: move $a0, $fp
+; LA64-NEXT: fmov.s $fa0, $fa1
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
-; LA64-NEXT: fmul.s $fa0, $fa0, $fs0
+; LA64-NEXT: fmov.s $fs1, $fa0
+; LA64-NEXT: fmov.s $fa0, $fs0
+; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT: fmul.s $fa0, $fa0, $fs1
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
-; LA64-NEXT: fld.d $fs0, $sp, 8 # 8-byte Folded Reload
-; LA64-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT: movfr2gr.s $a0, $fa0
+; LA64-NEXT: lu12i.w $a1, -16
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 32
; LA64-NEXT: ret
@@ -272,20 +311,23 @@ define void @test_fmul_mem(ptr %p, ptr %q) nounwind {
; LA32-NEXT: addi.w $sp, $sp, -32
; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 24 # 4-byte Folded Spill
-; LA32-NEXT: st.w $s0, $sp, 20 # 4-byte Folded Spill
-; LA32-NEXT: fst.d $fs0, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT: move $fp, $a0
-; LA32-NEXT: ld.hu $s0, $a0, 0
-; LA32-NEXT: ld.hu $a0, $a1, 0
+; LA32-NEXT: ld.hu $a0, $a0, 0
+; LA32-NEXT: ld.hu $a1, $a1, 0
+; LA32-NEXT: movgr2fr.w $fs0, $a0
+; LA32-NEXT: movgr2fr.w $fa0, $a1
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
-; LA32-NEXT: fmov.s $fs0, $fa0
-; LA32-NEXT: move $a0, $s0
+; LA32-NEXT: fmov.s $fs1, $fa0
+; LA32-NEXT: fmov.s $fa0, $fs0
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
-; LA32-NEXT: fmul.s $fa0, $fa0, $fs0
+; LA32-NEXT: fmul.s $fa0, $fa0, $fs1
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT: movfr2gr.s $a0, $fa0
; LA32-NEXT: st.h $a0, $fp, 0
-; LA32-NEXT: fld.d $fs0, $sp, 8 # 8-byte Folded Reload
-; LA32-NEXT: ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 24 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 32
@@ -301,12 +343,14 @@ define void @test_fmul_mem(ptr %p, ptr %q) nounwind {
; LA64-NEXT: move $fp, $a0
; LA64-NEXT: ld.hu $s0, $a0, 0
; LA64-NEXT: ld.hu $a0, $a1, 0
+; LA64-NEXT: movgr2fr.w $fa0, $a0
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fmov.s $fs0, $fa0
-; LA64-NEXT: move $a0, $s0
+; LA64-NEXT: movgr2fr.w $fa0, $s0
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fmul.s $fa0, $fa0, $fs0
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT: movfr2gr.s $a0, $fa0
; LA64-NEXT: st.h $a0, $fp, 0
; LA64-NEXT: fld.d $fs0, $sp, 0 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
@@ -331,6 +375,10 @@ define half @freeze_half_undef() nounwind {
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: fadd.s $fa0, $fa0, $fa0
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT: movfr2gr.s $a0, $fa0
+; LA32-NEXT: lu12i.w $a1, -16
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
@@ -344,6 +392,10 @@ define half @freeze_half_undef() nounwind {
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fadd.s $fa0, $fa0, $fa0
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT: movfr2gr.s $a0, $fa0
+; LA64-NEXT: lu12i.w $a1, -16
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: movgr2fr.w $fa0, $a0
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 16
; LA64-NEXT: ret
@@ -360,6 +412,10 @@ define half @freeze_half_poison(half %maybe.poison) nounwind {
; LA32-NEXT: bl %plt(__gnu_h2f_ieee)
; LA32-NEXT: fadd.s $fa0, $fa0, $fa0
; LA32-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT: movfr2gr.s $a0, $fa0
+; LA32-NEXT: lu12i.w $a1, -16
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: movgr2fr.w $fa0, $a0
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
@@ -371,6 +427,10 @@ define half @freeze_half_poison(half %maybe.poison) nounwind {
; LA64-NEXT: bl %plt(__gnu_h2f_ieee)
; LA64-NEXT: fadd.s $fa0, $fa0, $fa0
; LA64-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT: movfr2gr.s $a0, $fa0
+; LA64-NEXT: lu12i.w $a1, -16
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: movgr2fr.w $fa0, $a0
; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 16
; LA64-NEXT: ret
From cda651c62cd9858c4b5e8fff406ddfb38ea6dec3 Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Fri, 20 Sep 2024 11:58:52 +0800
Subject: [PATCH 2/2] [LoongArch][test] Add tests for issue 97975 and 97981
---
llvm/test/CodeGen/LoongArch/issue97975.ll | 240 ++++++++++++++++++++++
llvm/test/CodeGen/LoongArch/issue97981.ll | 79 +++++++
2 files changed, 319 insertions(+)
create mode 100644 llvm/test/CodeGen/LoongArch/issue97975.ll
create mode 100644 llvm/test/CodeGen/LoongArch/issue97981.ll
diff --git a/llvm/test/CodeGen/LoongArch/issue97975.ll b/llvm/test/CodeGen/LoongArch/issue97975.ll
new file mode 100644
index 00000000000000..22a34f83308452
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/issue97975.ll
@@ -0,0 +1,240 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F
+; RUN: llc --mtriple=loongarch32 --mattr=+d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D
+; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F
+; RUN: llc --mtriple=loongarch64 --mattr=+d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D
+
+define half @f(half %a, half %b, half %c) {
+; LA32S-LABEL: f:
+; LA32S: # %bb.0:
+; LA32S-NEXT: addi.w $sp, $sp, -16
+; LA32S-NEXT: .cfi_def_cfa_offset 16
+; LA32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32S-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32S-NEXT: st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32S-NEXT: st.w $s1, $sp, 0 # 4-byte Folded Spill
+; LA32S-NEXT: .cfi_offset 1, -4
+; LA32S-NEXT: .cfi_offset 22, -8
+; LA32S-NEXT: .cfi_offset 23, -12
+; LA32S-NEXT: .cfi_offset 24, -16
+; LA32S-NEXT: move $fp, $a2
+; LA32S-NEXT: move $s0, $a0
+; LA32S-NEXT: bstrpick.w $a0, $a1, 15, 0
+; LA32S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32S-NEXT: move $s1, $a0
+; LA32S-NEXT: bstrpick.w $a0, $s0, 15, 0
+; LA32S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32S-NEXT: move $a1, $s1
+; LA32S-NEXT: bl %plt(__addsf3)
+; LA32S-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32S-NEXT: move $s0, $a0
+; LA32S-NEXT: bstrpick.w $a0, $fp, 15, 0
+; LA32S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32S-NEXT: move $fp, $a0
+; LA32S-NEXT: bstrpick.w $a0, $s0, 15, 0
+; LA32S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32S-NEXT: move $a1, $fp
+; LA32S-NEXT: bl %plt(__addsf3)
+; LA32S-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32S-NEXT: ld.w $s1, $sp, 0 # 4-byte Folded Reload
+; LA32S-NEXT: ld.w $s0, $sp, 4 # 4-byte Folded Reload
+; LA32S-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32S-NEXT: addi.w $sp, $sp, 16
+; LA32S-NEXT: ret
+;
+; LA32F-LABEL: f:
+; LA32F: # %bb.0:
+; LA32F-NEXT: addi.w $sp, $sp, -16
+; LA32F-NEXT: .cfi_def_cfa_offset 16
+; LA32F-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT: fst.s $fs0, $sp, 8 # 4-byte Folded Spill
+; LA32F-NEXT: fst.s $fs1, $sp, 4 # 4-byte Folded Spill
+; LA32F-NEXT: fst.s $fs2, $sp, 0 # 4-byte Folded Spill
+; LA32F-NEXT: .cfi_offset 1, -4
+; LA32F-NEXT: .cfi_offset 56, -8
+; LA32F-NEXT: .cfi_offset 57, -12
+; LA32F-NEXT: .cfi_offset 58, -16
+; LA32F-NEXT: fmov.s $fs0, $fa2
+; LA32F-NEXT: fmov.s $fs1, $fa0
+; LA32F-NEXT: fmov.s $fa0, $fa1
+; LA32F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32F-NEXT: fmov.s $fs2, $fa0
+; LA32F-NEXT: fmov.s $fa0, $fs1
+; LA32F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32F-NEXT: fadd.s $fa0, $fa0, $fs2
+; LA32F-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32F-NEXT: fmov.s $fs1, $fa0
+; LA32F-NEXT: fmov.s $fa0, $fs0
+; LA32F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32F-NEXT: fadd.s $fa0, $fs1, $fa0
+; LA32F-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32F-NEXT: movfr2gr.s $a0, $fa0
+; LA32F-NEXT: lu12i.w $a1, -16
+; LA32F-NEXT: or $a0, $a0, $a1
+; LA32F-NEXT: movgr2fr.w $fa0, $a0
+; LA32F-NEXT: fld.s $fs2, $sp, 0 # 4-byte Folded Reload
+; LA32F-NEXT: fld.s $fs1, $sp, 4 # 4-byte Folded Reload
+; LA32F-NEXT: fld.s $fs0, $sp, 8 # 4-byte Folded Reload
+; LA32F-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT: addi.w $sp, $sp, 16
+; LA32F-NEXT: ret
+;
+; LA32D-LABEL: f:
+; LA32D: # %bb.0:
+; LA32D-NEXT: addi.w $sp, $sp, -32
+; LA32D-NEXT: .cfi_def_cfa_offset 32
+; LA32D-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32D-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32D-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA32D-NEXT: fst.d $fs2, $sp, 0 # 8-byte Folded Spill
+; LA32D-NEXT: .cfi_offset 1, -4
+; LA32D-NEXT: .cfi_offset 56, -16
+; LA32D-NEXT: .cfi_offset 57, -24
+; LA32D-NEXT: .cfi_offset 58, -32
+; LA32D-NEXT: fmov.s $fs0, $fa2
+; LA32D-NEXT: fmov.s $fs1, $fa0
+; LA32D-NEXT: fmov.s $fa0, $fa1
+; LA32D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32D-NEXT: fmov.s $fs2, $fa0
+; LA32D-NEXT: fmov.s $fa0, $fs1
+; LA32D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32D-NEXT: fadd.s $fa0, $fa0, $fs2
+; LA32D-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32D-NEXT: fmov.s $fs1, $fa0
+; LA32D-NEXT: fmov.s $fa0, $fs0
+; LA32D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA32D-NEXT: fadd.s $fa0, $fs1, $fa0
+; LA32D-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA32D-NEXT: movfr2gr.s $a0, $fa0
+; LA32D-NEXT: lu12i.w $a1, -16
+; LA32D-NEXT: or $a0, $a0, $a1
+; LA32D-NEXT: movgr2fr.w $fa0, $a0
+; LA32D-NEXT: fld.d $fs2, $sp, 0 # 8-byte Folded Reload
+; LA32D-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32D-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32D-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32D-NEXT: addi.w $sp, $sp, 32
+; LA32D-NEXT: ret
+;
+; LA64S-LABEL: f:
+; LA64S: # %bb.0:
+; LA64S-NEXT: addi.d $sp, $sp, -32
+; LA64S-NEXT: .cfi_def_cfa_offset 32
+; LA64S-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64S-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64S-NEXT: st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64S-NEXT: st.d $s1, $sp, 0 # 8-byte Folded Spill
+; LA64S-NEXT: .cfi_offset 1, -8
+; LA64S-NEXT: .cfi_offset 22, -16
+; LA64S-NEXT: .cfi_offset 23, -24
+; LA64S-NEXT: .cfi_offset 24, -32
+; LA64S-NEXT: move $fp, $a2
+; LA64S-NEXT: move $s0, $a0
+; LA64S-NEXT: bstrpick.d $a0, $a1, 15, 0
+; LA64S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64S-NEXT: move $s1, $a0
+; LA64S-NEXT: bstrpick.d $a0, $s0, 15, 0
+; LA64S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64S-NEXT: move $a1, $s1
+; LA64S-NEXT: bl %plt(__addsf3)
+; LA64S-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64S-NEXT: move $s0, $a0
+; LA64S-NEXT: bstrpick.d $a0, $fp, 15, 0
+; LA64S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64S-NEXT: move $fp, $a0
+; LA64S-NEXT: bstrpick.d $a0, $s0, 15, 0
+; LA64S-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64S-NEXT: move $a1, $fp
+; LA64S-NEXT: bl %plt(__addsf3)
+; LA64S-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64S-NEXT: ld.d $s1, $sp, 0 # 8-byte Folded Reload
+; LA64S-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; LA64S-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64S-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64S-NEXT: addi.d $sp, $sp, 32
+; LA64S-NEXT: ret
+;
+; LA64F-LABEL: f:
+; LA64F: # %bb.0:
+; LA64F-NEXT: addi.d $sp, $sp, -32
+; LA64F-NEXT: .cfi_def_cfa_offset 32
+; LA64F-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64F-NEXT: fst.s $fs0, $sp, 20 # 4-byte Folded Spill
+; LA64F-NEXT: fst.s $fs1, $sp, 16 # 4-byte Folded Spill
+; LA64F-NEXT: fst.s $fs2, $sp, 12 # 4-byte Folded Spill
+; LA64F-NEXT: .cfi_offset 1, -8
+; LA64F-NEXT: .cfi_offset 56, -12
+; LA64F-NEXT: .cfi_offset 57, -16
+; LA64F-NEXT: .cfi_offset 58, -20
+; LA64F-NEXT: fmov.s $fs0, $fa2
+; LA64F-NEXT: fmov.s $fs1, $fa0
+; LA64F-NEXT: fmov.s $fa0, $fa1
+; LA64F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64F-NEXT: fmov.s $fs2, $fa0
+; LA64F-NEXT: fmov.s $fa0, $fs1
+; LA64F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64F-NEXT: fadd.s $fa0, $fa0, $fs2
+; LA64F-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64F-NEXT: fmov.s $fs1, $fa0
+; LA64F-NEXT: fmov.s $fa0, $fs0
+; LA64F-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64F-NEXT: fadd.s $fa0, $fs1, $fa0
+; LA64F-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64F-NEXT: movfr2gr.s $a0, $fa0
+; LA64F-NEXT: lu12i.w $a1, -16
+; LA64F-NEXT: or $a0, $a0, $a1
+; LA64F-NEXT: movgr2fr.w $fa0, $a0
+; LA64F-NEXT: fld.s $fs2, $sp, 12 # 4-byte Folded Reload
+; LA64F-NEXT: fld.s $fs1, $sp, 16 # 4-byte Folded Reload
+; LA64F-NEXT: fld.s $fs0, $sp, 20 # 4-byte Folded Reload
+; LA64F-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64F-NEXT: addi.d $sp, $sp, 32
+; LA64F-NEXT: ret
+;
+; LA64D-LABEL: f:
+; LA64D: # %bb.0:
+; LA64D-NEXT: addi.d $sp, $sp, -32
+; LA64D-NEXT: .cfi_def_cfa_offset 32
+; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64D-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64D-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64D-NEXT: fst.d $fs2, $sp, 0 # 8-byte Folded Spill
+; LA64D-NEXT: .cfi_offset 1, -8
+; LA64D-NEXT: .cfi_offset 56, -16
+; LA64D-NEXT: .cfi_offset 57, -24
+; LA64D-NEXT: .cfi_offset 58, -32
+; LA64D-NEXT: fmov.s $fs0, $fa2
+; LA64D-NEXT: fmov.s $fs1, $fa0
+; LA64D-NEXT: fmov.s $fa0, $fa1
+; LA64D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64D-NEXT: fmov.s $fs2, $fa0
+; LA64D-NEXT: fmov.s $fa0, $fs1
+; LA64D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64D-NEXT: fadd.s $fa0, $fa0, $fs2
+; LA64D-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64D-NEXT: fmov.s $fs1, $fa0
+; LA64D-NEXT: fmov.s $fa0, $fs0
+; LA64D-NEXT: bl %plt(__gnu_h2f_ieee)
+; LA64D-NEXT: fadd.s $fa0, $fs1, $fa0
+; LA64D-NEXT: bl %plt(__gnu_f2h_ieee)
+; LA64D-NEXT: movfr2gr.s $a0, $fa0
+; LA64D-NEXT: lu12i.w $a1, -16
+; LA64D-NEXT: or $a0, $a0, $a1
+; LA64D-NEXT: movgr2fr.w $fa0, $a0
+; LA64D-NEXT: fld.d $fs2, $sp, 0 # 8-byte Folded Reload
+; LA64D-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64D-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64D-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64D-NEXT: addi.d $sp, $sp, 32
+; LA64D-NEXT: ret
+ %d = fadd half %a, %b
+ %e = fadd half %d, %c
+ ret half %e
+}
diff --git a/llvm/test/CodeGen/LoongArch/issue97981.ll b/llvm/test/CodeGen/LoongArch/issue97981.ll
new file mode 100644
index 00000000000000..414ded030218b1
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/issue97981.ll
@@ -0,0 +1,79 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32S
+; RUN: llc --mtriple=loongarch32 --mattr=+f --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F
+; RUN: llc --mtriple=loongarch32 --mattr=+d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D
+; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64S
+; RUN: llc --mtriple=loongarch64 --mattr=+f --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F
+; RUN: llc --mtriple=loongarch64 --mattr=+d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D
+
+define half @to_half(i16 %bits) {
+; LA32S-LABEL: to_half:
+; LA32S: # %bb.0:
+; LA32S-NEXT: ret
+;
+; LA32F-LABEL: to_half:
+; LA32F: # %bb.0:
+; LA32F-NEXT: lu12i.w $a1, -16
+; LA32F-NEXT: or $a0, $a0, $a1
+; LA32F-NEXT: movgr2fr.w $fa0, $a0
+; LA32F-NEXT: ret
+;
+; LA32D-LABEL: to_half:
+; LA32D: # %bb.0:
+; LA32D-NEXT: lu12i.w $a1, -16
+; LA32D-NEXT: or $a0, $a0, $a1
+; LA32D-NEXT: movgr2fr.w $fa0, $a0
+; LA32D-NEXT: ret
+;
+; LA64S-LABEL: to_half:
+; LA64S: # %bb.0:
+; LA64S-NEXT: ret
+;
+; LA64F-LABEL: to_half:
+; LA64F: # %bb.0:
+; LA64F-NEXT: lu12i.w $a1, -16
+; LA64F-NEXT: or $a0, $a0, $a1
+; LA64F-NEXT: movgr2fr.w $fa0, $a0
+; LA64F-NEXT: ret
+;
+; LA64D-LABEL: to_half:
+; LA64D: # %bb.0:
+; LA64D-NEXT: lu12i.w $a1, -16
+; LA64D-NEXT: or $a0, $a0, $a1
+; LA64D-NEXT: movgr2fr.w $fa0, $a0
+; LA64D-NEXT: ret
+ %f = bitcast i16 %bits to half
+ ret half %f
+}
+
+define i16 @from_half(half %f) {
+; LA32S-LABEL: from_half:
+; LA32S: # %bb.0:
+; LA32S-NEXT: ret
+;
+; LA32F-LABEL: from_half:
+; LA32F: # %bb.0:
+; LA32F-NEXT: movfr2gr.s $a0, $fa0
+; LA32F-NEXT: ret
+;
+; LA32D-LABEL: from_half:
+; LA32D: # %bb.0:
+; LA32D-NEXT: movfr2gr.s $a0, $fa0
+; LA32D-NEXT: ret
+;
+; LA64S-LABEL: from_half:
+; LA64S: # %bb.0:
+; LA64S-NEXT: ret
+;
+; LA64F-LABEL: from_half:
+; LA64F: # %bb.0:
+; LA64F-NEXT: movfr2gr.s $a0, $fa0
+; LA64F-NEXT: ret
+;
+; LA64D-LABEL: from_half:
+; LA64D: # %bb.0:
+; LA64D-NEXT: movfr2gr.s $a0, $fa0
+; LA64D-NEXT: ret
+ %bits = bitcast half %f to i16
+ ret i16 %bits
+}