[libc-commits] [libc] [llvm] Support STRICT_UINT_TO_FP and STRICT_SINT_TO_FP (PR #102503)
Mikhail R. Gadelha via libc-commits
libc-commits at lists.llvm.org
Thu Sep 5 09:35:22 PDT 2024
https://github.com/mikhailramalho updated https://github.com/llvm/llvm-project/pull/102503
From 7807c1d8896d9add94a1335127777aa237ad3e7a Mon Sep 17 00:00:00 2001
From: "Mikhail R. Gadelha" <mikhail at igalia.com>
Date: Thu, 5 Sep 2024 13:22:00 -0300
Subject: [PATCH] Support STRICT_UINT_TO_FP and STRICT_SINT_TO_FP
Signed-off-by: Mikhail R. Gadelha <mikhail at igalia.com>
---
libc/config/linux/riscv/entrypoints.txt | 89 +++++
.../SelectionDAG/LegalizeFloatTypes.cpp | 79 ++--
.../SelectionDAG/LegalizeIntegerTypes.cpp | 7 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 25 +-
.../test/CodeGen/RISCV/half-convert-strict.ll | 352 ++++++++++++++++++
5 files changed, 500 insertions(+), 52 deletions(-)
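
For context (not part of the patch itself): with f16 soft-promoted, e.g. on
riscv32 with only the D extension, the constrained conversion intrinsics below
are what reach the STRICT_SINT_TO_FP/STRICT_UINT_TO_FP and
STRICT_FP_TO_SINT/STRICT_FP_TO_UINT paths handled here. This is a minimal
sketch mirroring the calls exercised in the updated half-convert-strict.ll
test; the function names are illustrative only.

declare half @llvm.experimental.constrained.sitofp.f16.i32(i32, metadata, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata)

; Strict int-to-fp on half: selected through STRICT_SINT_TO_FP.
define half @itofp_example(i32 %a) nounwind strictfp {
  %r = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret half %r
}

; Strict fp-to-int on half: selected through STRICT_FP_TO_SINT.
define i32 @fptoi_example(half %a) nounwind strictfp {
  %r = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict")
  ret i32 %r
}
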
diff --git a/libc/config/linux/riscv/entrypoints.txt b/libc/config/linux/riscv/entrypoints.txt
index 9a2746dcb86f87..b5f2b51e8e6552 100644
--- a/libc/config/linux/riscv/entrypoints.txt
+++ b/libc/config/linux/riscv/entrypoints.txt
@@ -590,6 +590,95 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.ufromfpxl
)
+if(LIBC_TYPES_HAS_FLOAT16)
+ list(APPEND TARGET_LIBM_ENTRYPOINTS
+ # math.h C23 _Float16 entrypoints
+ libc.src.math.canonicalizef16
+ libc.src.math.ceilf16
+ libc.src.math.copysignf16
+ libc.src.math.exp10f16
+ libc.src.math.exp2f16
+ libc.src.math.expf16
+ libc.src.math.expm1f16
+ libc.src.math.f16add
+ libc.src.math.f16addf
+ libc.src.math.f16addl
+ libc.src.math.f16div
+ libc.src.math.f16divf
+ libc.src.math.f16divl
+ libc.src.math.f16fma
+ libc.src.math.f16fmaf
+ libc.src.math.f16fmal
+ libc.src.math.f16mul
+ libc.src.math.f16mulf
+ libc.src.math.f16mull
+ libc.src.math.f16sqrt
+ libc.src.math.f16sqrtf
+ libc.src.math.f16sqrtl
+ libc.src.math.f16sub
+ libc.src.math.f16subf
+ libc.src.math.f16subl
+ libc.src.math.fabsf16
+ libc.src.math.fdimf16
+ libc.src.math.floorf16
+ libc.src.math.fmaxf16
+ libc.src.math.fmaximum_mag_numf16
+ libc.src.math.fmaximum_magf16
+ libc.src.math.fmaximum_numf16
+ libc.src.math.fmaximumf16
+ libc.src.math.fminf16
+ libc.src.math.fminimum_mag_numf16
+ libc.src.math.fminimum_magf16
+ libc.src.math.fminimum_numf16
+ libc.src.math.fminimumf16
+ libc.src.math.fmodf16
+ libc.src.math.frexpf16
+ libc.src.math.fromfpf16
+ libc.src.math.fromfpxf16
+ libc.src.math.getpayloadf16
+ libc.src.math.ilogbf16
+ libc.src.math.ldexpf16
+ libc.src.math.llogbf16
+ libc.src.math.llrintf16
+ libc.src.math.llroundf16
+ libc.src.math.logbf16
+ libc.src.math.lrintf16
+ libc.src.math.lroundf16
+ libc.src.math.modff16
+ libc.src.math.nanf16
+ libc.src.math.nearbyintf16
+ libc.src.math.nextafterf16
+ libc.src.math.nextdownf16
+ libc.src.math.nexttowardf16
+ libc.src.math.nextupf16
+ libc.src.math.remainderf16
+ libc.src.math.remquof16
+ libc.src.math.rintf16
+ libc.src.math.roundevenf16
+ libc.src.math.roundf16
+ libc.src.math.scalblnf16
+ libc.src.math.scalbnf16
+ libc.src.math.setpayloadf16
+ libc.src.math.setpayloadsigf16
+ libc.src.math.totalorderf16
+ libc.src.math.totalordermagf16
+ libc.src.math.truncf16
+ libc.src.math.ufromfpf16
+ libc.src.math.ufromfpxf16
+ )
+
+ if(LIBC_TYPES_HAS_FLOAT128)
+ list(APPEND TARGET_LIBM_ENTRYPOINTS
+ # math.h C23 mixed _Float16 and _Float128 entrypoints
+ libc.src.math.f16addf128
+ libc.src.math.f16divf128
+ libc.src.math.f16fmaf128
+ libc.src.math.f16mulf128
+ libc.src.math.f16sqrtf128
+ libc.src.math.f16subf128
+ )
+ endif()
+endif()
if(LIBC_TYPES_HAS_FLOAT128)
list(APPEND TARGET_LIBM_ENTRYPOINTS
# math.h C23 _Float128 entrypoints
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index b5c80005a0ecc1..09c2c20fb8a2e2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -2385,32 +2385,26 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_LLRINT(SDNode *N) {
//
static ISD::NodeType GetPromotionOpcode(EVT OpVT, EVT RetVT) {
- if (OpVT == MVT::f16) {
+ if (OpVT == MVT::f16)
return ISD::FP16_TO_FP;
- } else if (RetVT == MVT::f16) {
+ if (RetVT == MVT::f16)
return ISD::FP_TO_FP16;
- } else if (OpVT == MVT::bf16) {
+ if (OpVT == MVT::bf16)
return ISD::BF16_TO_FP;
- } else if (RetVT == MVT::bf16) {
+ if (RetVT == MVT::bf16)
return ISD::FP_TO_BF16;
- }
-
report_fatal_error("Attempt at an invalid promotion-related conversion");
}
static ISD::NodeType GetPromotionOpcodeStrict(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::f16)
return ISD::STRICT_FP16_TO_FP;
-
if (RetVT == MVT::f16)
return ISD::STRICT_FP_TO_FP16;
-
if (OpVT == MVT::bf16)
return ISD::STRICT_BF16_TO_FP;
-
if (RetVT == MVT::bf16)
return ISD::STRICT_FP_TO_BF16;
-
report_fatal_error("Attempt at an invalid promotion-related conversion");
}
@@ -3138,6 +3132,8 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) {
break;
case ISD::SELECT: R = SoftPromoteHalfRes_SELECT(N); break;
case ISD::SELECT_CC: R = SoftPromoteHalfRes_SELECT_CC(N); break;
+ case ISD::STRICT_SINT_TO_FP:
+ case ISD::STRICT_UINT_TO_FP:
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP: R = SoftPromoteHalfRes_XINT_TO_FP(N); break;
case ISD::UNDEF: R = SoftPromoteHalfRes_UNDEF(N); break;
@@ -3288,19 +3284,13 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfRes_FFREXP(SDNode *N) {
SDValue DAGTypeLegalizer::SoftPromoteHalfRes_FP_ROUND(SDNode *N) {
EVT RVT = N->getValueType(0);
- EVT SVT = N->getOperand(0).getValueType();
+ bool IsStrict = N->isStrictFPOpcode();
+ SDValue Op = N->getOperand(IsStrict ? 1 : 0);
+ EVT SVT = Op.getValueType();
- if (N->isStrictFPOpcode()) {
- // FIXME: assume we only have two f16 variants for now.
- unsigned Opcode;
- if (RVT == MVT::f16)
- Opcode = ISD::STRICT_FP_TO_FP16;
- else if (RVT == MVT::bf16)
- Opcode = ISD::STRICT_FP_TO_BF16;
- else
- llvm_unreachable("unknown half type");
- SDValue Res = DAG.getNode(Opcode, SDLoc(N), {MVT::i16, MVT::Other},
- {N->getOperand(0), N->getOperand(1)});
+ if (IsStrict) {
+ SDValue Res = DAG.getNode(GetPromotionOpcodeStrict(SVT, RVT), SDLoc(N),
+ {MVT::i16, MVT::Other}, {N->getOperand(0), Op});
ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
return Res;
}
@@ -3359,6 +3349,16 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfRes_XINT_TO_FP(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), OVT);
SDLoc dl(N);
+ if (N->isStrictFPOpcode()) {
+ SDValue Op = DAG.getNode(N->getOpcode(), dl, {NVT, MVT::Other},
+ {N->getOperand(0), N->getOperand(1)});
+ Op = DAG.getNode(GetPromotionOpcodeStrict(NVT, OVT), dl,
+ {MVT::i16, MVT::Other}, {N->getOperand(0), Op});
+ ReplaceValueWith(SDValue(N, 1), Op.getValue(1));
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Op.getValue(1));
+ return Op;
+ }
+
SDValue Res = DAG.getNode(N->getOpcode(), dl, NVT, N->getOperand(0));
// Round the value to the softened type.
@@ -3447,6 +3447,8 @@ bool DAGTypeLegalizer::SoftPromoteHalfOperand(SDNode *N, unsigned OpNo) {
Res = SoftPromoteHalfOp_FAKE_USE(N, OpNo);
break;
case ISD::FCOPYSIGN: Res = SoftPromoteHalfOp_FCOPYSIGN(N, OpNo); break;
+ case ISD::STRICT_FP_TO_SINT:
+ case ISD::STRICT_FP_TO_UINT:
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: Res = SoftPromoteHalfOp_FP_TO_XINT(N); break;
case ISD::FP_TO_SINT_SAT:
@@ -3473,7 +3475,7 @@ bool DAGTypeLegalizer::SoftPromoteHalfOperand(SDNode *N, unsigned OpNo) {
assert(Res.getNode() != N && "Expected a new node!");
- assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
+ assert(Res.getValueType() == N->getValueType(0) &&
"Invalid operand expansion");
ReplaceValueWith(SDValue(N, 0), Res);
@@ -3517,16 +3519,8 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfOp_FP_EXTEND(SDNode *N) {
Op = GetSoftPromotedHalf(N->getOperand(IsStrict ? 1 : 0));
if (IsStrict) {
- unsigned Opcode;
- if (SVT == MVT::f16)
- Opcode = ISD::STRICT_FP16_TO_FP;
- else if (SVT == MVT::bf16)
- Opcode = ISD::STRICT_BF16_TO_FP;
- else
- llvm_unreachable("unknown half type");
- SDValue Res =
- DAG.getNode(Opcode, SDLoc(N), {N->getValueType(0), MVT::Other},
- {N->getOperand(0), Op});
+ SDValue Res = DAG.getNode(GetPromotionOpcodeStrict(SVT, RVT), SDLoc(N),
+ {RVT, MVT::Other}, {N->getOperand(0), Op});
ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
ReplaceValueWith(SDValue(N, 0), Res);
return SDValue();
@@ -3537,17 +3531,26 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfOp_FP_EXTEND(SDNode *N) {
SDValue DAGTypeLegalizer::SoftPromoteHalfOp_FP_TO_XINT(SDNode *N) {
EVT RVT = N->getValueType(0);
- SDValue Op = N->getOperand(0);
+ bool IsStrict = N->isStrictFPOpcode();
+ SDValue Op = N->getOperand(IsStrict ? 1 : 0);
EVT SVT = Op.getValueType();
SDLoc dl(N);
- EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType());
-
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), SVT);
Op = GetSoftPromotedHalf(Op);
- SDValue Res = DAG.getNode(GetPromotionOpcode(SVT, RVT), dl, NVT, Op);
+ if (IsStrict) {
+ Op = DAG.getNode(GetPromotionOpcodeStrict(SVT, RVT), dl, {NVT, MVT::Other},
+ {N->getOperand(0), Op});
+ Op = DAG.getNode(N->getOpcode(), dl, {RVT, MVT::Other},
+ {N->getOperand(0), Op});
+ ReplaceValueWith(SDValue(N, 1), Op.getValue(1));
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Op.getValue(1));
+ return Op;
+ }
- return DAG.getNode(N->getOpcode(), dl, N->getValueType(0), Res);
+ SDValue Res = DAG.getNode(GetPromotionOpcode(SVT, RVT), dl, NVT, Op);
+ return DAG.getNode(N->getOpcode(), dl, RVT, Res);
}
SDValue DAGTypeLegalizer::SoftPromoteHalfOp_FP_TO_XINT_SAT(SDNode *N) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index a1cb74f43e6050..044ac49224b5c5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -3952,12 +3952,7 @@ void DAGTypeLegalizer::ExpandIntRes_FP_TO_XINT(SDNode *N, SDValue &Lo,
Op = GetPromotedFloat(Op);
if (getTypeAction(Op.getValueType()) == TargetLowering::TypeSoftPromoteHalf) {
- EVT OFPVT = Op.getValueType();
- EVT NFPVT = TLI.getTypeToTransformTo(*DAG.getContext(), OFPVT);
- Op = GetSoftPromotedHalf(Op);
- Op = DAG.getNode(OFPVT == MVT::f16 ? ISD::FP16_TO_FP : ISD::BF16_TO_FP, dl,
- NFPVT, Op);
- Op = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl, VT, Op);
+ Op = SoftPromoteHalfOp_FP_TO_XINT(N);
SplitInteger(Op, Lo, Hi);
return;
}
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d400b2ea1ca2ca..fae2fa102d2992 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -529,6 +529,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
Subtarget.isSoftFPABI() ? LibCall : Custom);
setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);
setOperationAction(ISD::FP16_TO_FP, MVT::f32, Custom);
+ setOperationAction(ISD::STRICT_FP_TO_FP16, MVT::f32, Custom);
+ setOperationAction(ISD::STRICT_FP16_TO_FP, MVT::f32, Custom);
if (Subtarget.hasStdExtZfa()) {
setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
@@ -577,6 +579,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
Subtarget.isSoftFPABI() ? LibCall : Custom);
setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
+ setOperationAction(ISD::STRICT_FP_TO_FP16, MVT::f64, Custom);
+ setOperationAction(ISD::STRICT_FP16_TO_FP, MVT::f64, Expand);
}
if (Subtarget.is64Bit()) {
@@ -6851,30 +6855,35 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
return DAG.getNode(ISD::FP_EXTEND, DL, VT, Res);
return Res;
}
+ case ISD::STRICT_FP_TO_FP16:
case ISD::FP_TO_FP16: {
// Custom lower to ensure the libcall return is passed in an FPR on hard
// float ABIs.
assert(Subtarget.hasStdExtFOrZfinx() && "Unexpected custom legalisation");
SDLoc DL(Op);
MakeLibCallOptions CallOptions;
- RTLIB::Libcall LC =
- RTLIB::getFPROUND(Op.getOperand(0).getValueType(), MVT::f16);
- SDValue Res =
- makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
+ bool IsStrict = Op->isStrictFPOpcode();
+ SDValue Op0 = IsStrict ? Op.getOperand(1) : Op.getOperand(0);
+ RTLIB::Libcall LC = RTLIB::getFPROUND(Op0.getValueType(), MVT::f16);
+ SDValue Res = makeLibCall(DAG, LC, MVT::f32, Op0, CallOptions, DL).first;
if (Subtarget.is64Bit())
return DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Res);
- return DAG.getBitcast(MVT::i32, Res);
+ SDValue Result = DAG.getBitcast(MVT::i32, IsStrict ? Res.getValue(0) : Res);
+ if (IsStrict)
+ return DAG.getMergeValues({Result, Op.getOperand(0)}, DL);
+ return Result;
}
+ case ISD::STRICT_FP16_TO_FP:
case ISD::FP16_TO_FP: {
// Custom lower to ensure the libcall argument is passed in an FPR on hard
// float ABIs.
assert(Subtarget.hasStdExtFOrZfinx() && "Unexpected custom legalisation");
SDLoc DL(Op);
MakeLibCallOptions CallOptions;
+ SDValue Op0 = Op->isStrictFPOpcode() ? Op.getOperand(1) : Op.getOperand(0);
SDValue Arg = Subtarget.is64Bit()
- ? DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32,
- Op.getOperand(0))
- : DAG.getBitcast(MVT::f32, Op.getOperand(0));
+ ? DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Op0)
+ : DAG.getBitcast(MVT::f32, Op0);
SDValue Res =
makeLibCall(DAG, RTLIB::FPEXT_F16_F32, MVT::f32, Arg, CallOptions, DL)
.first;
diff --git a/llvm/test/CodeGen/RISCV/half-convert-strict.ll b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
index 6bd3ef775609ec..f6c2e8a0f7e13e 100644
--- a/llvm/test/CodeGen/RISCV/half-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
@@ -47,6 +47,9 @@
; RUN: llc -mtriple=riscv64 -mattr=+zdinx,+zhinxmin -verify-machineinstrs \
; RUN: -target-abi lp64 -disable-strictnode-mutation < %s \
; RUN: | FileCheck -check-prefixes=CHECK64-IZDINXZHINXMIN %s
+; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs \
+; RUN: -target-abi ilp32d -disable-strictnode-mutation < %s \
+; RUN: | FileCheck -check-prefixes=CHECK32-D,RV32D %s
; NOTE: The rounding mode metadata does not affect which instruction is
; selected. Dynamic rounding mode is always used for operations that
@@ -128,6 +131,20 @@ define i16 @fcvt_si_h(half %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_si_h:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: slli a0, a0, 16
+; CHECK32-D-NEXT: srli a0, a0, 16
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: call __extendhfsf2
+; CHECK32-D-NEXT: fcvt.w.s a0, fa0, rtz
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call i16 @llvm.experimental.constrained.fptosi.i16.f16(half %a, metadata !"fpexcept.strict")
ret i16 %1
}
@@ -209,6 +226,20 @@ define i16 @fcvt_ui_h(half %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_ui_h:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: slli a0, a0, 16
+; CHECK32-D-NEXT: srli a0, a0, 16
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: call __extendhfsf2
+; CHECK32-D-NEXT: fcvt.wu.s a0, fa0, rtz
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call i16 @llvm.experimental.constrained.fptoui.i16.f16(half %a, metadata !"fpexcept.strict")
ret i16 %1
}
@@ -280,6 +311,20 @@ define i32 @fcvt_w_h(half %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_w_h:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: slli a0, a0, 16
+; CHECK32-D-NEXT: srli a0, a0, 16
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: call __extendhfsf2
+; CHECK32-D-NEXT: fcvt.w.s a0, fa0, rtz
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict")
ret i32 %1
}
@@ -351,6 +396,20 @@ define i32 @fcvt_wu_h(half %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_wu_h:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: slli a0, a0, 16
+; CHECK32-D-NEXT: srli a0, a0, 16
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: call __extendhfsf2
+; CHECK32-D-NEXT: fcvt.wu.s a0, fa0, rtz
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict")
ret i32 %1
}
@@ -449,6 +508,24 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: seqz a1, a0
; CHECK64-IZDINXZHINXMIN-NEXT: add a0, a0, a1
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_wu_h_multiple_use:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: .cfi_def_cfa_offset 16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: .cfi_offset ra, -4
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: slli a0, a0, 16
+; CHECK32-D-NEXT: srli a0, a0, 16
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: call __extendhfsf2
+; CHECK32-D-NEXT: fcvt.wu.s a0, fa0, rtz
+; CHECK32-D-NEXT: seqz a1, a0
+; CHECK32-D-NEXT: add a0, a0, a1
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%a = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict")
%b = icmp eq i32 %a, 0
%c = select i1 %b, i32 1, i32 %a
@@ -556,6 +633,20 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_l_h:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: slli a0, a0, 16
+; CHECK32-D-NEXT: srli a0, a0, 16
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: call __extendhfsf2
+; CHECK32-D-NEXT: call __fixsfdi
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call i64 @llvm.experimental.constrained.fptosi.i64.f16(half %a, metadata !"fpexcept.strict")
ret i64 %1
}
@@ -662,6 +753,20 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_lu_h:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: slli a0, a0, 16
+; CHECK32-D-NEXT: srli a0, a0, 16
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: call __extendhfsf2
+; CHECK32-D-NEXT: call __fixunssfdi
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call i64 @llvm.experimental.constrained.fptoui.i64.f16(half %a, metadata !"fpexcept.strict")
ret i64 %1
}
@@ -771,6 +876,22 @@ define half @fcvt_h_si(i16 %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.w a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_h_si:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: slli a0, a0, 16
+; CHECK32-D-NEXT: srai a0, a0, 16
+; CHECK32-D-NEXT: fcvt.s.w fa0, a0
+; CHECK32-D-NEXT: call __truncsfhf2
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: lui a1, 1048560
+; CHECK32-D-NEXT: or a0, a0, a1
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret half %1
}
@@ -842,6 +963,20 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.w a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_h_si_signext:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: fcvt.s.w fa0, a0
+; CHECK32-D-NEXT: call __truncsfhf2
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: lui a1, 1048560
+; CHECK32-D-NEXT: or a0, a0, a1
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret half %1
}
@@ -950,6 +1085,22 @@ define half @fcvt_h_ui(i16 %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.wu a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_h_ui:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: slli a0, a0, 16
+; CHECK32-D-NEXT: srli a0, a0, 16
+; CHECK32-D-NEXT: fcvt.s.wu fa0, a0
+; CHECK32-D-NEXT: call __truncsfhf2
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: lui a1, 1048560
+; CHECK32-D-NEXT: or a0, a0, a1
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret half %1
}
@@ -1021,6 +1172,20 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.wu a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_h_ui_zeroext:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: fcvt.s.wu fa0, a0
+; CHECK32-D-NEXT: call __truncsfhf2
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: lui a1, 1048560
+; CHECK32-D-NEXT: or a0, a0, a1
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret half %1
}
@@ -1091,6 +1256,20 @@ define half @fcvt_h_w(i32 %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.w a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_h_w:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: fcvt.s.w fa0, a0
+; CHECK32-D-NEXT: call __truncsfhf2
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: lui a1, 1048560
+; CHECK32-D-NEXT: or a0, a0, a1
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret half %1
}
@@ -1174,6 +1353,21 @@ define half @fcvt_h_w_load(ptr %p) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.w a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_h_w_load:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: lw a0, 0(a0)
+; CHECK32-D-NEXT: fcvt.s.w fa0, a0
+; CHECK32-D-NEXT: call __truncsfhf2
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: lui a1, 1048560
+; CHECK32-D-NEXT: or a0, a0, a1
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%a = load i32, ptr %p
%1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret half %1
@@ -1245,6 +1439,20 @@ define half @fcvt_h_wu(i32 %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.wu a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_h_wu:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: fcvt.s.wu fa0, a0
+; CHECK32-D-NEXT: call __truncsfhf2
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: lui a1, 1048560
+; CHECK32-D-NEXT: or a0, a0, a1
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret half %1
}
@@ -1340,6 +1548,21 @@ define half @fcvt_h_wu_load(ptr %p) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.wu a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_h_wu_load:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: lw a0, 0(a0)
+; CHECK32-D-NEXT: fcvt.s.wu fa0, a0
+; CHECK32-D-NEXT: call __truncsfhf2
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: lui a1, 1048560
+; CHECK32-D-NEXT: or a0, a0, a1
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%a = load i32, ptr %p
%1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret half %1
@@ -1446,6 +1669,20 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.l a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_h_l:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: call __floatdisf
+; CHECK32-D-NEXT: call __truncsfhf2
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: lui a1, 1048560
+; CHECK32-D-NEXT: or a0, a0, a1
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call half @llvm.experimental.constrained.sitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret half %1
}
@@ -1552,6 +1789,20 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.lu a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_h_lu:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: call __floatundisf
+; CHECK32-D-NEXT: call __truncsfhf2
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: lui a1, 1048560
+; CHECK32-D-NEXT: or a0, a0, a1
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call half @llvm.experimental.constrained.uitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret half %1
}
@@ -1617,6 +1868,19 @@ define half @fcvt_h_s(float %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN: # %bb.0:
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_h_s:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: call __truncsfhf2
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: lui a1, 1048560
+; CHECK32-D-NEXT: or a0, a0, a1
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret half %1
}
@@ -1682,6 +1946,19 @@ define float @fcvt_s_h(half %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN: # %bb.0:
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_s_h:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: slli a0, a0, 16
+; CHECK32-D-NEXT: srli a0, a0, 16
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: call __extendhfsf2
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call float @llvm.experimental.constrained.fpext.f32.f16(half %a, metadata !"fpexcept.strict")
ret float %1
}
@@ -1799,6 +2076,19 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN: # %bb.0:
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.d a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_h_d:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: call __truncdfhf2
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: lui a1, 1048560
+; CHECK32-D-NEXT: or a0, a0, a1
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call half @llvm.experimental.constrained.fptrunc.f16.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret half %1
}
@@ -1924,6 +2214,20 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
; CHECK64-IZDINXZHINXMIN: # %bb.0:
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.d.h a0, a0
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_d_h:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: slli a0, a0, 16
+; CHECK32-D-NEXT: srli a0, a0, 16
+; CHECK32-D-NEXT: fmv.w.x fa0, a0
+; CHECK32-D-NEXT: call __extendhfsf2
+; CHECK32-D-NEXT: fcvt.d.s fa0, fa0
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%1 = call double @llvm.experimental.constrained.fpext.f64.f16(half %a, metadata !"fpexcept.strict")
ret double %1
}
@@ -2034,6 +2338,29 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a2, a2
; CHECK64-IZDINXZHINXMIN-NEXT: sh a2, 0(a1)
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_h_w_demanded_bits:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: .cfi_def_cfa_offset 16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: .cfi_offset ra, -4
+; CHECK32-D-NEXT: .cfi_offset s0, -8
+; CHECK32-D-NEXT: .cfi_offset s1, -12
+; CHECK32-D-NEXT: mv s0, a1
+; CHECK32-D-NEXT: addi s1, a0, 1
+; CHECK32-D-NEXT: fcvt.s.w fa0, s1
+; CHECK32-D-NEXT: call __truncsfhf2
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: sh a0, 0(s0)
+; CHECK32-D-NEXT: mv a0, s1
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%3 = add i32 %0, 1
%4 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
store half %4, ptr %1, align 2
@@ -2145,8 +2472,33 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) strictfp {
; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a2, a2
; CHECK64-IZDINXZHINXMIN-NEXT: sh a2, 0(a1)
; CHECK64-IZDINXZHINXMIN-NEXT: ret
+;
+; CHECK32-D-LABEL: fcvt_h_wu_demanded_bits:
+; CHECK32-D: # %bb.0:
+; CHECK32-D-NEXT: addi sp, sp, -16
+; CHECK32-D-NEXT: .cfi_def_cfa_offset 16
+; CHECK32-D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; CHECK32-D-NEXT: .cfi_offset ra, -4
+; CHECK32-D-NEXT: .cfi_offset s0, -8
+; CHECK32-D-NEXT: .cfi_offset s1, -12
+; CHECK32-D-NEXT: mv s0, a1
+; CHECK32-D-NEXT: addi s1, a0, 1
+; CHECK32-D-NEXT: fcvt.s.wu fa0, s1
+; CHECK32-D-NEXT: call __truncsfhf2
+; CHECK32-D-NEXT: fmv.x.w a0, fa0
+; CHECK32-D-NEXT: sh a0, 0(s0)
+; CHECK32-D-NEXT: mv a0, s1
+; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; CHECK32-D-NEXT: addi sp, sp, 16
+; CHECK32-D-NEXT: ret
%3 = add i32 %0, 1
%4 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
store half %4, ptr %1, align 2
ret i32 %3
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32D: {{.*}}