[llvm] ebfff46 - [LegalizeTypes][FPEnv][X86] Add initial support for softening strict fp nodes
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Wed Nov 27 10:52:00 PST 2019
Author: Craig Topper
Date: 2019-11-27T10:50:10-08:00
New Revision: ebfff46c8d29efd9767a24043766ddd588db26c3
URL: https://github.com/llvm/llvm-project/commit/ebfff46c8d29efd9767a24043766ddd588db26c3
DIFF: https://github.com/llvm/llvm-project/commit/ebfff46c8d29efd9767a24043766ddd588db26c3.diff
LOG: [LegalizeTypes][FPEnv][X86] Add initial support for softening strict fp nodes
This is based on what is required for softening fp128 operations on 32-bit X86, assuming f32/f64/f80 are legal, so some cases may still be missing.
Differential Revision: https://reviews.llvm.org/D70654
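The change applies one recipe throughout the legalizer: a strict FP node carries its chain as operand 0, so the value operands shift by one, the libcall is emitted with that incoming chain, and the node's chain result (result 1) is replaced with the chain returned by the call. The standalone C++ sketch below models that recipe outside of LLVM; the Node/softenUnary types and the fake makeLibCall are illustrative stand-ins, not LLVM APIs.

    // Standalone sketch of the strict-FP softening recipe used in this patch.
    // None of these types are LLVM's; they only model the operand layout:
    // a strict node is {chain, value operands...}, a non-strict node has no chain.
    #include <cassert>
    #include <cstdio>
    #include <optional>
    #include <string>
    #include <utility>
    #include <vector>

    struct Value { std::string Name; };

    struct Node {
      bool IsStrict = false;              // models N->isStrictFPOpcode()
      std::vector<Value> Operands;        // operand 0 is the chain when strict
      std::optional<Value> ChainResult;   // models result number 1 of a strict node
    };

    // Stand-in for TLI.makeLibCall: returns {result value, outgoing chain}.
    static std::pair<Value, Value> makeLibCall(const std::string &LC,
                                               const Value &Op,
                                               const Value &Chain) {
      std::printf("call %s(%s) chained on [%s]\n", LC.c_str(), Op.Name.c_str(),
                  Chain.Name.c_str());
      return {Value{LC + ".result"}, Value{LC + ".out-chain"}};
    }

    // Mirrors the shape of SoftenFloatRes_Unary after the patch: peel the chain,
    // index the value operand at (0 + Offset), pass the chain to the libcall,
    // and wire the returned chain back to the node's chain result.
    static Value softenUnary(Node &N, const std::string &LC) {
      unsigned Offset = N.IsStrict ? 1 : 0;
      assert(N.Operands.size() == 1 + Offset && "Unexpected number of operands!");
      Value Op = N.Operands[0 + Offset];
      Value Chain = N.IsStrict ? N.Operands[0] : Value{"<no chain>"};
      auto Tmp = makeLibCall(LC, Op, Chain);
      if (N.IsStrict)
        N.ChainResult = Tmp.second;     // models ReplaceValueWith(SDValue(N, 1), ...)
      return Tmp.first;
    }

    int main() {
      Node Strict{true, {Value{"entry-chain"}, Value{"%x"}}, std::nullopt};
      Node Plain{false, {Value{"%x"}}, std::nullopt};
      softenUnary(Strict, "sqrtl");
      softenUnary(Plain, "sqrtl");
      return 0;
    }

The binary and FMA variants in the real patch follow the same pattern, just with more value operands at (1 + Offset) and (2 + Offset).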
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 65ccb996299d..f622f0a6306d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -65,35 +65,60 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
case ISD::EXTRACT_VECTOR_ELT:
R = SoftenFloatRes_EXTRACT_VECTOR_ELT(N, ResNo); break;
case ISD::FABS: R = SoftenFloatRes_FABS(N); break;
+ case ISD::STRICT_FMINNUM:
case ISD::FMINNUM: R = SoftenFloatRes_FMINNUM(N); break;
+ case ISD::STRICT_FMAXNUM:
case ISD::FMAXNUM: R = SoftenFloatRes_FMAXNUM(N); break;
+ case ISD::STRICT_FADD:
case ISD::FADD: R = SoftenFloatRes_FADD(N); break;
case ISD::FCBRT: R = SoftenFloatRes_FCBRT(N); break;
+ case ISD::STRICT_FCEIL:
case ISD::FCEIL: R = SoftenFloatRes_FCEIL(N); break;
case ISD::FCOPYSIGN: R = SoftenFloatRes_FCOPYSIGN(N); break;
+ case ISD::STRICT_FCOS:
case ISD::FCOS: R = SoftenFloatRes_FCOS(N); break;
+ case ISD::STRICT_FDIV:
case ISD::FDIV: R = SoftenFloatRes_FDIV(N); break;
+ case ISD::STRICT_FEXP:
case ISD::FEXP: R = SoftenFloatRes_FEXP(N); break;
+ case ISD::STRICT_FEXP2:
case ISD::FEXP2: R = SoftenFloatRes_FEXP2(N); break;
+ case ISD::STRICT_FFLOOR:
case ISD::FFLOOR: R = SoftenFloatRes_FFLOOR(N); break;
+ case ISD::STRICT_FLOG:
case ISD::FLOG: R = SoftenFloatRes_FLOG(N); break;
+ case ISD::STRICT_FLOG2:
case ISD::FLOG2: R = SoftenFloatRes_FLOG2(N); break;
+ case ISD::STRICT_FLOG10:
case ISD::FLOG10: R = SoftenFloatRes_FLOG10(N); break;
+ case ISD::STRICT_FMA:
case ISD::FMA: R = SoftenFloatRes_FMA(N); break;
+ case ISD::STRICT_FMUL:
case ISD::FMUL: R = SoftenFloatRes_FMUL(N); break;
+ case ISD::STRICT_FNEARBYINT:
case ISD::FNEARBYINT: R = SoftenFloatRes_FNEARBYINT(N); break;
case ISD::FNEG: R = SoftenFloatRes_FNEG(N); break;
+ case ISD::STRICT_FP_EXTEND:
case ISD::FP_EXTEND: R = SoftenFloatRes_FP_EXTEND(N); break;
case ISD::FP_ROUND: R = SoftenFloatRes_FP_ROUND(N); break;
case ISD::FP16_TO_FP: R = SoftenFloatRes_FP16_TO_FP(N); break;
+ case ISD::STRICT_FPOW:
case ISD::FPOW: R = SoftenFloatRes_FPOW(N); break;
+ case ISD::STRICT_FPOWI:
case ISD::FPOWI: R = SoftenFloatRes_FPOWI(N); break;
+ case ISD::STRICT_FREM:
case ISD::FREM: R = SoftenFloatRes_FREM(N); break;
+ case ISD::STRICT_FRINT:
case ISD::FRINT: R = SoftenFloatRes_FRINT(N); break;
+ case ISD::STRICT_FROUND:
case ISD::FROUND: R = SoftenFloatRes_FROUND(N); break;
+ case ISD::STRICT_FSIN:
case ISD::FSIN: R = SoftenFloatRes_FSIN(N); break;
+ case ISD::STRICT_FSQRT:
case ISD::FSQRT: R = SoftenFloatRes_FSQRT(N); break;
+ case ISD::STRICT_FSUB:
case ISD::FSUB: R = SoftenFloatRes_FSUB(N); break;
+ case ISD::STRICT_FTRUNC:
case ISD::FTRUNC: R = SoftenFloatRes_FTRUNC(N); break;
case ISD::LOAD: R = SoftenFloatRes_LOAD(N); break;
case ISD::ATOMIC_SWAP: R = BitcastToInt_ATOMIC_SWAP(N); break;
@@ -113,25 +138,43 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_Unary(SDNode *N, RTLIB::Libcall LC) {
+ bool IsStrict = N->isStrictFPOpcode();
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
- assert(N->getNumOperands() == 1 && "Unexpected number of operands!");
- SDValue Op = GetSoftenedFloat(N->getOperand(0));
+ unsigned Offset = IsStrict ? 1 : 0;
+ assert(N->getNumOperands() == (1 + Offset) &&
+ "Unexpected number of operands!");
+ SDValue Op = GetSoftenedFloat(N->getOperand(0 + Offset));
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
TargetLowering::MakeLibCallOptions CallOptions;
- EVT OpVT = N->getOperand(0).getValueType();
+ EVT OpVT = N->getOperand(0 + Offset).getValueType();
CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
- return TLI.makeLibCall(DAG, LC, NVT, Op, CallOptions, SDLoc(N)).first;
+ std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, NVT, Op,
+ CallOptions, SDLoc(N),
+ Chain);
+ if (IsStrict)
+ ReplaceValueWith(SDValue(N, 1), Tmp.second);
+ return Tmp.first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_Binary(SDNode *N, RTLIB::Libcall LC) {
+ bool IsStrict = N->isStrictFPOpcode();
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
- assert(N->getNumOperands() == 2 && "Unexpected number of operands!");
- SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
- GetSoftenedFloat(N->getOperand(1)) };
+ unsigned Offset = IsStrict ? 1 : 0;
+ assert(N->getNumOperands() == (2 + Offset) &&
+ "Unexpected number of operands!");
+ SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0 + Offset)),
+ GetSoftenedFloat(N->getOperand(1 + Offset)) };
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
TargetLowering::MakeLibCallOptions CallOptions;
- EVT OpsVT[2] = { N->getOperand(0).getValueType(),
- N->getOperand(1).getValueType() };
+ EVT OpsVT[2] = { N->getOperand(0 + Offset).getValueType(),
+ N->getOperand(1 + Offset).getValueType() };
CallOptions.setTypeListBeforeSoften(OpsVT, N->getValueType(0), true);
- return TLI.makeLibCall(DAG, LC, NVT, Ops, CallOptions, SDLoc(N)).first;
+ std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, NVT, Ops,
+ CallOptions, SDLoc(N),
+ Chain);
+ if (IsStrict)
+ ReplaceValueWith(SDValue(N, 1), Tmp.second);
+ return Tmp.first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_BITCAST(SDNode *N) {
@@ -368,22 +411,29 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FLOG10(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FMA(SDNode *N) {
+ bool IsStrict = N->isStrictFPOpcode();
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
- SDValue Ops[3] = { GetSoftenedFloat(N->getOperand(0)),
- GetSoftenedFloat(N->getOperand(1)),
- GetSoftenedFloat(N->getOperand(2)) };
+ unsigned Offset = IsStrict ? 1 : 0;
+ SDValue Ops[3] = { GetSoftenedFloat(N->getOperand(0 + Offset)),
+ GetSoftenedFloat(N->getOperand(1 + Offset)),
+ GetSoftenedFloat(N->getOperand(2 + Offset)) };
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
TargetLowering::MakeLibCallOptions CallOptions;
- EVT OpsVT[3] = { N->getOperand(0).getValueType(),
- N->getOperand(1).getValueType(),
- N->getOperand(2).getValueType() };
+ EVT OpsVT[3] = { N->getOperand(0 + Offset).getValueType(),
+ N->getOperand(1 + Offset).getValueType(),
+ N->getOperand(2 + Offset).getValueType() };
CallOptions.setTypeListBeforeSoften(OpsVT, N->getValueType(0), true);
- return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
- RTLIB::FMA_F32,
- RTLIB::FMA_F64,
- RTLIB::FMA_F80,
- RTLIB::FMA_F128,
- RTLIB::FMA_PPCF128),
- NVT, Ops, CallOptions, SDLoc(N)).first;
+ std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG,
+ GetFPLibCall(N->getValueType(0),
+ RTLIB::FMA_F32,
+ RTLIB::FMA_F64,
+ RTLIB::FMA_F80,
+ RTLIB::FMA_F128,
+ RTLIB::FMA_PPCF128),
+ NVT, Ops, CallOptions, SDLoc(N), Chain);
+ if (IsStrict)
+ ReplaceValueWith(SDValue(N, 1), Tmp.second);
+ return Tmp.first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FMUL(SDNode *N) {
@@ -430,14 +480,24 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FNEG(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FP_EXTEND(SDNode *N) {
+ bool IsStrict = N->isStrictFPOpcode();
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
- SDValue Op = N->getOperand(0);
+ SDValue Op = N->getOperand(IsStrict ? 1 : 0);
+
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
// There's only a libcall for f16 -> f32, so proceed in two stages. Also, it's
// entirely possible for both f16 and f32 to be legal, so use the fully
// hard-float FP_EXTEND rather than FP16_TO_FP.
if (Op.getValueType() == MVT::f16 && N->getValueType(0) != MVT::f32) {
- Op = DAG.getNode(ISD::FP_EXTEND, SDLoc(N), MVT::f32, Op);
+ if (IsStrict) {
+ Op = DAG.getNode(ISD::STRICT_FP_EXTEND, SDLoc(N),
+ { MVT::f32, MVT::Other }, { Chain, Op });
+ Chain = Op.getValue(1);
+ } else {
+ Op = DAG.getNode(ISD::FP_EXTEND, SDLoc(N), MVT::f32, Op);
+ }
+
if (getTypeAction(MVT::f32) == TargetLowering::TypeSoftenFloat)
AddToWorklist(Op.getNode());
}
@@ -456,7 +516,12 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FP_EXTEND(SDNode *N) {
TargetLowering::MakeLibCallOptions CallOptions;
EVT OpsVT[1] = { N->getOperand(0).getValueType() };
CallOptions.setTypeListBeforeSoften(OpsVT, N->getValueType(0), true);
- return TLI.makeLibCall(DAG, LC, NVT, Op, CallOptions, SDLoc(N)).first;
+ std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, NVT, Op,
+ CallOptions, SDLoc(N),
+ Chain);
+ if (IsStrict)
+ ReplaceValueWith(SDValue(N, 1), Tmp.second);
+ return Tmp.first;
}
// FIXME: Should we just use 'normal' FP_EXTEND / FP_TRUNC instead of special
@@ -505,7 +570,9 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FPOW(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FPOWI(SDNode *N) {
- assert(N->getOperand(1).getValueType() == MVT::i32 &&
+ bool IsStrict = N->isStrictFPOpcode();
+ unsigned Offset = IsStrict ? 1 : 0;
+ assert(N->getOperand(1 + Offset).getValueType() == MVT::i32 &&
"Unsupported power type!");
RTLIB::Libcall LC = GetFPLibCall(N->getValueType(0),
RTLIB::POWI_F32,
@@ -521,12 +588,19 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FPOWI(SDNode *N) {
}
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
- SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)), N->getOperand(1) };
+ SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0 + Offset)),
+ N->getOperand(1 + Offset) };
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
TargetLowering::MakeLibCallOptions CallOptions;
- EVT OpsVT[2] = { N->getOperand(0).getValueType(),
- N->getOperand(1).getValueType() };
+ EVT OpsVT[2] = { N->getOperand(0 + Offset).getValueType(),
+ N->getOperand(1 + Offset).getValueType() };
CallOptions.setTypeListBeforeSoften(OpsVT, N->getValueType(0), true);
- return TLI.makeLibCall(DAG, LC, NVT, Ops, CallOptions, SDLoc(N)).first;
+ std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, NVT, Ops,
+ CallOptions, SDLoc(N),
+ Chain);
+ if (IsStrict)
+ ReplaceValueWith(SDValue(N, 1), Tmp.second);
+ return Tmp.first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FREM(SDNode *N) {
@@ -718,12 +792,17 @@ bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) {
case ISD::BR_CC: Res = SoftenFloatOp_BR_CC(N); break;
case ISD::FP_EXTEND: Res = SoftenFloatOp_FP_EXTEND(N); break;
case ISD::FP_TO_FP16: // Same as FP_ROUND for softening purposes
+ case ISD::STRICT_FP_ROUND:
case ISD::FP_ROUND: Res = SoftenFloatOp_FP_ROUND(N); break;
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: Res = SoftenFloatOp_FP_TO_XINT(N); break;
+ case ISD::STRICT_LROUND:
case ISD::LROUND: Res = SoftenFloatOp_LROUND(N); break;
+ case ISD::STRICT_LLROUND:
case ISD::LLROUND: Res = SoftenFloatOp_LLROUND(N); break;
+ case ISD::STRICT_LRINT:
case ISD::LRINT: Res = SoftenFloatOp_LRINT(N); break;
+ case ISD::STRICT_LLRINT:
case ISD::LLRINT: Res = SoftenFloatOp_LLRINT(N); break;
case ISD::SELECT_CC: Res = SoftenFloatOp_SELECT_CC(N); break;
case ISD::SETCC: Res = SoftenFloatOp_SETCC(N); break;
@@ -774,20 +853,31 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_FP_EXTEND(SDNode *N) {
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_ROUND(SDNode *N) {
// We actually deal with the partially-softened FP_TO_FP16 node too, which
// returns an i16 so doesn't meet the constraints necessary for FP_ROUND.
- assert(N->getOpcode() == ISD::FP_ROUND || N->getOpcode() == ISD::FP_TO_FP16);
+ assert(N->getOpcode() == ISD::FP_ROUND || N->getOpcode() == ISD::FP_TO_FP16 ||
+ N->getOpcode() == ISD::STRICT_FP_ROUND);
- EVT SVT = N->getOperand(0).getValueType();
+ bool IsStrict = N->isStrictFPOpcode();
+ SDValue Op = N->getOperand(IsStrict ? 1 : 0);
+ EVT SVT = Op.getValueType();
EVT RVT = N->getValueType(0);
EVT FloatRVT = N->getOpcode() == ISD::FP_TO_FP16 ? MVT::f16 : RVT;
RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, FloatRVT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_ROUND libcall");
- SDValue Op = GetSoftenedFloat(N->getOperand(0));
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
+ Op = GetSoftenedFloat(Op);
TargetLowering::MakeLibCallOptions CallOptions;
- EVT OpsVT[1] = { N->getOperand(0).getValueType() };
- CallOptions.setTypeListBeforeSoften(OpsVT, N->getValueType(0), true);
- return TLI.makeLibCall(DAG, LC, RVT, Op, CallOptions, SDLoc(N)).first;
+ CallOptions.setTypeListBeforeSoften(SVT, RVT, true);
+ std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, RVT, Op,
+ CallOptions, SDLoc(N),
+ Chain);
+ if (IsStrict) {
+ ReplaceValueWith(SDValue(N, 1), Tmp.second);
+ ReplaceValueWith(SDValue(N, 0), Tmp.first);
+ return SDValue();
+ }
+ return Tmp.first;
}
SDValue DAGTypeLegalizer::SoftenFloatOp_BR_CC(SDNode *N) {
@@ -947,15 +1037,27 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_FCOPYSIGN(SDNode *N) {
SDValue DAGTypeLegalizer::SoftenFloatOp_Unary(SDNode *N, RTLIB::Libcall LC) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
- SDValue Op = GetSoftenedFloat(N->getOperand(0));
+ bool IsStrict = N->isStrictFPOpcode();
+ unsigned Offset = IsStrict ? 1 : 0;
+ SDValue Op = GetSoftenedFloat(N->getOperand(0 + Offset));
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
TargetLowering::MakeLibCallOptions CallOptions;
- EVT OpVT = N->getOperand(0).getValueType();
+ EVT OpVT = N->getOperand(0 + Offset).getValueType();
CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
- return TLI.makeLibCall(DAG, LC, NVT, Op, CallOptions, SDLoc(N)).first;
+ std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, NVT, Op,
+ CallOptions, SDLoc(N),
+ Chain);
+ if (IsStrict) {
+ ReplaceValueWith(SDValue(N, 1), Tmp.second);
+ ReplaceValueWith(SDValue(N, 0), Tmp.first);
+ return SDValue();
+ }
+
+ return Tmp.first;
}
SDValue DAGTypeLegalizer::SoftenFloatOp_LROUND(SDNode *N) {
- EVT OpVT = N->getOperand(0).getValueType();
+ EVT OpVT = N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType();
return SoftenFloatOp_Unary(N, GetFPLibCall(OpVT,
RTLIB::LROUND_F32,
RTLIB::LROUND_F64,
@@ -965,7 +1067,7 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_LROUND(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatOp_LLROUND(SDNode *N) {
- EVT OpVT = N->getOperand(0).getValueType();
+ EVT OpVT = N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType();
return SoftenFloatOp_Unary(N, GetFPLibCall(OpVT,
RTLIB::LLROUND_F32,
RTLIB::LLROUND_F64,
@@ -975,7 +1077,7 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_LLROUND(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatOp_LRINT(SDNode *N) {
- EVT OpVT = N->getOperand(0).getValueType();
+ EVT OpVT = N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType();
return SoftenFloatOp_Unary(N, GetFPLibCall(OpVT,
RTLIB::LRINT_F32,
RTLIB::LRINT_F64,
@@ -985,7 +1087,7 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_LRINT(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatOp_LLRINT(SDNode *N) {
- EVT OpVT = N->getOperand(0).getValueType();
+ EVT OpVT = N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType();
return SoftenFloatOp_Unary(N, GetFPLibCall(OpVT,
RTLIB::LLRINT_F32,
RTLIB::LLRINT_F64,
diff --git a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
index 9f5c32421dcc..05b129ceeeaa 100644
--- a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
+++ b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
@@ -1,10 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx \
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-android \
; RUN: -enable-legalize-types-checking \
; RUN: -disable-strictnode-mutation | FileCheck %s
-; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx \
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu \
; RUN: -enable-legalize-types-checking \
; RUN: -disable-strictnode-mutation | FileCheck %s
+; RUN: llc < %s -O2 -mtriple=i686-linux-gnu -mattr=+sse2 \
+; RUN: -enable-legalize-types-checking \
+; RUN: -disable-strictnode-mutation | FileCheck %s --check-prefix=X86
; Check all soft floating point library function calls.
@@ -15,6 +18,39 @@ define fp128 @add(fp128 %x, fp128 %y) nounwind strictfp {
; CHECK-NEXT: callq __addtf3
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: add:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll __addtf3
+; X86-NEXT: addl $44, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%add = call fp128 @llvm.experimental.constrained.fadd.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %add
@@ -27,6 +63,39 @@ define fp128 @sub(fp128 %x, fp128 %y) nounwind strictfp {
; CHECK-NEXT: callq __subtf3
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: sub:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll __subtf3
+; X86-NEXT: addl $44, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%sub = call fp128 @llvm.experimental.constrained.fsub.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %sub
@@ -39,6 +108,39 @@ define fp128 @mul(fp128 %x, fp128 %y) nounwind strictfp {
; CHECK-NEXT: callq __multf3
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: mul:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll __multf3
+; X86-NEXT: addl $44, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%mul = call fp128 @llvm.experimental.constrained.fmul.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %mul
@@ -51,6 +153,39 @@ define fp128 @div(fp128 %x, fp128 %y) nounwind strictfp {
; CHECK-NEXT: callq __divtf3
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: div:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll __divtf3
+; X86-NEXT: addl $44, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%div = call fp128 @llvm.experimental.constrained.fdiv.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %div
@@ -63,6 +198,43 @@ define fp128 @fma(fp128 %x, fp128 %y, fp128 %z) nounwind strictfp {
; CHECK-NEXT: callq fmal
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: fma:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll fmal
+; X86-NEXT: addl $60, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%fma = call fp128 @llvm.experimental.constrained.fma.f128(fp128 %x, fp128 %y, fp128 %z, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %fma
@@ -75,6 +247,39 @@ define fp128 @frem(fp128 %x, fp128 %y) nounwind strictfp {
; CHECK-NEXT: callq fmodl
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: frem:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll fmodl
+; X86-NEXT: addl $44, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%div = call fp128 @llvm.experimental.constrained.frem.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %div
@@ -87,6 +292,35 @@ define fp128 @ceil(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq ceill
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: ceil:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll ceill
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%ceil = call fp128 @llvm.experimental.constrained.ceil.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %ceil
@@ -99,6 +333,35 @@ define fp128 @cos(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq cosl
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: cos:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll cosl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%cos = call fp128 @llvm.experimental.constrained.cos.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %cos
@@ -111,6 +374,35 @@ define fp128 @exp(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq expl
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: exp:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll expl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%exp = call fp128 @llvm.experimental.constrained.exp.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %exp
@@ -123,6 +415,35 @@ define fp128 @exp2(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq exp2l
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: exp2:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll exp2l
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%exp2 = call fp128 @llvm.experimental.constrained.exp2.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %exp2
@@ -135,6 +456,35 @@ define fp128 @floor(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq floorl
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: floor:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll floorl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%floor = call fp128 @llvm.experimental.constrained.floor.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %floor
@@ -147,6 +497,35 @@ define fp128 @log(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq logl
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: log:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll logl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%log = call fp128 @llvm.experimental.constrained.log.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %log
@@ -159,6 +538,35 @@ define fp128 @log10(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq log10l
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: log10:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll log10l
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%log10 = call fp128 @llvm.experimental.constrained.log10.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %log10
@@ -171,6 +579,35 @@ define fp128 @log2(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq log2l
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: log2:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll log2l
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%log2 = call fp128 @llvm.experimental.constrained.log2.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %log2
@@ -183,6 +620,39 @@ define fp128 @maxnum(fp128 %x, fp128 %y) nounwind strictfp {
; CHECK-NEXT: callq fmaxl
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: maxnum:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll fmaxl
+; X86-NEXT: addl $44, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%maxnum = call fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %maxnum
@@ -195,6 +665,39 @@ define fp128 @minnum(fp128 %x, fp128 %y) nounwind strictfp {
; CHECK-NEXT: callq fminl
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: minnum:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll fminl
+; X86-NEXT: addl $44, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%minnum = call fp128 @llvm.experimental.constrained.minnum.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %minnum
@@ -207,6 +710,35 @@ define fp128 @nearbyint(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq nearbyintl
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: nearbyint:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll nearbyintl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%nearbyint = call fp128 @llvm.experimental.constrained.nearbyint.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %nearbyint
@@ -219,6 +751,39 @@ define fp128 @pow(fp128 %x, fp128 %y) nounwind strictfp {
; CHECK-NEXT: callq powl
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: pow:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll powl
+; X86-NEXT: addl $44, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%pow = call fp128 @llvm.experimental.constrained.pow.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %pow
@@ -231,6 +796,36 @@ define fp128 @powi(fp128 %x, i32 %y) nounwind strictfp {
; CHECK-NEXT: callq __powitf2
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: powi:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll __powitf2
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%powi = call fp128 @llvm.experimental.constrained.powi.f128(fp128 %x, i32 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %powi
@@ -243,6 +838,35 @@ define fp128 @rint(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq rintl
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: rint:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll rintl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%rint = call fp128 @llvm.experimental.constrained.rint.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %rint
@@ -255,6 +879,35 @@ define fp128 @round(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq roundl
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: round:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll roundl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%round = call fp128 @llvm.experimental.constrained.round.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %round
@@ -267,6 +920,35 @@ define fp128 @sin(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq sinl
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: sin:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll sinl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%sin = call fp128 @llvm.experimental.constrained.sin.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %sin
@@ -279,6 +961,35 @@ define fp128 @sqrt(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq sqrtl
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: sqrt:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll sqrtl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%sqrt = call fp128 @llvm.experimental.constrained.sqrt.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %sqrt
@@ -291,6 +1002,35 @@ define fp128 @trunc(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq truncl
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
+;
+; X86-LABEL: trunc:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $20, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll truncl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
+; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $20, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
entry:
%trunc = call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %trunc
@@ -303,6 +1043,17 @@ define i32 @lrint(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq lrintl
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: retq
+;
+; X86-LABEL: lrint:
+; X86: # %bb.0: # %entry
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll lrintl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: retl
entry:
%rint = call i32 @llvm.experimental.constrained.lrint.i32.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret i32 %rint
@@ -315,6 +1066,17 @@ define i64 @llrint(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq llrintl
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: retq
+;
+; X86-LABEL: llrint:
+; X86: # %bb.0: # %entry
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll llrintl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: retl
entry:
%rint = call i64 @llvm.experimental.constrained.llrint.i64.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret i64 %rint
@@ -327,6 +1089,17 @@ define i32 @lround(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq lroundl
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: retq
+;
+; X86-LABEL: lround:
+; X86: # %bb.0: # %entry
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll lroundl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: retl
entry:
%round = call i32 @llvm.experimental.constrained.lround.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0
ret i32 %round
@@ -339,6 +1112,17 @@ define i64 @llround(fp128 %x) nounwind strictfp {
; CHECK-NEXT: callq llroundl
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: retq
+;
+; X86-LABEL: llround:
+; X86: # %bb.0: # %entry
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll llroundl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: retl
entry:
%round = call i64 @llvm.experimental.constrained.llround.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0
ret i64 %round