[llvm] [SelectionDAGBuilder] Remove NoNaNsFPMath uses (PR #169904)
via llvm-commits
llvm-commits at lists.llvm.org
Sun Feb 1 17:51:08 PST 2026
https://github.com/paperchalice updated https://github.com/llvm/llvm-project/pull/169904
>From 18b6bfd8bdd47ab78ef232e8bda992886994d452 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Fri, 28 Nov 2025 19:36:20 +0800
Subject: [PATCH 01/24] [SelectionDAG] Remove NoNaNsFPMath Replaced by checking
fast-math flags or nofpclass.
---
.../SelectionDAG/SelectionDAGBuilder.cpp | 31 +++++++++---
llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll | 50 +++++++++----------
2 files changed, 48 insertions(+), 33 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 52e0d8fefdf2f..644487864e8ca 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2447,6 +2447,26 @@ static bool InBlock(const Value *V, const BasicBlock *BB) {
return true;
}
+static bool AreFCmpOperandsNonNaN(const Instruction *Inst,
+ const SelectionDAG &DAG) {
+ assert(
+ (isa<FCmpInst>(Inst) || isa<ConstrainedFPCmpIntrinsic>(Inst) ||
+ (isa<VPIntrinsic>(Inst) &&
+ dyn_cast<VPIntrinsic>(Inst)->getIntrinsicID() == Intrinsic::vp_fcmp)) &&
+ "Not fcmp instruction or its intrinsic variants!");
+
+ if (const auto *FPOp = dyn_cast<FPMathOperator>(Inst))
+ if (FPOp->hasNoNaNs())
+ return true;
+
+ for (int I = 0; I != 2; ++I)
+ if (!isKnownNeverNaN(Inst->getOperand(I),
+ SimplifyQuery(DAG.getDataLayout(), Inst)))
+ return false;
+
+ return true;
+}
+
/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
@@ -2480,7 +2500,7 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
FCmpInst::Predicate Pred =
InvertCond ? FC->getInversePredicate() : FC->getPredicate();
Condition = getFCmpCondCode(Pred);
- if (TM.Options.NoNaNsFPMath)
+ if (AreFCmpOperandsNonNaN(FC, DAG))
Condition = getFCmpCodeWithoutNaN(Condition);
}
@@ -3793,7 +3813,7 @@ void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) {
ISD::CondCode Condition = getFCmpCondCode(predicate);
auto *FPMO = cast<FPMathOperator>(&I);
- if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
+ if (AreFCmpOperandsNonNaN(&I, DAG))
Condition = getFCmpCodeWithoutNaN(Condition);
SDNodeFlags Flags;
@@ -8534,7 +8554,7 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
case ISD::STRICT_FSETCCS: {
auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
- if (TM.Options.NoNaNsFPMath)
+ if (AreFCmpOperandsNonNaN(FPCmp, DAG))
Condition = getFCmpCodeWithoutNaN(Condition);
Opers.push_back(DAG.getCondCode(Condition));
break;
@@ -8817,11 +8837,8 @@ void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
CmpInst::Predicate CondCode = VPIntrin.getPredicate();
bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
if (IsFP) {
- // FIXME: Regular fcmps are FPMathOperators which may have fast-math (nnan)
- // flags, but calls that don't return floating-point types can't be
- // FPMathOperators, like vp.fcmp. This affects constrained fcmp too.
Condition = getFCmpCondCode(CondCode);
- if (TM.Options.NoNaNsFPMath)
+ if (AreFCmpOperandsNonNaN(&VPIntrin, DAG))
Condition = getFCmpCodeWithoutNaN(Condition);
} else {
Condition = getICmpCondCode(CondCode);
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
index fe074908a51a5..0b16eb2a17a8f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
@@ -635,7 +635,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp one <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp nnan one <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -882,7 +882,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ueq <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp nnan ueq <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -989,7 +989,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ugt <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp nnan ugt <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -1096,7 +1096,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp uge <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp nnan uge <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -1203,7 +1203,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ult <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp nnan ult <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -1310,7 +1310,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ule <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp nnan ule <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2163,7 +2163,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp one <vscale x 8 x half> %va, %splat
+ %vc = fcmp nnan one <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2410,7 +2410,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ueq <vscale x 8 x half> %va, %splat
+ %vc = fcmp nnan ueq <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2517,7 +2517,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ugt <vscale x 8 x half> %va, %splat
+ %vc = fcmp nnan ugt <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2624,7 +2624,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp uge <vscale x 8 x half> %va, %splat
+ %vc = fcmp nnan uge <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2731,7 +2731,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ult <vscale x 8 x half> %va, %splat
+ %vc = fcmp nnan ult <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2838,7 +2838,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ule <vscale x 8 x half> %va, %splat
+ %vc = fcmp nnan ule <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3415,7 +3415,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp one <vscale x 8 x float> %va, %splat
+ %vc = fcmp nnan one <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3546,7 +3546,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ueq <vscale x 8 x float> %va, %splat
+ %vc = fcmp nnan ueq <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3605,7 +3605,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ugt <vscale x 8 x float> %va, %splat
+ %vc = fcmp nnan ugt <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3664,7 +3664,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp uge <vscale x 8 x float> %va, %splat
+ %vc = fcmp nnan uge <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3723,7 +3723,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ult <vscale x 8 x float> %va, %splat
+ %vc = fcmp nnan ult <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3782,7 +3782,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ule <vscale x 8 x float> %va, %splat
+ %vc = fcmp nnan ule <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4249,7 +4249,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp one <vscale x 8 x double> %va, %splat
+ %vc = fcmp nnan one <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4380,7 +4380,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ueq <vscale x 8 x double> %va, %splat
+ %vc = fcmp nnan ueq <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4439,7 +4439,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ugt <vscale x 8 x double> %va, %splat
+ %vc = fcmp nnan ugt <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4498,7 +4498,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp uge <vscale x 8 x double> %va, %splat
+ %vc = fcmp nnan uge <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4557,7 +4557,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ult <vscale x 8 x double> %va, %splat
+ %vc = fcmp nnan ult <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4616,7 +4616,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ule <vscale x 8 x double> %va, %splat
+ %vc = fcmp nnan ule <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4798,5 +4798,3 @@ define <vscale x 16 x i1> @fcmp_oeq_vf_nx16f64(<vscale x 16 x double> %va) {
%vc = fcmp oeq <vscale x 16 x double> %va, zeroinitializer
ret <vscale x 16 x i1> %vc
}
-
-attributes #0 = { "no-nans-fp-math"="true" }
>From 3117c15fd4e348815a976af0dc5ebae72d4107b9 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Fri, 28 Nov 2025 21:19:32 +0800
Subject: [PATCH 02/24] [X86] Consider fast-math flags when combining select
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 144d6451b981f..145cf4c55cc5d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -48498,6 +48498,8 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
}
if (Opcode) {
+ // Propagate fast-math-flags.
+ SelectionDAG::FlagInserter FlagsInserter(DAG, N->getFlags());
if (IsStrict) {
SDValue Ret = DAG.getNode(Opcode == X86ISD::FMIN ? X86ISD::STRICT_FMIN
: X86ISD::STRICT_FMAX,
@@ -56151,8 +56153,9 @@ static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
// FMIN/FMAX are commutative if no NaNs and no negative zeros are allowed.
- if (!DAG.getTarget().Options.NoNaNsFPMath ||
- !DAG.getTarget().Options.NoSignedZerosFPMath)
+ if ((!DAG.getTarget().Options.NoNaNsFPMath && !N->getFlags().hasNoNaNs()) ||
+ (!DAG.getTarget().Options.NoSignedZerosFPMath &&
+ !N->getFlags().hasNoSignedZeros()))
return SDValue();
// If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
>From 05d70f949423fab14141fce109acc1f6802b6eb7 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Fri, 28 Nov 2025 21:36:44 +0800
Subject: [PATCH 03/24] Fix X86 tests
---
llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll | 16 +-
llvm/test/CodeGen/X86/avx-minmax.ll | 18 +-
llvm/test/CodeGen/X86/sse-minmax-finite.ll | 787 +++++++++++
llvm/test/CodeGen/X86/sse-minmax-unsafe.ll | 687 ++++++++++
llvm/test/CodeGen/X86/sse-minmax.ll | 1332 ++++++-------------
5 files changed, 1924 insertions(+), 916 deletions(-)
create mode 100644 llvm/test/CodeGen/X86/sse-minmax-finite.ll
create mode 100644 llvm/test/CodeGen/X86/sse-minmax-unsafe.ll
diff --git a/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll b/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
index 940fe8cf6ba75..9f072c6334c07 100644
--- a/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
+++ b/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
@@ -1,13 +1,17 @@
-; RUN: llc < %s -mtriple=i686-- -mattr=-sse | FileCheck %s -check-prefix=WITHNANS
-; RUN: llc < %s -mtriple=i686-- -mattr=-sse -enable-no-nans-fp-math | FileCheck %s -check-prefix=NONANS
+; RUN: llc < %s -mtriple=i686-- -mattr=-sse | FileCheck %s
-; WITHNANS-LABEL: test:
-; WITHNANS: setnp
-; NONANS-LABEL: test:
-; NONANS-NOT: setnp
define i32 @test(float %f) {
+; CHECK-LABEL: test:
+; CHECK: setnp
%tmp = fcmp oeq float %f, 0.000000e+00 ; <i1> [#uses=1]
%tmp.upgrd.1 = zext i1 %tmp to i32 ; <i32> [#uses=1]
ret i32 %tmp.upgrd.1
}
+define i32 @test_nnan(float %f) {
+; CHECK-LABEL: test_nnan:
+; CHECK-NOT: setnp
+ %tmp = fcmp nnan oeq float %f, 0.000000e+00 ; <i1> [#uses=1]
+ %tmp.upgrd.1 = zext i1 %tmp to i32 ; <i32> [#uses=1]
+ ret i32 %tmp.upgrd.1
+}
diff --git a/llvm/test/CodeGen/X86/avx-minmax.ll b/llvm/test/CodeGen/X86/avx-minmax.ll
index 8e4b6c6af4cb1..27864a9eefa8e 100644
--- a/llvm/test/CodeGen/X86/avx-minmax.ll
+++ b/llvm/test/CodeGen/X86/avx-minmax.ll
@@ -1,12 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -enable-no-nans-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s
define <2 x double> @maxpd(<2 x double> %x, <2 x double> %y) {
; CHECK-LABEL: maxpd:
; CHECK: # %bb.0:
; CHECK-NEXT: vmaxpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
- %max_is_x = fcmp oge <2 x double> %x, %y
+ %max_is_x = fcmp nnan oge <2 x double> %x, %y
%max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
ret <2 x double> %max
}
@@ -16,7 +16,7 @@ define <2 x double> @minpd(<2 x double> %x, <2 x double> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vminpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
- %min_is_x = fcmp ole <2 x double> %x, %y
+ %min_is_x = fcmp nnan ole <2 x double> %x, %y
%min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
ret <2 x double> %min
}
@@ -26,7 +26,7 @@ define <4 x float> @maxps(<4 x float> %x, <4 x float> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
- %max_is_x = fcmp oge <4 x float> %x, %y
+ %max_is_x = fcmp nnan oge <4 x float> %x, %y
%max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
ret <4 x float> %max
}
@@ -36,7 +36,7 @@ define <4 x float> @minps(<4 x float> %x, <4 x float> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vminps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
- %min_is_x = fcmp ole <4 x float> %x, %y
+ %min_is_x = fcmp nnan ole <4 x float> %x, %y
%min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
ret <4 x float> %min
}
@@ -46,7 +46,7 @@ define <4 x double> @vmaxpd(<4 x double> %x, <4 x double> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vmaxpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
- %max_is_x = fcmp oge <4 x double> %x, %y
+ %max_is_x = fcmp nnan oge <4 x double> %x, %y
%max = select <4 x i1> %max_is_x, <4 x double> %x, <4 x double> %y
ret <4 x double> %max
}
@@ -56,7 +56,7 @@ define <4 x double> @vminpd(<4 x double> %x, <4 x double> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vminpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
- %min_is_x = fcmp ole <4 x double> %x, %y
+ %min_is_x = fcmp nnan ole <4 x double> %x, %y
%min = select <4 x i1> %min_is_x, <4 x double> %x, <4 x double> %y
ret <4 x double> %min
}
@@ -66,7 +66,7 @@ define <8 x float> @vmaxps(<8 x float> %x, <8 x float> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
- %max_is_x = fcmp oge <8 x float> %x, %y
+ %max_is_x = fcmp nnan oge <8 x float> %x, %y
%max = select <8 x i1> %max_is_x, <8 x float> %x, <8 x float> %y
ret <8 x float> %max
}
@@ -76,7 +76,7 @@ define <8 x float> @vminps(<8 x float> %x, <8 x float> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vminps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
- %min_is_x = fcmp ole <8 x float> %x, %y
+ %min_is_x = fcmp nnan ole <8 x float> %x, %y
%min = select <8 x i1> %min_is_x, <8 x float> %x, <8 x float> %y
ret <8 x float> %min
}
diff --git a/llvm/test/CodeGen/X86/sse-minmax-finite.ll b/llvm/test/CodeGen/X86/sse-minmax-finite.ll
new file mode 100644
index 0000000000000..469637964d849
--- /dev/null
+++ b/llvm/test/CodeGen/X86/sse-minmax-finite.ll
@@ -0,0 +1,787 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s
+
+; Some of these patterns can be matched as SSE min or max. Some of
+; them can be matched provided that the operands are swapped.
+; Some of them can't be matched at all and require a comparison
+; and a conditional branch.
+
+; The naming convention is {,x_,y_}{o,u}{gt,lt,ge,le}{,_inverse}
+; _x: use 0.0 instead of %y
+; _y: use -0.0 instead of %y
+; _inverse : swap the arms of the select.
+
+define double @ogt(double %x, double %y) {
+; CHECK-LABEL: ogt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ogt double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @olt(double %x, double %y) {
+; CHECK-LABEL: olt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan olt double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ogt_inverse(double %x, double %y) {
+; CHECK-LABEL: ogt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ogt double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @olt_inverse(double %x, double %y) {
+; CHECK-LABEL: olt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan olt double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @oge(double %x, double %y) {
+; CHECK-LABEL: oge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan oge double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ole(double %x, double %y) {
+; CHECK-LABEL: ole:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ole double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @oge_inverse(double %x, double %y) {
+; RELAX-LABEL: oge_inverse:
+; RELAX: # %bb.0:
+; RELAX-NEXT: minsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: oge_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan oge double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ole_inverse(double %x, double %y) {
+; RELAX-LABEL: ole_inverse:
+; RELAX: # %bb.0:
+; RELAX-NEXT: maxsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: ole_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ole double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ogt_x(double %x) {
+; CHECK-LABEL: ogt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ogt double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @olt_x(double %x) {
+; CHECK-LABEL: olt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan olt double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ogt_inverse_x(double %x) {
+; CHECK-LABEL: ogt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ogt double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @olt_inverse_x(double %x) {
+; CHECK-LABEL: olt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan olt double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @oge_x(double %x) {
+; CHECK-LABEL: oge_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan oge double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ole_x(double %x) {
+; CHECK-LABEL: ole_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ole double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @oge_inverse_x(double %x) {
+; RELAX-LABEL: oge_inverse_x:
+; RELAX: # %bb.0:
+; RELAX-NEXT: xorpd %xmm1, %xmm1
+; RELAX-NEXT: minsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: oge_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan oge double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ole_inverse_x(double %x) {
+; RELAX-LABEL: ole_inverse_x:
+; RELAX: # %bb.0:
+; RELAX-NEXT: xorpd %xmm1, %xmm1
+; RELAX-NEXT: maxsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: ole_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ole double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ugt(double %x, double %y) {
+; RELAX-LABEL: ugt:
+; RELAX: # %bb.0:
+; RELAX-NEXT: maxsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: ugt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ugt double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ult(double %x, double %y) {
+; RELAX-LABEL: ult:
+; RELAX: # %bb.0:
+; RELAX-NEXT: minsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: ult:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ult double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ugt_inverse(double %x, double %y) {
+; CHECK-LABEL: ugt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ugt double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ult_inverse(double %x, double %y) {
+; CHECK-LABEL: ult_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ult double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @uge(double %x, double %y) {
+; CHECK-LABEL: uge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan uge double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ule(double %x, double %y) {
+; CHECK-LABEL: ule:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ule double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @uge_inverse(double %x, double %y) {
+; CHECK-LABEL: uge_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan uge double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ule_inverse(double %x, double %y) {
+; CHECK-LABEL: ule_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ule double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ugt_x(double %x) {
+; RELAX-LABEL: ugt_x:
+; RELAX: # %bb.0:
+; RELAX-NEXT: xorpd %xmm1, %xmm1
+; RELAX-NEXT: maxsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: ugt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ugt double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ult_x(double %x) {
+; RELAX-LABEL: ult_x:
+; RELAX: # %bb.0:
+; RELAX-NEXT: xorpd %xmm1, %xmm1
+; RELAX-NEXT: minsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: ult_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ult double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ugt_inverse_x(double %x) {
+; CHECK-LABEL: ugt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ugt double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ult_inverse_x(double %x) {
+; CHECK-LABEL: ult_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ult double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @uge_x(double %x) {
+; CHECK-LABEL: uge_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan uge double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ule_x(double %x) {
+; CHECK-LABEL: ule_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ule double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @uge_inverse_x(double %x) {
+; CHECK-LABEL: uge_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan uge double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ule_inverse_x(double %x) {
+; CHECK-LABEL: ule_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ule double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ogt_y(double %x) {
+; CHECK-LABEL: ogt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ogt double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @olt_y(double %x) {
+; CHECK-LABEL: olt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan olt double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ogt_inverse_y(double %x) {
+; CHECK-LABEL: ogt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ogt double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @olt_inverse_y(double %x) {
+; CHECK-LABEL: olt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan olt double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @oge_y(double %x) {
+; CHECK-LABEL: oge_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan oge double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ole_y(double %x) {
+; CHECK-LABEL: ole_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ole double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @oge_inverse_y(double %x) {
+; CHECK-LABEL: oge_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan oge double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ole_inverse_y(double %x) {
+; CHECK-LABEL: ole_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ole double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ugt_y(double %x) {
+; CHECK-LABEL: ugt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ugt double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ult_y(double %x) {
+; CHECK-LABEL: ult_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ult double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ugt_inverse_y(double %x) {
+; CHECK-LABEL: ugt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ugt double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ult_inverse_y(double %x) {
+; CHECK-LABEL: ult_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ult double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @uge_y(double %x) {
+; CHECK-LABEL: uge_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan uge double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ule_y(double %x) {
+; CHECK-LABEL: ule_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ule double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @uge_inverse_y(double %x) {
+; CHECK-LABEL: uge_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan uge double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ule_inverse_y(double %x) {
+; CHECK-LABEL: ule_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ule double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+; Test a few more misc. cases.
+
+define double @clampTo3k_a(double %x) {
+; CHECK-LABEL: clampTo3k_a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan ogt double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_b(double %x) {
+; CHECK-LABEL: clampTo3k_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan uge double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_c(double %x) {
+; CHECK-LABEL: clampTo3k_c:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan olt double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_d(double %x) {
+; CHECK-LABEL: clampTo3k_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan ule double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_e(double %x) {
+; CHECK-LABEL: clampTo3k_e:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan olt double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_f(double %x) {
+; CHECK-LABEL: clampTo3k_f:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan ule double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_g(double %x) {
+; CHECK-LABEL: clampTo3k_g:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan ogt double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_h(double %x) {
+; CHECK-LABEL: clampTo3k_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan uge double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_maxpd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxpd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nnan oge <2 x double> %x, %y
+ %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %max
+}
+
+define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_minpd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minpd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nnan ole <2 x double> %x, %y
+ %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %min
+}
+
+define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_maxps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nnan oge <4 x float> %x, %y
+ %max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %max
+}
+
+define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_minps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nnan ole <4 x float> %x, %y
+ %min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %min
+}
+
+define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
+; CHECK-LABEL: test_maxps_illegal_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nnan oge <2 x float> %x, %y
+ %max = select <2 x i1> %max_is_x, <2 x float> %x, <2 x float> %y
+ ret <2 x float> %max
+}
+
+define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
+; CHECK-LABEL: test_minps_illegal_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nnan ole <2 x float> %x, %y
+ %min = select <2 x i1> %min_is_x, <2 x float> %x, <2 x float> %y
+ ret <2 x float> %min
+}
+
+define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
+; CHECK-LABEL: test_maxps_illegal_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nnan oge <3 x float> %x, %y
+ %max = select <3 x i1> %max_is_x, <3 x float> %x, <3 x float> %y
+ ret <3 x float> %max
+}
+
+define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
+; CHECK-LABEL: test_minps_illegal_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nnan ole <3 x float> %x, %y
+ %min = select <3 x i1> %min_is_x, <3 x float> %x, <3 x float> %y
+ ret <3 x float> %min
+}
+
+; OSS-Fuzz #13838
+; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13838
+define float @ossfuzz13838(float %x) {
+; CHECK-LABEL: ossfuzz13838:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: retq
+bb:
+ %cmp2 = fcmp nnan fast olt float %x, 2.550000e+02
+ %B1 = urem i1 %cmp2, %cmp2
+ %min = select i1 %B1, float %x, float 2.550000e+02
+ %B = frem float %min, 0x47EFFFFFE0000000
+ %cmp1 = fcmp nnan fast olt float %B, 1.000000e+00
+ %r = select i1 %cmp1, float 1.000000e+00, float %min
+ ret float %r
+}
diff --git a/llvm/test/CodeGen/X86/sse-minmax-unsafe.ll b/llvm/test/CodeGen/X86/sse-minmax-unsafe.ll
new file mode 100644
index 0000000000000..88167250a5314
--- /dev/null
+++ b/llvm/test/CodeGen/X86/sse-minmax-unsafe.ll
@@ -0,0 +1,687 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s
+
+; With the nnan and nsz fast-math flags on the compares and selects,
+; all of these patterns can be matched as a single SSE min or max
+; instruction; where the operand order or the select arms are swapped,
+; the backend is free to commute the operands to do so.
+
+; The naming convention is {o,u}{gt,lt,ge,le}{,_inverse}{,_x,_y}
+; _x: use 0.0 instead of %y
+; _y: use -0.0 instead of %y
+; _inverse : swap the arms of the select.
+
+define double @ogt(double %x, double %y) {
+; CHECK-LABEL: ogt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ogt double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @olt(double %x, double %y) {
+; CHECK-LABEL: olt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan olt double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ogt_inverse(double %x, double %y) {
+; CHECK-LABEL: ogt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ogt double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @olt_inverse(double %x, double %y) {
+; CHECK-LABEL: olt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan olt double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @oge(double %x, double %y) {
+; CHECK-LABEL: oge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan oge double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ole(double %x, double %y) {
+; CHECK-LABEL: ole:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ole double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @oge_inverse(double %x, double %y) {
+; CHECK-LABEL: oge_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan oge double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ole_inverse(double %x, double %y) {
+; CHECK-LABEL: ole_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ole double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ogt_x(double %x) {
+; CHECK-LABEL: ogt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ogt double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @olt_x(double %x) {
+; CHECK-LABEL: olt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan olt double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ogt_inverse_x(double %x) {
+; CHECK-LABEL: ogt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ogt double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @olt_inverse_x(double %x) {
+; CHECK-LABEL: olt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan olt double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @oge_x(double %x) {
+; CHECK-LABEL: oge_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan oge double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ole_x(double %x) {
+; CHECK-LABEL: ole_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ole double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @oge_inverse_x(double %x) {
+; CHECK-LABEL: oge_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan oge double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ole_inverse_x(double %x) {
+; CHECK-LABEL: ole_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ole double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ugt(double %x, double %y) {
+; CHECK-LABEL: ugt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ugt double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ult(double %x, double %y) {
+; CHECK-LABEL: ult:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ult double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ugt_inverse(double %x, double %y) {
+; CHECK-LABEL: ugt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ugt double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ult_inverse(double %x, double %y) {
+; CHECK-LABEL: ult_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ult double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @uge(double %x, double %y) {
+; CHECK-LABEL: uge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan uge double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ule(double %x, double %y) {
+; CHECK-LABEL: ule:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ule double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @uge_inverse(double %x, double %y) {
+; CHECK-LABEL: uge_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan uge double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ule_inverse(double %x, double %y) {
+; CHECK-LABEL: ule_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ule double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ugt_x(double %x) {
+; CHECK-LABEL: ugt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ugt double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ult_x(double %x) {
+; CHECK-LABEL: ult_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ult double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ugt_inverse_x(double %x) {
+; CHECK-LABEL: ugt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ugt double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ult_inverse_x(double %x) {
+; CHECK-LABEL: ult_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ult double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @uge_x(double %x) {
+; CHECK-LABEL: uge_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan uge double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ule_x(double %x) {
+; CHECK-LABEL: ule_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ule double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @uge_inverse_x(double %x) {
+; CHECK-LABEL: uge_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan uge double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ule_inverse_x(double %x) {
+; CHECK-LABEL: ule_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ule double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ogt_y(double %x) {
+; CHECK-LABEL: ogt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ogt double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @olt_y(double %x) {
+; CHECK-LABEL: olt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan olt double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ogt_inverse_y(double %x) {
+; CHECK-LABEL: ogt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ogt double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @olt_inverse_y(double %x) {
+; CHECK-LABEL: olt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan olt double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @oge_y(double %x) {
+; CHECK-LABEL: oge_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan oge double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ole_y(double %x) {
+; CHECK-LABEL: ole_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ole double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @oge_inverse_y(double %x) {
+; CHECK-LABEL: oge_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan oge double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ole_inverse_y(double %x) {
+; CHECK-LABEL: ole_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ole double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ugt_y(double %x) {
+; CHECK-LABEL: ugt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ugt double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ult_y(double %x) {
+; CHECK-LABEL: ult_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ult double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ugt_inverse_y(double %x) {
+; CHECK-LABEL: ugt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ugt double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ult_inverse_y(double %x) {
+; CHECK-LABEL: ult_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ult double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @uge_y(double %x) {
+; CHECK-LABEL: uge_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan uge double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ule_y(double %x) {
+; CHECK-LABEL: ule_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ule double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @uge_inverse_y(double %x) {
+; CHECK-LABEL: uge_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan uge double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ule_inverse_y(double %x) {
+; CHECK-LABEL: ule_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ule double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+; Test a few more misc. cases.
+
+define double @clampTo3k_a(double %x) {
+; CHECK-LABEL: clampTo3k_a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan ogt double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_b(double %x) {
+; CHECK-LABEL: clampTo3k_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan uge double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_c(double %x) {
+; CHECK-LABEL: clampTo3k_c:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan olt double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_d(double %x) {
+; CHECK-LABEL: clampTo3k_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan ule double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_e(double %x) {
+; CHECK-LABEL: clampTo3k_e:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan olt double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_f(double %x) {
+; CHECK-LABEL: clampTo3k_f:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan ule double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_g(double %x) {
+; CHECK-LABEL: clampTo3k_g:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan ogt double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_h(double %x) {
+; CHECK-LABEL: clampTo3k_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan uge double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_maxpd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxpd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nsz nnan oge <2 x double> %x, %y
+ %max = select nsz nnan <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %max
+}
+
+define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_minpd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minpd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nsz nnan ole <2 x double> %x, %y
+ %min = select nsz nnan <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %min
+}
+
+define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_maxps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nsz nnan oge <4 x float> %x, %y
+ %max = select nsz nnan <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %max
+}
+
+define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_minps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nsz nnan ole <4 x float> %x, %y
+ %min = select nsz nnan <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %min
+}
+
+define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
+; CHECK-LABEL: test_maxps_illegal_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nsz nnan oge <2 x float> %x, %y
+ %max = select nsz nnan <2 x i1> %max_is_x, <2 x float> %x, <2 x float> %y
+ ret <2 x float> %max
+}
+
+define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
+; CHECK-LABEL: test_minps_illegal_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nsz nnan ole <2 x float> %x, %y
+ %min = select nsz nnan <2 x i1> %min_is_x, <2 x float> %x, <2 x float> %y
+ ret <2 x float> %min
+}
+
+define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
+; CHECK-LABEL: test_maxps_illegal_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nsz nnan oge <3 x float> %x, %y
+ %max = select nsz nnan <3 x i1> %max_is_x, <3 x float> %x, <3 x float> %y
+ ret <3 x float> %max
+}
+
+define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
+; CHECK-LABEL: test_minps_illegal_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nsz nnan ole <3 x float> %x, %y
+ %min = select nsz nnan <3 x i1> %min_is_x, <3 x float> %x, <3 x float> %y
+ ret <3 x float> %min
+}
+
+; OSS-Fuzz #13838
+; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13838
+define float @ossfuzz13838(float %x) {
+; CHECK-LABEL: ossfuzz13838:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: retq
+bb:
+ %cmp2 = fcmp nsz nnan fast olt float %x, 2.550000e+02
+ %B1 = urem i1 %cmp2, %cmp2
+ %min = select nsz nnan i1 %B1, float %x, float 2.550000e+02
+ %B = frem float %min, 0x47EFFFFFE0000000
+ %cmp1 = fcmp nsz nnan fast olt float %B, 1.000000e+00
+ %r = select nsz nnan i1 %cmp1, float 1.000000e+00, float %min
+ ret float %r
+}
diff --git a/llvm/test/CodeGen/X86/sse-minmax.ll b/llvm/test/CodeGen/X86/sse-minmax.ll
index 7904b21a3b1fa..2b97f98450973 100644
--- a/llvm/test/CodeGen/X86/sse-minmax.ll
+++ b/llvm/test/CodeGen/X86/sse-minmax.ll
@@ -1,7 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s --check-prefix=ALL --check-prefix=STRICT
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 -enable-no-signed-zeros-fp-math -enable-no-nans-fp-math | FileCheck %s --check-prefix=ALL --check-prefix=RELAX --check-prefix=UNSAFE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 -enable-no-nans-fp-math | FileCheck %s --check-prefix=ALL --check-prefix=RELAX --check-prefix=FINITE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s
; Some of these patterns can be matched as SSE min or max. Some of
; them can be matched provided that the operands are swapped.
@@ -14,972 +12,640 @@
; _inverse : swap the arms of the select.
define double @ogt(double %x, double %y) {
-; ALL-LABEL: ogt:
-; ALL: # %bb.0:
-; ALL-NEXT: maxsd %xmm1, %xmm0
-; ALL-NEXT: retq
+; CHECK-LABEL: ogt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ogt double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @olt(double %x, double %y) {
-; ALL-LABEL: olt:
-; ALL: # %bb.0:
-; ALL-NEXT: minsd %xmm1, %xmm0
-; ALL-NEXT: retq
+; CHECK-LABEL: olt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp olt double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @ogt_inverse(double %x, double %y) {
-; STRICT-LABEL: ogt_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ogt_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ogt_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ogt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ogt double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @olt_inverse(double %x, double %y) {
-; STRICT-LABEL: olt_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: olt_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: olt_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: olt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp olt double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @oge(double %x, double %y) {
-; STRICT-LABEL: oge:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: cmplesd %xmm2, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: oge:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: oge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: cmplesd %xmm2, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp oge double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @ole(double %x, double %y) {
-; STRICT-LABEL: ole:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: cmplesd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ole:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ole:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: cmplesd %xmm1, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ole double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @oge_inverse(double %x, double %y) {
-; STRICT-LABEL: oge_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: cmplesd %xmm2, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: oge_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: oge_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: oge_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: cmplesd %xmm2, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp oge double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @ole_inverse(double %x, double %y) {
-; STRICT-LABEL: ole_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: cmplesd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ole_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ole_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ole_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: cmplesd %xmm1, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ole double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @ogt_x(double %x) {
-; ALL-LABEL: ogt_x:
-; ALL: # %bb.0:
-; ALL-NEXT: xorpd %xmm1, %xmm1
-; ALL-NEXT: maxsd %xmm1, %xmm0
-; ALL-NEXT: retq
+; CHECK-LABEL: ogt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ogt double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @olt_x(double %x) {
-; ALL-LABEL: olt_x:
-; ALL: # %bb.0:
-; ALL-NEXT: xorpd %xmm1, %xmm1
-; ALL-NEXT: minsd %xmm1, %xmm0
-; ALL-NEXT: retq
+; CHECK-LABEL: olt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp olt double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @ogt_inverse_x(double %x) {
-; STRICT-LABEL: ogt_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ogt_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ogt_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ogt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ogt double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @olt_inverse_x(double %x) {
-; STRICT-LABEL: olt_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: olt_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: olt_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: olt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp olt double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @oge_x(double %x) {
-; STRICT-LABEL: oge_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: cmplesd %xmm0, %xmm1
-; STRICT-NEXT: andpd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: oge_x:
-; RELAX: # %bb.0:
-; RELAX-NEXT: xorpd %xmm1, %xmm1
-; RELAX-NEXT: maxsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: oge_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: cmplesd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp oge double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @ole_x(double %x) {
-; STRICT-LABEL: ole_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: cmplesd %xmm1, %xmm2
-; STRICT-NEXT: andpd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ole_x:
-; RELAX: # %bb.0:
-; RELAX-NEXT: xorpd %xmm1, %xmm1
-; RELAX-NEXT: minsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ole_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: cmplesd %xmm1, %xmm2
+; CHECK-NEXT: andpd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ole double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @oge_inverse_x(double %x) {
-; STRICT-LABEL: oge_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: cmplesd %xmm0, %xmm1
-; STRICT-NEXT: andnpd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: oge_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: oge_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: oge_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: cmplesd %xmm0, %xmm1
+; CHECK-NEXT: andnpd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp oge double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @ole_inverse_x(double %x) {
-; STRICT-LABEL: ole_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm2, %xmm2
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: cmplesd %xmm2, %xmm1
-; STRICT-NEXT: andnpd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ole_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ole_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ole_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm2, %xmm2
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: cmplesd %xmm2, %xmm1
+; CHECK-NEXT: andnpd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ole double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @ugt(double %x, double %y) {
-; STRICT-LABEL: ugt:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: cmpnlesd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
; RELAX-LABEL: ugt:
; RELAX: # %bb.0:
; RELAX-NEXT: maxsd %xmm1, %xmm0
; RELAX-NEXT: retq
+; CHECK-LABEL: ugt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ugt double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @ult(double %x, double %y) {
-; STRICT-LABEL: ult:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: cmpnlesd %xmm2, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
; RELAX-LABEL: ult:
; RELAX: # %bb.0:
; RELAX-NEXT: minsd %xmm1, %xmm0
; RELAX-NEXT: retq
+; CHECK-LABEL: ult:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: cmpnlesd %xmm2, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ult double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @ugt_inverse(double %x, double %y) {
-; STRICT-LABEL: ugt_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: cmpnlesd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ugt_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ugt_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ugt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ugt double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @ult_inverse(double %x, double %y) {
-; STRICT-LABEL: ult_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: cmpnlesd %xmm2, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ult_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ult_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ult_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: cmpnlesd %xmm2, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ult double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @uge(double %x, double %y) {
-; STRICT-LABEL: uge:
-; STRICT: # %bb.0:
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: uge:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: uge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp uge double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @ule(double %x, double %y) {
-; STRICT-LABEL: ule:
-; STRICT: # %bb.0:
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ule:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ule:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ule double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @uge_inverse(double %x, double %y) {
-; STRICT-LABEL: uge_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: minsd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: uge_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: uge_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: uge_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp uge double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @ule_inverse(double %x, double %y) {
-; STRICT-LABEL: ule_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: maxsd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ule_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ule_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ule_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ule double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @ugt_x(double %x) {
-; STRICT-LABEL: ugt_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: cmpnlesd %xmm1, %xmm2
-; STRICT-NEXT: andpd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
; RELAX-LABEL: ugt_x:
; RELAX: # %bb.0:
; RELAX-NEXT: xorpd %xmm1, %xmm1
; RELAX-NEXT: maxsd %xmm1, %xmm0
; RELAX-NEXT: retq
+; CHECK-LABEL: ugt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: cmpnlesd %xmm1, %xmm2
+; CHECK-NEXT: andpd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ugt double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @ult_x(double %x) {
-; STRICT-LABEL: ult_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: cmpnlesd %xmm0, %xmm1
-; STRICT-NEXT: andpd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
; RELAX-LABEL: ult_x:
; RELAX: # %bb.0:
; RELAX-NEXT: xorpd %xmm1, %xmm1
; RELAX-NEXT: minsd %xmm1, %xmm0
; RELAX-NEXT: retq
+; CHECK-LABEL: ult_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: cmpnlesd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ult double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @ugt_inverse_x(double %x) {
-; STRICT-LABEL: ugt_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm2, %xmm2
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
-; STRICT-NEXT: andnpd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ugt_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ugt_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ugt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm2, %xmm2
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: cmpnlesd %xmm2, %xmm1
+; CHECK-NEXT: andnpd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ugt double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @ult_inverse_x(double %x) {
-; STRICT-LABEL: ult_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: cmpnlesd %xmm0, %xmm1
-; STRICT-NEXT: andnpd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ult_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ult_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ult_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: cmpnlesd %xmm0, %xmm1
+; CHECK-NEXT: andnpd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ult double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @uge_x(double %x) {
-; STRICT-LABEL: uge_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: uge_x:
-; RELAX: # %bb.0:
-; RELAX-NEXT: xorpd %xmm1, %xmm1
-; RELAX-NEXT: maxsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: uge_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp uge double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @ule_x(double %x) {
-; STRICT-LABEL: ule_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ule_x:
-; RELAX: # %bb.0:
-; RELAX-NEXT: xorpd %xmm1, %xmm1
-; RELAX-NEXT: minsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ule_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ule double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @uge_inverse_x(double %x) {
-; STRICT-LABEL: uge_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: minsd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: uge_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: uge_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: uge_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp uge double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @ule_inverse_x(double %x) {
-; STRICT-LABEL: ule_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: maxsd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ule_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ule_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ule_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ule double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @ogt_y(double %x) {
-; ALL-LABEL: ogt_y:
-; ALL: # %bb.0:
-; ALL-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; ALL-NEXT: retq
+; CHECK-LABEL: ogt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%c = fcmp ogt double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @olt_y(double %x) {
-; ALL-LABEL: olt_y:
-; ALL: # %bb.0:
-; ALL-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; ALL-NEXT: retq
+; CHECK-LABEL: olt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%c = fcmp olt double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @ogt_inverse_y(double %x) {
-; STRICT-LABEL: ogt_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ogt_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ogt_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ogt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ogt double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
define double @olt_inverse_y(double %x) {
-; STRICT-LABEL: olt_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: olt_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: olt_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: olt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp olt double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
define double @oge_y(double %x) {
-; STRICT-LABEL: oge_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: cmplesd %xmm1, %xmm0
-; STRICT-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: oge_y:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: oge_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: cmplesd %xmm1, %xmm0
+; CHECK-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp oge double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @ole_y(double %x) {
-; STRICT-LABEL: ole_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: cmplesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ole_y:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ole_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: cmplesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ole double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @oge_inverse_y(double %x) {
-; STRICT-LABEL: oge_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: cmplesd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: oge_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: oge_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: oge_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: cmplesd %xmm1, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp oge double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
define double @ole_inverse_y(double %x) {
-; STRICT-LABEL: ole_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: cmplesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ole_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ole_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ole_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: cmplesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ole double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
define double @ugt_y(double %x) {
-; STRICT-LABEL: ugt_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: cmpnlesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
; RELAX-LABEL: ugt_y:
; RELAX: # %bb.0:
; RELAX-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; RELAX-NEXT: retq
+; CHECK-LABEL: ugt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: cmpnlesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ugt double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @ult_y(double %x) {
-; STRICT-LABEL: ult_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: cmpnlesd %xmm1, %xmm0
-; STRICT-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
; RELAX-LABEL: ult_y:
; RELAX: # %bb.0:
; RELAX-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; RELAX-NEXT: retq
+; CHECK-LABEL: ult_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ult double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @ugt_inverse_y(double %x) {
-; STRICT-LABEL: ugt_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: cmpnlesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ugt_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ugt_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ugt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: cmpnlesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ugt double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
define double @ult_inverse_y(double %x) {
-; STRICT-LABEL: ult_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: cmpnlesd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ult_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ult_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ult_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ult double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
define double @uge_y(double %x) {
-; STRICT-LABEL: uge_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: uge_y:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: uge_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp uge double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @ule_y(double %x) {
-; STRICT-LABEL: ule_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ule_y:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ule_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ule double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @uge_inverse_y(double %x) {
-; STRICT-LABEL: uge_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: uge_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: uge_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: uge_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%c = fcmp uge double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
define double @ule_inverse_y(double %x) {
-; STRICT-LABEL: ule_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ule_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ule_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ule_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%c = fcmp ule double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
@@ -988,332 +654,196 @@ define double @ule_inverse_y(double %x) {
; Test a few more misc. cases.
define double @clampTo3k_a(double %x) {
-; STRICT-LABEL: clampTo3k_a:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_a:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_a:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp ogt double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define double @clampTo3k_b(double %x) {
-; STRICT-LABEL: clampTo3k_b:
-; STRICT: # %bb.0:
-; STRICT-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_b:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_b:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp uge double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define double @clampTo3k_c(double %x) {
-; STRICT-LABEL: clampTo3k_c:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_c:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_c:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_c:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp olt double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define double @clampTo3k_d(double %x) {
-; STRICT-LABEL: clampTo3k_d:
-; STRICT: # %bb.0:
-; STRICT-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_d:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_d:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp ule double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define double @clampTo3k_e(double %x) {
-; STRICT-LABEL: clampTo3k_e:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_e:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_e:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_e:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp olt double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define double @clampTo3k_f(double %x) {
-; STRICT-LABEL: clampTo3k_f:
-; STRICT: # %bb.0:
-; STRICT-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_f:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_f:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_f:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp ule double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define double @clampTo3k_g(double %x) {
-; STRICT-LABEL: clampTo3k_g:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_g:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_g:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_g:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp ogt double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define double @clampTo3k_h(double %x) {
-; STRICT-LABEL: clampTo3k_h:
-; STRICT: # %bb.0:
-; STRICT-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_h:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_h:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp uge double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y) {
-; STRICT-LABEL: test_maxpd:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: cmplepd %xmm2, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_maxpd:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxpd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_maxpd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: cmplepd %xmm2, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%max_is_x = fcmp oge <2 x double> %x, %y
%max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
ret <2 x double> %max
}
define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y) {
-; STRICT-LABEL: test_minpd:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: cmplepd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_minpd:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minpd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_minpd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: cmplepd %xmm1, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%min_is_x = fcmp ole <2 x double> %x, %y
%min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
ret <2 x double> %min
}
define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y) {
-; STRICT-LABEL: test_maxps:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: cmpleps %xmm2, %xmm0
-; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_maxps:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxps %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_maxps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movaps %xmm0, %xmm2
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: cmpleps %xmm2, %xmm0
+; CHECK-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%max_is_x = fcmp oge <4 x float> %x, %y
%max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
ret <4 x float> %max
}
define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y) {
-; STRICT-LABEL: test_minps:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
-; STRICT-NEXT: cmpleps %xmm1, %xmm0
-; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_minps:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minps %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_minps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movaps %xmm0, %xmm2
+; CHECK-NEXT: cmpleps %xmm1, %xmm0
+; CHECK-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%min_is_x = fcmp ole <4 x float> %x, %y
%min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
ret <4 x float> %min
}
define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
-; STRICT-LABEL: test_maxps_illegal_v2f32:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: cmpleps %xmm2, %xmm0
-; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_maxps_illegal_v2f32:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxps %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_maxps_illegal_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movaps %xmm0, %xmm2
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: cmpleps %xmm2, %xmm0
+; CHECK-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%max_is_x = fcmp oge <2 x float> %x, %y
%max = select <2 x i1> %max_is_x, <2 x float> %x, <2 x float> %y
ret <2 x float> %max
}
define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
-; STRICT-LABEL: test_minps_illegal_v2f32:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
-; STRICT-NEXT: cmpleps %xmm1, %xmm0
-; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_minps_illegal_v2f32:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minps %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_minps_illegal_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movaps %xmm0, %xmm2
+; CHECK-NEXT: cmpleps %xmm1, %xmm0
+; CHECK-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%min_is_x = fcmp ole <2 x float> %x, %y
%min = select <2 x i1> %min_is_x, <2 x float> %x, <2 x float> %y
ret <2 x float> %min
}
define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
-; STRICT-LABEL: test_maxps_illegal_v3f32:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: cmpleps %xmm2, %xmm0
-; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_maxps_illegal_v3f32:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxps %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_maxps_illegal_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movaps %xmm0, %xmm2
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: cmpleps %xmm2, %xmm0
+; CHECK-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%max_is_x = fcmp oge <3 x float> %x, %y
%max = select <3 x i1> %max_is_x, <3 x float> %x, <3 x float> %y
ret <3 x float> %max
}
define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
-; STRICT-LABEL: test_minps_illegal_v3f32:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
-; STRICT-NEXT: cmpleps %xmm1, %xmm0
-; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_minps_illegal_v3f32:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minps %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_minps_illegal_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movaps %xmm0, %xmm2
+; CHECK-NEXT: cmpleps %xmm1, %xmm0
+; CHECK-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%min_is_x = fcmp ole <3 x float> %x, %y
%min = select <3 x i1> %min_is_x, <3 x float> %x, <3 x float> %y
ret <3 x float> %min
@@ -1322,10 +852,10 @@ define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
; OSS-Fuzz #13838
; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13838
define float @ossfuzz13838(float %x) {
-; ALL-LABEL: ossfuzz13838:
-; ALL: # %bb.0: # %bb
-; ALL-NEXT: movss {{.*#+}} xmm0 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
-; ALL-NEXT: retq
+; CHECK-LABEL: ossfuzz13838:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: retq
bb:
%cmp2 = fcmp fast olt float %x, 2.550000e+02
%B1 = urem i1 %cmp2, %cmp2
>From 43a997e5e5830c6078d1442c4180156bd2e8b339 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Fri, 28 Nov 2025 21:52:33 +0800
Subject: [PATCH 04/24] Fix PowerPC tests
---
llvm/test/CodeGen/PowerPC/change-no-infs.ll | 67 -
llvm/test/CodeGen/PowerPC/fsel.ll | 20 +-
llvm/test/CodeGen/PowerPC/scalar-equal.ll | 110 +-
llvm/test/CodeGen/PowerPC/scalar_cmp.ll | 1494 +++++++------------
4 files changed, 621 insertions(+), 1070 deletions(-)
delete mode 100644 llvm/test/CodeGen/PowerPC/change-no-infs.ll
diff --git a/llvm/test/CodeGen/PowerPC/change-no-infs.ll b/llvm/test/CodeGen/PowerPC/change-no-infs.ll
deleted file mode 100644
index 0cd5eb5408e3e..0000000000000
--- a/llvm/test/CodeGen/PowerPC/change-no-infs.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; Check that we can enable/disable NoInfsFPMath and NoNaNsInFPMath via function
-; attributes. An attribute on one function should not magically apply to the
-; next one.
-
-; RUN: llc < %s -mtriple=powerpc64-unknown-unknown -mcpu=pwr7 -mattr=-vsx \
-; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=SAFE
-
-; RUN: llc < %s -mtriple=powerpc64-unknown-unknown -mcpu=pwr7 -mattr=-vsx \
-; RUN: -enable-no-infs-fp-math -enable-no-nans-fp-math \
-; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=UNSAFE
-
-; The fcmp+select in these functions should be converted to a fsel instruction
-; when both NoInfsFPMath and NoNaNsInFPMath are enabled.
-
-; CHECK-LABEL: default0:
-define double @default0(double %a, double %y, double %z) {
-entry:
-; SAFE-NOT: fsel
-; UNSAFE: fsel
- %cmp = fcmp ult double %a, 0.000000e+00
- %z.y = select i1 %cmp, double %z, double %y
- ret double %z.y
-}
-
-; CHECK-LABEL: unsafe_math_off:
-define double @unsafe_math_off(double %a, double %y, double %z) #0 #2 {
-entry:
-; SAFE-NOT: fsel
-; UNSAFE-NOT: fsel
- %cmp = fcmp ult double %a, 0.000000e+00
- %z.y = select i1 %cmp, double %z, double %y
- ret double %z.y
-}
-
-; CHECK-LABEL: default1:
-define double @default1(double %a, double %y, double %z) {
-; SAFE-NOT: fsel
-; UNSAFE: fsel
- %cmp = fcmp ult double %a, 0.000000e+00
- %z.y = select i1 %cmp, double %z, double %y
- ret double %z.y
-}
-
-; CHECK-LABEL: unsafe_math_on:
-define double @unsafe_math_on(double %a, double %y, double %z) #1 #3 {
-entry:
-; SAFE-NOT: fsel
-; UNSAFE-NOT: fsel
- %cmp = fcmp ult double %a, 0.000000e+00
- %z.y = select i1 %cmp, double %z, double %y
- ret double %z.y
-}
-
-; CHECK-LABEL: default2:
-define double @default2(double %a, double %y, double %z) {
-; SAFE-NOT: fsel
-; UNSAFE: fsel
- %cmp = fcmp ult double %a, 0.000000e+00
- %z.y = select i1 %cmp, double %z, double %y
- ret double %z.y
-}
-
-attributes #0 = { "no-infs-fp-math"="false" }
-attributes #1 = { "no-nans-fp-math"="false" }
-
-attributes #2 = { "no-infs-fp-math"="false" }
-attributes #3 = { "no-infs-fp-math"="true" }
diff --git a/llvm/test/CodeGen/PowerPC/fsel.ll b/llvm/test/CodeGen/PowerPC/fsel.ll
index dea442d8404e1..13d94d1c28822 100644
--- a/llvm/test/CodeGen/PowerPC/fsel.ll
+++ b/llvm/test/CodeGen/PowerPC/fsel.ll
@@ -1,12 +1,12 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -enable-no-infs-fp-math -enable-no-nans-fp-math -mattr=-vsx | FileCheck -check-prefix=CHECK-FM %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -enable-no-infs-fp-math -enable-no-nans-fp-math -mattr=+vsx | FileCheck -check-prefix=CHECK-FM-VSX %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -enable-no-infs-fp-math -mattr=-vsx | FileCheck -check-prefix=CHECK-FM %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -enable-no-infs-fp-math -mattr=+vsx | FileCheck -check-prefix=CHECK-FM-VSX %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
define double @zerocmp1(double %a, double %y, double %z) #0 {
entry:
- %cmp = fcmp ult double %a, 0.000000e+00
+ %cmp = fcmp nnan ult double %a, 0.000000e+00
%z.y = select i1 %cmp, double %z, double %y
ret double %z.y
@@ -25,7 +25,7 @@ entry:
define double @zerocmp2(double %a, double %y, double %z) #0 {
entry:
- %cmp = fcmp ogt double %a, 0.000000e+00
+ %cmp = fcmp nnan ogt double %a, 0.000000e+00
%y.z = select i1 %cmp, double %y, double %z
ret double %y.z
@@ -46,7 +46,7 @@ entry:
define double @zerocmp3(double %a, double %y, double %z) #0 {
entry:
- %cmp = fcmp oeq double %a, 0.000000e+00
+ %cmp = fcmp nnan oeq double %a, 0.000000e+00
%y.z = select i1 %cmp, double %y, double %z
ret double %y.z
@@ -69,7 +69,7 @@ entry:
define double @min1(double %a, double %b) #0 {
entry:
- %cmp = fcmp ole double %a, %b
+ %cmp = fcmp nnan ole double %a, %b
%cond = select i1 %cmp, double %a, double %b
ret double %cond
@@ -90,7 +90,7 @@ entry:
define double @max1(double %a, double %b) #0 {
entry:
- %cmp = fcmp oge double %a, %b
+ %cmp = fcmp nnan oge double %a, %b
%cond = select i1 %cmp, double %a, double %b
ret double %cond
@@ -111,7 +111,7 @@ entry:
define double @cmp1(double %a, double %b, double %y, double %z) #0 {
entry:
- %cmp = fcmp ult double %a, %b
+ %cmp = fcmp nnan ult double %a, %b
%z.y = select i1 %cmp, double %z, double %y
ret double %z.y
@@ -132,7 +132,7 @@ entry:
define double @cmp2(double %a, double %b, double %y, double %z) #0 {
entry:
- %cmp = fcmp ogt double %a, %b
+ %cmp = fcmp nnan ogt double %a, %b
%y.z = select i1 %cmp, double %y, double %z
ret double %y.z
@@ -153,7 +153,7 @@ entry:
define double @cmp3(double %a, double %b, double %y, double %z) #0 {
entry:
- %cmp = fcmp oeq double %a, %b
+ %cmp = fcmp nnan oeq double %a, %b
%y.z = select i1 %cmp, double %y, double %z
ret double %y.z
diff --git a/llvm/test/CodeGen/PowerPC/scalar-equal.ll b/llvm/test/CodeGen/PowerPC/scalar-equal.ll
index c0b11b47236a9..de829b5d54dee 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-equal.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-equal.ll
@@ -1,57 +1,31 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names \
-; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \
-; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=FAST-P8
-; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names \
-; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \
-; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=FAST-P9
; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -verify-machineinstrs \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=NO-FAST-P9
+; RUN: --check-prefix=P9
; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -verify-machineinstrs \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=NO-FAST-P8
+; RUN: --check-prefix=P8
define double @testoeq(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: testoeq:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: xsnegdp f0, f0
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: testoeq:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: xsnegdp f0, f0
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
-; FAST-P9-NEXT: blr
+; P9-LABEL: testoeq:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xscmpudp cr0, f1, f2
+; P9-NEXT: beq cr0, .LBB0_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB0_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
;
-; NO-FAST-P9-LABEL: testoeq:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: beq cr0, .LBB0_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB0_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testoeq:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: beq cr0, .LBB0_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB0_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
+; P8-LABEL: testoeq:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: beq cr0, .LBB0_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB0_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
entry:
%cmp = fcmp oeq double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -59,37 +33,21 @@ entry:
}
define double @testoeq_fast(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: testoeq_fast:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: xsnegdp f0, f0
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: testoeq_fast:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: xsnegdp f0, f0
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testoeq_fast:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT: xsnegdp f0, f0
-; NO-FAST-P9-NEXT: fsel f1, f0, f1, f4
-; NO-FAST-P9-NEXT: blr
+; P9-LABEL: testoeq_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubdp f0, f1, f2
+; P9-NEXT: fsel f1, f0, f3, f4
+; P9-NEXT: xsnegdp f0, f0
+; P9-NEXT: fsel f1, f0, f1, f4
+; P9-NEXT: blr
;
-; NO-FAST-P8-LABEL: testoeq_fast:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT: xsnegdp f0, f0
-; NO-FAST-P8-NEXT: fsel f1, f0, f1, f4
-; NO-FAST-P8-NEXT: blr
+; P8-LABEL: testoeq_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubdp f0, f1, f2
+; P8-NEXT: fsel f1, f0, f3, f4
+; P8-NEXT: xsnegdp f0, f0
+; P8-NEXT: fsel f1, f0, f1, f4
+; P8-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz oeq double %a, %b
%cond = select nnan ninf nsz i1 %cmp, double %c, double %d
diff --git a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
index 881d1f4c4093b..878b7f2449141 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
@@ -1,58 +1,36 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names \
; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \
+; RUN: --enable-no-nans-fp-math \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=FAST-P8
+; RUN: --check-prefix=P8
; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names \
; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \
+; RUN: --enable-no-nans-fp-math \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=FAST-P9
-; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -verify-machineinstrs \
-; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=NO-FAST-P8
-; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -verify-machineinstrs \
-; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=NO-FAST-P9
+; RUN: --check-prefix=P9
; Test oeq
define float @select_oeq_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_oeq_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f1, f2
-; FAST-P8-NEXT: xsnegdp f1, f0
-; FAST-P8-NEXT: fsel f0, f0, f3, f4
-; FAST-P8-NEXT: fsel f1, f1, f0, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_oeq_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f1, f2
-; FAST-P9-NEXT: xsnegdp f1, f0
-; FAST-P9-NEXT: fsel f0, f0, f3, f4
-; FAST-P9-NEXT: fsel f1, f1, f0, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_oeq_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: beq cr0, .LBB0_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB0_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_oeq_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: beq cr0, .LBB0_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB0_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_oeq_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: beq cr0, .LBB0_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB0_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_oeq_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: beq cr0, .LBB0_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB0_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp oeq float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -60,41 +38,25 @@ entry:
}
define float @select_oeq_float_nsz(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_oeq_float_nsz:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: xssubsp f1, f1, f2
-; FAST-P8-NEXT: fsel f1, f1, f3, f4
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_oeq_float_nsz:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: xssubsp f1, f1, f2
-; FAST-P9-NEXT: fsel f1, f1, f3, f4
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_oeq_float_nsz:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: beq cr0, .LBB1_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB1_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_oeq_float_nsz:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: beq cr0, .LBB1_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB1_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_oeq_float_nsz:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: beq cr0, .LBB1_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB1_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_oeq_float_nsz:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: beq cr0, .LBB1_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB1_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp nsz oeq float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -102,41 +64,25 @@ entry:
}
define double @select_oeq_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_oeq_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: xsnegdp f0, f0
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_oeq_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: xsnegdp f0, f0
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_oeq_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: beq cr0, .LBB2_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB2_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_oeq_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: beq cr0, .LBB2_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB2_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_oeq_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: beq cr0, .LBB2_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB2_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_oeq_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xscmpudp cr0, f1, f2
+; P9-NEXT: beq cr0, .LBB2_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB2_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp oeq double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -144,37 +90,21 @@ entry:
}
define float @select_fast_oeq_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_oeq_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: xssubsp f1, f1, f2
-; FAST-P8-NEXT: fsel f1, f1, f3, f4
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_oeq_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: xssubsp f1, f1, f2
-; FAST-P9-NEXT: fsel f1, f1, f3, f4
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_oeq_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P8-NEXT: xssubsp f1, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f1, f3, f4
-; NO-FAST-P8-NEXT: fsel f1, f0, f1, f4
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_oeq_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P9-NEXT: xssubsp f1, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f1, f3, f4
-; NO-FAST-P9-NEXT: fsel f1, f0, f1, f4
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_oeq_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubsp f0, f2, f1
+; P8-NEXT: xssubsp f1, f1, f2
+; P8-NEXT: fsel f1, f1, f3, f4
+; P8-NEXT: fsel f1, f0, f1, f4
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_oeq_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubsp f0, f2, f1
+; P9-NEXT: xssubsp f1, f1, f2
+; P9-NEXT: fsel f1, f1, f3, f4
+; P9-NEXT: fsel f1, f0, f1, f4
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz oeq float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -182,37 +112,21 @@ entry:
}
define double @select_fast_oeq_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_oeq_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: xsnegdp f0, f0
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_oeq_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: xsnegdp f0, f0
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_oeq_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT: xsnegdp f0, f0
-; NO-FAST-P8-NEXT: fsel f1, f0, f1, f4
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_oeq_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT: xsnegdp f0, f0
-; NO-FAST-P9-NEXT: fsel f1, f0, f1, f4
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_oeq_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubdp f0, f1, f2
+; P8-NEXT: fsel f1, f0, f3, f4
+; P8-NEXT: xsnegdp f0, f0
+; P8-NEXT: fsel f1, f0, f1, f4
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_oeq_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubdp f0, f1, f2
+; P9-NEXT: fsel f1, f0, f3, f4
+; P9-NEXT: xsnegdp f0, f0
+; P9-NEXT: fsel f1, f0, f1, f4
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz oeq double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -222,43 +136,27 @@ entry:
; Test one
define float @select_one_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_one_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f1, f2
-; FAST-P8-NEXT: xsnegdp f1, f0
-; FAST-P8-NEXT: fsel f0, f0, f4, f3
-; FAST-P8-NEXT: fsel f1, f1, f0, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_one_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f1, f2
-; FAST-P9-NEXT: xsnegdp f1, f0
-; FAST-P9-NEXT: fsel f0, f0, f4, f3
-; FAST-P9-NEXT: fsel f1, f1, f0, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_one_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB5_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_one_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB5_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_one_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: crnor 4*cr5+lt, un, eq
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB5_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_one_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: crnor 4*cr5+lt, un, eq
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB5_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp one float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -266,43 +164,27 @@ entry:
}
define float @select_one_float_nsz(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_one_float_nsz:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: xssubsp f1, f1, f2
-; FAST-P8-NEXT: fsel f1, f1, f4, f3
-; FAST-P8-NEXT: fsel f1, f0, f1, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_one_float_nsz:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: xssubsp f1, f1, f2
-; FAST-P9-NEXT: fsel f1, f1, f4, f3
-; FAST-P9-NEXT: fsel f1, f0, f1, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_one_float_nsz:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB6_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB6_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_one_float_nsz:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB6_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB6_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_one_float_nsz:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: crnor 4*cr5+lt, un, eq
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB6_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB6_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_one_float_nsz:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: crnor 4*cr5+lt, un, eq
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB6_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB6_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp nsz one float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -310,43 +192,27 @@ entry:
}
define double @select_one_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_one_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: xsnegdp f0, f0
-; FAST-P8-NEXT: fsel f1, f0, f1, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_one_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: xsnegdp f0, f0
-; FAST-P9-NEXT: fsel f1, f0, f1, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_one_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB7_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB7_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_one_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB7_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB7_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_one_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: crnor 4*cr5+lt, un, eq
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB7_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB7_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_one_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: crnor 4*cr5+lt, un, eq
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB7_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB7_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp one double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -354,37 +220,21 @@ entry:
}
define float @select_fast_one_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_one_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: xssubsp f1, f1, f2
-; FAST-P8-NEXT: fsel f1, f1, f4, f3
-; FAST-P8-NEXT: fsel f1, f0, f1, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_one_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: xssubsp f1, f1, f2
-; FAST-P9-NEXT: fsel f1, f1, f4, f3
-; FAST-P9-NEXT: fsel f1, f0, f1, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_one_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P8-NEXT: xssubsp f1, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f1, f4, f3
-; NO-FAST-P8-NEXT: fsel f1, f0, f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_one_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P9-NEXT: xssubsp f1, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f1, f4, f3
-; NO-FAST-P9-NEXT: fsel f1, f0, f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_one_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubsp f0, f2, f1
+; P8-NEXT: xssubsp f1, f1, f2
+; P8-NEXT: fsel f1, f1, f4, f3
+; P8-NEXT: fsel f1, f0, f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_one_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubsp f0, f2, f1
+; P9-NEXT: xssubsp f1, f1, f2
+; P9-NEXT: fsel f1, f1, f4, f3
+; P9-NEXT: fsel f1, f0, f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz one float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -392,37 +242,21 @@ entry:
}
define double @select_fast_one_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_one_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: xsnegdp f0, f0
-; FAST-P8-NEXT: fsel f1, f0, f1, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_one_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: xsnegdp f0, f0
-; FAST-P9-NEXT: fsel f1, f0, f1, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_one_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT: xsnegdp f0, f0
-; NO-FAST-P8-NEXT: fsel f1, f0, f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_one_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT: xsnegdp f0, f0
-; NO-FAST-P9-NEXT: fsel f1, f0, f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_one_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubdp f0, f1, f2
+; P8-NEXT: fsel f1, f0, f4, f3
+; P8-NEXT: xsnegdp f0, f0
+; P8-NEXT: fsel f1, f0, f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_one_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubdp f0, f1, f2
+; P9-NEXT: fsel f1, f0, f4, f3
+; P9-NEXT: xsnegdp f0, f0
+; P9-NEXT: fsel f1, f0, f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz one double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -432,39 +266,27 @@ entry:
; Test oge
define float @select_oge_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_oge_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_oge_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_oge_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB10_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB10_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_oge_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB10_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB10_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_oge_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: crnor 4*cr5+lt, un, lt
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB10_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB10_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_oge_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: crnor 4*cr5+lt, un, lt
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB10_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB10_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp oge float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -472,39 +294,27 @@ entry:
}
define double @select_oge_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_oge_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_oge_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_oge_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB11_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB11_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_oge_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB11_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB11_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_oge_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: crnor 4*cr5+lt, un, lt
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB11_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB11_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_oge_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: crnor 4*cr5+lt, un, lt
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB11_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB11_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp oge double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -512,29 +322,17 @@ entry:
}
define float @select_fast_oge_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_oge_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_oge_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_oge_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_oge_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubsp f0, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_oge_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubsp f0, f1, f2
+; P8-NEXT: fsel f1, f0, f3, f4
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_oge_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubsp f0, f1, f2
+; P9-NEXT: fsel f1, f0, f3, f4
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz oge float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -542,29 +340,17 @@ entry:
}
define double @select_fast_oge_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_oge_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_oge_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_oge_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_oge_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_oge_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubdp f0, f1, f2
+; P8-NEXT: fsel f1, f0, f3, f4
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_oge_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubdp f0, f1, f2
+; P9-NEXT: fsel f1, f0, f3, f4
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz oge double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -574,37 +360,25 @@ entry:
; Test olt
define float @select_olt_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_olt_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_olt_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_olt_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: blt cr0, .LBB14_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB14_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_olt_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: blt cr0, .LBB14_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB14_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_olt_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: blt cr0, .LBB14_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB14_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_olt_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: blt cr0, .LBB14_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB14_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp olt float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -612,37 +386,25 @@ entry:
}
define double @select_olt_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_olt_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_olt_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_olt_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: blt cr0, .LBB15_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB15_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_olt_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: blt cr0, .LBB15_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB15_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_olt_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: blt cr0, .LBB15_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB15_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_olt_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xscmpudp cr0, f1, f2
+; P9-NEXT: blt cr0, .LBB15_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB15_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp olt double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -650,29 +412,17 @@ entry:
}
define float @select_fast_olt_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_olt_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_olt_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_olt_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_olt_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubsp f0, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_olt_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubsp f0, f1, f2
+; P8-NEXT: fsel f1, f0, f4, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_olt_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubsp f0, f1, f2
+; P9-NEXT: fsel f1, f0, f4, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp ninf nnan nsz olt float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -680,29 +430,17 @@ entry:
}
define double @select_fast_olt_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_olt_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_olt_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_olt_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_olt_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_olt_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubdp f0, f1, f2
+; P8-NEXT: fsel f1, f0, f4, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_olt_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubdp f0, f1, f2
+; P9-NEXT: fsel f1, f0, f4, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz olt double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -712,37 +450,25 @@ entry:
; Test ogt
define float @select_ogt_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_ogt_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_ogt_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_ogt_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: bgt cr0, .LBB18_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB18_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_ogt_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: bgt cr0, .LBB18_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB18_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_ogt_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: bgt cr0, .LBB18_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB18_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_ogt_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: bgt cr0, .LBB18_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB18_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp ogt float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -750,37 +476,25 @@ entry:
}
define double @select_ogt_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_ogt_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_ogt_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_ogt_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: bgt cr0, .LBB19_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB19_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_ogt_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: bgt cr0, .LBB19_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB19_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_ogt_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: bgt cr0, .LBB19_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB19_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_ogt_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xscmpudp cr0, f1, f2
+; P9-NEXT: bgt cr0, .LBB19_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB19_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp ogt double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -788,29 +502,17 @@ entry:
}
define float @select_fast_ogt_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_ogt_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_ogt_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_ogt_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P8-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_ogt_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P9-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_ogt_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubsp f0, f2, f1
+; P8-NEXT: fsel f1, f0, f4, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_ogt_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubsp f0, f2, f1
+; P9-NEXT: fsel f1, f0, f4, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz ogt float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -818,29 +520,17 @@ entry:
}
define double @select_fast_ogt_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_ogt_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_ogt_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_ogt_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f2, f1
-; NO-FAST-P8-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_ogt_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubdp f0, f2, f1
-; NO-FAST-P9-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_ogt_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubdp f0, f2, f1
+; P8-NEXT: fsel f1, f0, f4, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_ogt_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubdp f0, f2, f1
+; P9-NEXT: fsel f1, f0, f4, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz ogt double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -850,39 +540,27 @@ entry:
; Test ole
define float @select_ole_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_ole_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_ole_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_ole_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB22_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB22_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_ole_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB22_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB22_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_ole_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: crnor 4*cr5+lt, un, gt
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB22_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB22_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_ole_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: crnor 4*cr5+lt, un, gt
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB22_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB22_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp ole float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -890,39 +568,27 @@ entry:
}
define double @select_ole_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_ole_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_ole_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_ole_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB23_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB23_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_ole_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB23_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB23_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_ole_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: crnor 4*cr5+lt, un, gt
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB23_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB23_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_ole_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: crnor 4*cr5+lt, un, gt
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB23_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB23_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp ole double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -930,29 +596,17 @@ entry:
}
define float @select_fast_ole_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_ole_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_ole_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_ole_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P8-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_ole_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P9-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_ole_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubsp f0, f2, f1
+; P8-NEXT: fsel f1, f0, f3, f4
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_ole_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubsp f0, f2, f1
+; P9-NEXT: fsel f1, f0, f3, f4
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz ole float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -960,29 +614,17 @@ entry:
}
define double @select_fast_ole_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_ole_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_ole_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_ole_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f2, f1
-; NO-FAST-P8-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_ole_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubdp f0, f2, f1
-; NO-FAST-P9-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_ole_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubdp f0, f2, f1
+; P8-NEXT: fsel f1, f0, f3, f4
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_ole_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubdp f0, f2, f1
+; P9-NEXT: fsel f1, f0, f3, f4
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz ole double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -991,149 +633,167 @@ entry:
; Test RHS is 1.000000e+00
define double @onecmp1(double %a, double %y, double %z) {
-; FAST-P8-LABEL: onecmp1:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: vspltisw v2, -1
-; FAST-P8-NEXT: xvcvsxwdp vs0, vs34
-; FAST-P8-NEXT: xsadddp f0, f1, f0
-; FAST-P8-NEXT: fsel f1, f0, f2, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: onecmp1:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: vspltisw v2, -1
-; FAST-P9-NEXT: xvcvsxwdp vs0, vs34
-; FAST-P9-NEXT: xsadddp f0, f1, f0
-; FAST-P9-NEXT: fsel f1, f0, f2, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: onecmp1:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: vspltisw v2, 1
-; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f0
-; NO-FAST-P8-NEXT: bc 12, lt, .LBB26_3
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f1
-; NO-FAST-P8-NEXT: bc 12, un, .LBB26_3
-; NO-FAST-P8-NEXT: # %bb.2: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f2
-; NO-FAST-P8-NEXT: .LBB26_3: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: onecmp1:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: vspltisw v2, 1
-; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f0
-; NO-FAST-P9-NEXT: bc 12, lt, .LBB26_3
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f1
-; NO-FAST-P9-NEXT: bc 12, un, .LBB26_3
-; NO-FAST-P9-NEXT: # %bb.2: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f2
-; NO-FAST-P9-NEXT: .LBB26_3: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: onecmp1:
+; P8: # %bb.0: # %entry
+; P8-NEXT: vspltisw v2, 1
+; P8-NEXT: xvcvsxwdp vs0, vs34
+; P8-NEXT: fcmpu cr0, f1, f0
+; P8-NEXT: bc 12, lt, .LBB26_3
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fcmpu cr0, f1, f1
+; P8-NEXT: bc 12, un, .LBB26_3
+; P8-NEXT: # %bb.2: # %entry
+; P8-NEXT: fmr f3, f2
+; P8-NEXT: .LBB26_3: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: onecmp1:
+; P9: # %bb.0: # %entry
+; P9-NEXT: vspltisw v2, 1
+; P9-NEXT: xvcvsxwdp vs0, vs34
+; P9-NEXT: fcmpu cr0, f1, f0
+; P9-NEXT: bc 12, lt, .LBB26_3
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fcmpu cr0, f1, f1
+; P9-NEXT: bc 12, un, .LBB26_3
+; P9-NEXT: # %bb.2: # %entry
+; P9-NEXT: fmr f3, f2
+; P9-NEXT: .LBB26_3: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp ult double %a, 1.000000e+00
%z.y = select i1 %cmp, double %z, double %y
ret double %z.y
}
+define double @onecmp1_fast(double %a, double %y, double %z) {
+; P8-LABEL: onecmp1_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: vspltisw v2, -1
+; P8-NEXT: xvcvsxwdp vs0, vs34
+; P8-NEXT: xsadddp f0, f1, f0
+; P8-NEXT: fsel f1, f0, f2, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: onecmp1_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: vspltisw v2, -1
+; P9-NEXT: xvcvsxwdp vs0, vs34
+; P9-NEXT: xsadddp f0, f1, f0
+; P9-NEXT: fsel f1, f0, f2, f3
+; P9-NEXT: blr
+entry:
+ %cmp = fcmp nnan ninf nsz ult double %a, 1.000000e+00
+ %z.y = select i1 %cmp, double %z, double %y
+ ret double %z.y
+}
+
define double @onecmp2(double %a, double %y, double %z) {
-; FAST-P8-LABEL: onecmp2:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: vspltisw v2, 1
-; FAST-P8-NEXT: xvcvsxwdp vs0, vs34
-; FAST-P8-NEXT: xssubdp f0, f0, f1
-; FAST-P8-NEXT: fsel f1, f0, f3, f2
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: onecmp2:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: vspltisw v2, 1
-; FAST-P9-NEXT: xvcvsxwdp vs0, vs34
-; FAST-P9-NEXT: xssubdp f0, f0, f1
-; FAST-P9-NEXT: fsel f1, f0, f3, f2
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: onecmp2:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: vspltisw v2, 1
-; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P8-NEXT: bgt cr0, .LBB27_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f2, f3
-; NO-FAST-P8-NEXT: .LBB27_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f2
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: onecmp2:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: vspltisw v2, 1
-; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34
-; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P9-NEXT: bgt cr0, .LBB27_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f2, f3
-; NO-FAST-P9-NEXT: .LBB27_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f2
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: onecmp2:
+; P8: # %bb.0: # %entry
+; P8-NEXT: vspltisw v2, 1
+; P8-NEXT: xvcvsxwdp vs0, vs34
+; P8-NEXT: xscmpudp cr0, f1, f0
+; P8-NEXT: bgt cr0, .LBB28_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f2, f3
+; P8-NEXT: .LBB28_2: # %entry
+; P8-NEXT: fmr f1, f2
+; P8-NEXT: blr
+;
+; P9-LABEL: onecmp2:
+; P9: # %bb.0: # %entry
+; P9-NEXT: vspltisw v2, 1
+; P9-NEXT: xvcvsxwdp vs0, vs34
+; P9-NEXT: xscmpudp cr0, f1, f0
+; P9-NEXT: bgt cr0, .LBB28_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f2, f3
+; P9-NEXT: .LBB28_2: # %entry
+; P9-NEXT: fmr f1, f2
+; P9-NEXT: blr
entry:
%cmp = fcmp ogt double %a, 1.000000e+00
%y.z = select i1 %cmp, double %y, double %z
ret double %y.z
}
+define double @onecmp2_fast(double %a, double %y, double %z) {
+; P8-LABEL: onecmp2_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: vspltisw v2, 1
+; P8-NEXT: xvcvsxwdp vs0, vs34
+; P8-NEXT: xssubdp f0, f0, f1
+; P8-NEXT: fsel f1, f0, f3, f2
+; P8-NEXT: blr
+;
+; P9-LABEL: onecmp2_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: vspltisw v2, 1
+; P9-NEXT: xvcvsxwdp vs0, vs34
+; P9-NEXT: xssubdp f0, f0, f1
+; P9-NEXT: fsel f1, f0, f3, f2
+; P9-NEXT: blr
+entry:
+ %cmp = fcmp nnan ninf nsz ogt double %a, 1.000000e+00
+ %y.z = select i1 %cmp, double %y, double %z
+ ret double %y.z
+}
+
define double @onecmp3(double %a, double %y, double %z) {
-; FAST-P8-LABEL: onecmp3:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: vspltisw v2, -1
-; FAST-P8-NEXT: xvcvsxwdp vs0, vs34
-; FAST-P8-NEXT: xsadddp f0, f1, f0
-; FAST-P8-NEXT: fsel f1, f0, f2, f3
-; FAST-P8-NEXT: xsnegdp f0, f0
-; FAST-P8-NEXT: fsel f1, f0, f1, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: onecmp3:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: vspltisw v2, -1
-; FAST-P9-NEXT: xvcvsxwdp vs0, vs34
-; FAST-P9-NEXT: xsadddp f0, f1, f0
-; FAST-P9-NEXT: fsel f1, f0, f2, f3
-; FAST-P9-NEXT: xsnegdp f0, f0
-; FAST-P9-NEXT: fsel f1, f0, f1, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: onecmp3:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: vspltisw v2, 1
-; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P8-NEXT: beq cr0, .LBB28_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f2, f3
-; NO-FAST-P8-NEXT: .LBB28_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f2
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: onecmp3:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: vspltisw v2, 1
-; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34
-; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P9-NEXT: beq cr0, .LBB28_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f2, f3
-; NO-FAST-P9-NEXT: .LBB28_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f2
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: onecmp3:
+; P8: # %bb.0: # %entry
+; P8-NEXT: vspltisw v2, 1
+; P8-NEXT: xvcvsxwdp vs0, vs34
+; P8-NEXT: xscmpudp cr0, f1, f0
+; P8-NEXT: beq cr0, .LBB30_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f2, f3
+; P8-NEXT: .LBB30_2: # %entry
+; P8-NEXT: fmr f1, f2
+; P8-NEXT: blr
+;
+; P9-LABEL: onecmp3:
+; P9: # %bb.0: # %entry
+; P9-NEXT: vspltisw v2, 1
+; P9-NEXT: xvcvsxwdp vs0, vs34
+; P9-NEXT: xscmpudp cr0, f1, f0
+; P9-NEXT: beq cr0, .LBB30_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f2, f3
+; P9-NEXT: .LBB30_2: # %entry
+; P9-NEXT: fmr f1, f2
+; P9-NEXT: blr
entry:
%cmp = fcmp oeq double %a, 1.000000e+00
%y.z = select i1 %cmp, double %y, double %z
ret double %y.z
}
+
+define double @onecmp3_fast(double %a, double %y, double %z) {
+; P8-LABEL: onecmp3_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: vspltisw v2, -1
+; P8-NEXT: xvcvsxwdp vs0, vs34
+; P8-NEXT: xsadddp f0, f1, f0
+; P8-NEXT: fsel f1, f0, f2, f3
+; P8-NEXT: xsnegdp f0, f0
+; P8-NEXT: fsel f1, f0, f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: onecmp3_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: vspltisw v2, -1
+; P9-NEXT: xvcvsxwdp vs0, vs34
+; P9-NEXT: xsadddp f0, f1, f0
+; P9-NEXT: fsel f1, f0, f2, f3
+; P9-NEXT: xsnegdp f0, f0
+; P9-NEXT: fsel f1, f0, f1, f3
+; P9-NEXT: blr
+entry:
+ %cmp = fcmp nnan ninf nsz oeq double %a, 1.000000e+00
+ %y.z = select i1 %cmp, double %y, double %z
+ ret double %y.z
+}
>From 99691aba40da408e97ac739c38fd6ac0feb19a0b Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Sat, 29 Nov 2025 14:30:48 +0800
Subject: [PATCH 05/24] Fix AArch64 tests
---
.../arm64-constrained-fcmp-no-nans-opt.ll | 26 +-
.../AArch64/build-vector-dup-simd-nnan.ll | 294 ++++++++++++++++++
.../CodeGen/AArch64/build-vector-dup-simd.ll | 147 ++-------
3 files changed, 341 insertions(+), 126 deletions(-)
create mode 100644 llvm/test/CodeGen/AArch64/build-vector-dup-simd-nnan.ll
diff --git a/llvm/test/CodeGen/AArch64/arm64-constrained-fcmp-no-nans-opt.ll b/llvm/test/CodeGen/AArch64/arm64-constrained-fcmp-no-nans-opt.ll
index 968acb2565b4e..2ddaf0ecf7619 100644
--- a/llvm/test/CodeGen/AArch64/arm64-constrained-fcmp-no-nans-opt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-constrained-fcmp-no-nans-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=arm64-eabi -mattr=+fullfp16 -enable-no-nans-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -mattr=+fullfp16 | FileCheck %s
declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
@@ -7,7 +7,7 @@ declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, met
; CHECK: fcmp s0, s1
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_ueq(float %a, float %b) nounwind ssp strictfp {
+define i1 @f32_constrained_fcmp_ueq(float nofpclass(nan) %a, float nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -16,7 +16,7 @@ define i1 @f32_constrained_fcmp_ueq(float %a, float %b) nounwind ssp strictfp {
; CHECK: fcmp s0, s1
; CHECK-NEXT: cset w0, ne
; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_une(float %a, float %b) nounwind ssp strictfp {
+define i1 @f32_constrained_fcmp_une(float nofpclass(nan) %a, float nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -25,7 +25,7 @@ define i1 @f32_constrained_fcmp_une(float %a, float %b) nounwind ssp strictfp {
; CHECK: fcmp s0, s1
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_ugt(float %a, float %b) nounwind ssp strictfp {
+define i1 @f32_constrained_fcmp_ugt(float nofpclass(nan) %a, float nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -34,7 +34,7 @@ define i1 @f32_constrained_fcmp_ugt(float %a, float %b) nounwind ssp strictfp {
; CHECK: fcmp s0, s1
; CHECK-NEXT: cset w0, ge
; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_uge(float %a, float %b) nounwind ssp strictfp {
+define i1 @f32_constrained_fcmp_uge(float nofpclass(nan) %a, float nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -43,7 +43,7 @@ define i1 @f32_constrained_fcmp_uge(float %a, float %b) nounwind ssp strictfp {
; CHECK: fcmp s0, s1
; CHECK-NEXT: cset w0, lt
; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_ult(float %a, float %b) nounwind ssp strictfp {
+define i1 @f32_constrained_fcmp_ult(float nofpclass(nan) %a, float nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -52,7 +52,7 @@ define i1 @f32_constrained_fcmp_ult(float %a, float %b) nounwind ssp strictfp {
; CHECK: fcmp s0, s1
; CHECK-NEXT: cset w0, le
; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_ule(float %a, float %b) nounwind ssp strictfp {
+define i1 @f32_constrained_fcmp_ule(float nofpclass(nan) %a, float nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -61,7 +61,7 @@ define i1 @f32_constrained_fcmp_ule(float %a, float %b) nounwind ssp strictfp {
; CHECK: fcmp d0, d1
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_ueq(double %a, double %b) nounwind ssp strictfp {
+define i1 @f64_constrained_fcmp_ueq(double nofpclass(nan) %a, double nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -70,7 +70,7 @@ define i1 @f64_constrained_fcmp_ueq(double %a, double %b) nounwind ssp strictfp
; CHECK: fcmp d0, d1
; CHECK-NEXT: cset w0, ne
; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_une(double %a, double %b) nounwind ssp strictfp {
+define i1 @f64_constrained_fcmp_une(double nofpclass(nan) %a, double nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -79,7 +79,7 @@ define i1 @f64_constrained_fcmp_une(double %a, double %b) nounwind ssp strictfp
; CHECK: fcmp d0, d1
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_ugt(double %a, double %b) nounwind ssp strictfp {
+define i1 @f64_constrained_fcmp_ugt(double nofpclass(nan) %a, double nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -88,7 +88,7 @@ define i1 @f64_constrained_fcmp_ugt(double %a, double %b) nounwind ssp strictfp
; CHECK: fcmp d0, d1
; CHECK-NEXT: cset w0, ge
; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_uge(double %a, double %b) nounwind ssp strictfp {
+define i1 @f64_constrained_fcmp_uge(double nofpclass(nan) %a, double nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -97,7 +97,7 @@ define i1 @f64_constrained_fcmp_uge(double %a, double %b) nounwind ssp strictfp
; CHECK: fcmp d0, d1
; CHECK-NEXT: cset w0, lt
; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_ult(double %a, double %b) nounwind ssp strictfp {
+define i1 @f64_constrained_fcmp_ult(double nofpclass(nan) %a, double nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -106,7 +106,7 @@ define i1 @f64_constrained_fcmp_ult(double %a, double %b) nounwind ssp strictfp
; CHECK: fcmp d0, d1
; CHECK-NEXT: cset w0, le
; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_ule(double %a, double %b) nounwind ssp strictfp {
+define i1 @f64_constrained_fcmp_ule(double nofpclass(nan) %a, double nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict")
ret i1 %cmp
}
diff --git a/llvm/test/CodeGen/AArch64/build-vector-dup-simd-nnan.ll b/llvm/test/CodeGen/AArch64/build-vector-dup-simd-nnan.ll
new file mode 100644
index 0000000000000..440fd2ba7f8f7
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/build-vector-dup-simd-nnan.ll
@@ -0,0 +1,294 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK
+
+define <1 x float> @dup_v1i32_oeq(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_oeq:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmeq s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan oeq float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ogt(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_ogt:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ogt float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_oge(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_oge:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan oge float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_olt(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_olt:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan olt float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ole(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_ole:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s1, s0
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ole float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_one(float %a, float %b) {
+;
+;
+; CHECK-LABEL: dup_v1i32_one:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmeq s0, s0, s1
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan one float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ord(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_ord:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s2, s0, s1
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ord float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ueq(float %a, float %b) {
+;
+;
+; CHECK-LABEL: dup_v1i32_ueq:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmeq s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ueq float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ugt(float %a, float %b) {
+;
+;
+; CHECK-LABEL: dup_v1i32_ugt:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ugt float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_uge(float %a, float %b) {
+;
+;
+; CHECK-LABEL: dup_v1i32_uge:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan uge float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ult(float %a, float %b) {
+;
+;
+; CHECK-LABEL: dup_v1i32_ult:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ult float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ule(float %a, float %b) {
+;
+;
+; CHECK-LABEL: dup_v1i32_ule:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s1, s0
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ule float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_une(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_une:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmeq s0, s0, s1
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan une float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_uno(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_uno:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s2, s0, s1
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan uno float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <4 x float> @dup_v4i32(float %a, float %b) {
+; CHECK-LABEL: dup_v4i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s0, s1
+; CHECK-NEXT: dup v0.4s, v0.s[0]
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan oge float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <4 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <4 x i32> %vecinit.i to <4 x float>
+ %2 = shufflevector <4 x float> %1, <4 x float> poison, <4 x i32> zeroinitializer
+ ret <4 x float> %2
+}
+
+define <4 x float> @dup_v4i32_reversed(float %a, float %b) {
+; CHECK-LABEL: dup_v4i32_reversed:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: dup v0.4s, v0.s[0]
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ogt float %b, %a
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <4 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <4 x i32> %vecinit.i to <4 x float>
+ %2 = shufflevector <4 x float> %1, <4 x float> poison, <4 x i32> zeroinitializer
+ ret <4 x float> %2
+}
+
+define <2 x double> @dup_v2i64(double %a, double %b) {
+; CHECK-LABEL: dup_v2i64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt d0, d0, d1
+; CHECK-NEXT: dup v0.2d, v0.d[0]
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ogt double %a, %b
+ %vcmpd.i = sext i1 %0 to i64
+ %vecinit.i = insertelement <2 x i64> poison, i64 %vcmpd.i, i64 0
+ %1 = bitcast <2 x i64> %vecinit.i to <2 x double>
+ %2 = shufflevector <2 x double> %1, <2 x double> poison, <2 x i32> zeroinitializer
+ ret <2 x double> %2
+}
+
+define <8 x half> @dup_v8i16(half %a, half %b) {
+;
+;
+; FIXME: Could be replaced with fcmeq + dup but the type of the former is
+; promoted to i32 during selection and then the optimization does not apply.
+; CHECK-LABEL: dup_v8i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvt s1, h1
+; CHECK-NEXT: fcvt s0, h0
+; CHECK-NEXT: fcmeq s0, s0, s1
+; CHECK-NEXT: ret
+ entry:
+ %0 = fcmp nnan oeq half %a, %b
+ %vcmpd.i = sext i1 %0 to i16
+ %vecinit.i = insertelement <8 x i16> poison, i16 %vcmpd.i, i64 0
+ %1 = bitcast <8 x i16> %vecinit.i to <8 x half>
+ ret <8 x half> %1
+}
+
+define i32 @mask_i32(float %a, float %b) {
+; CHECK-LABEL: mask_i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmp s0, s1
+; CHECK-NEXT: csetm w0, eq
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan oeq float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ ret i32 %vcmpd.i
+}
+
+; Verify that a mask is not emitted when (allOnes, allZeros) are not the
+; operands for the SELECT_CC.
+define i32 @bool_i32(float %a, float %b) {
+; CHECK-LABEL: bool_i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmp s0, s1
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan oeq float %a, %b
+ %vcmpd.i = zext i1 %0 to i32
+ ret i32 %vcmpd.i
+}
diff --git a/llvm/test/CodeGen/AArch64/build-vector-dup-simd.ll b/llvm/test/CodeGen/AArch64/build-vector-dup-simd.ll
index ac0b8e89519dd..2649215d97203 100644
--- a/llvm/test/CodeGen/AArch64/build-vector-dup-simd.ll
+++ b/llvm/test/CodeGen/AArch64/build-vector-dup-simd.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-NOFULLFP16
-; RUN: llc < %s -mtriple=aarch64 --enable-no-nans-fp-math | FileCheck %s --check-prefixes=CHECK,CHECK-NONANS
; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FULLFP16
define <1 x float> @dup_v1i32_oeq(float %a, float %b) {
@@ -69,27 +68,13 @@ entry:
}
define <1 x float> @dup_v1i32_one(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_one:
-; CHECK-NOFULLFP16: // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT: fcmgt s2, s0, s1
-; CHECK-NOFULLFP16-NEXT: fcmgt s0, s1, s0
-; CHECK-NOFULLFP16-NEXT: orr v0.16b, v0.16b, v2.16b
-; CHECK-NOFULLFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NOFULLFP16-NEXT: ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_one:
-; CHECK-NONANS: // %bb.0: // %entry
-; CHECK-NONANS-NEXT: fcmeq s0, s0, s1
-; CHECK-NONANS-NEXT: mvn v0.8b, v0.8b
-; CHECK-NONANS-NEXT: ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_one:
-; CHECK-FULLFP16: // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT: fcmgt s2, s0, s1
-; CHECK-FULLFP16-NEXT: fcmgt s0, s1, s0
-; CHECK-FULLFP16-NEXT: orr v0.16b, v0.16b, v2.16b
-; CHECK-FULLFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-FULLFP16-NEXT: ret
+; CHECK-LABEL: dup_v1i32_one:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s2, s0, s1
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
entry:
%0 = fcmp one float %a, %b
%vcmpd.i = sext i1 %0 to i32
@@ -115,26 +100,13 @@ entry:
}
define <1 x float> @dup_v1i32_ueq(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_ueq:
-; CHECK-NOFULLFP16: // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT: fcmgt s2, s0, s1
-; CHECK-NOFULLFP16-NEXT: fcmgt s0, s1, s0
-; CHECK-NOFULLFP16-NEXT: orr v0.16b, v0.16b, v2.16b
-; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT: ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_ueq:
-; CHECK-NONANS: // %bb.0: // %entry
-; CHECK-NONANS-NEXT: fcmeq s0, s0, s1
-; CHECK-NONANS-NEXT: ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_ueq:
-; CHECK-FULLFP16: // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT: fcmgt s2, s0, s1
-; CHECK-FULLFP16-NEXT: fcmgt s0, s1, s0
-; CHECK-FULLFP16-NEXT: orr v0.16b, v0.16b, v2.16b
-; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT: ret
+; CHECK-LABEL: dup_v1i32_ueq:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s2, s0, s1
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
entry:
%0 = fcmp ueq float %a, %b
%vcmpd.i = sext i1 %0 to i32
@@ -144,22 +116,11 @@ entry:
}
define <1 x float> @dup_v1i32_ugt(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_ugt:
-; CHECK-NOFULLFP16: // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT: fcmge s0, s1, s0
-; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT: ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_ugt:
-; CHECK-NONANS: // %bb.0: // %entry
-; CHECK-NONANS-NEXT: fcmgt s0, s0, s1
-; CHECK-NONANS-NEXT: ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_ugt:
-; CHECK-FULLFP16: // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT: fcmge s0, s1, s0
-; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT: ret
+; CHECK-LABEL: dup_v1i32_ugt:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s1, s0
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
entry:
%0 = fcmp ugt float %a, %b
%vcmpd.i = sext i1 %0 to i32
@@ -169,22 +130,11 @@ entry:
}
define <1 x float> @dup_v1i32_uge(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_uge:
-; CHECK-NOFULLFP16: // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT: fcmgt s0, s1, s0
-; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT: ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_uge:
-; CHECK-NONANS: // %bb.0: // %entry
-; CHECK-NONANS-NEXT: fcmge s0, s0, s1
-; CHECK-NONANS-NEXT: ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_uge:
-; CHECK-FULLFP16: // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT: fcmgt s0, s1, s0
-; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT: ret
+; CHECK-LABEL: dup_v1i32_uge:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
entry:
%0 = fcmp uge float %a, %b
%vcmpd.i = sext i1 %0 to i32
@@ -194,22 +144,11 @@ entry:
}
define <1 x float> @dup_v1i32_ult(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_ult:
-; CHECK-NOFULLFP16: // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT: fcmge s0, s0, s1
-; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT: ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_ult:
-; CHECK-NONANS: // %bb.0: // %entry
-; CHECK-NONANS-NEXT: fcmgt s0, s1, s0
-; CHECK-NONANS-NEXT: ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_ult:
-; CHECK-FULLFP16: // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT: fcmge s0, s0, s1
-; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT: ret
+; CHECK-LABEL: dup_v1i32_ult:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s0, s1
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
entry:
%0 = fcmp ult float %a, %b
%vcmpd.i = sext i1 %0 to i32
@@ -219,22 +158,11 @@ entry:
}
define <1 x float> @dup_v1i32_ule(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_ule:
-; CHECK-NOFULLFP16: // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT: fcmgt s0, s0, s1
-; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT: ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_ule:
-; CHECK-NONANS: // %bb.0: // %entry
-; CHECK-NONANS-NEXT: fcmge s0, s1, s0
-; CHECK-NONANS-NEXT: ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_ule:
-; CHECK-FULLFP16: // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT: fcmgt s0, s0, s1
-; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT: ret
+; CHECK-LABEL: dup_v1i32_ule:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s0, s1
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
entry:
%0 = fcmp ule float %a, %b
%vcmpd.i = sext i1 %0 to i32
@@ -326,13 +254,6 @@ define <8 x half> @dup_v8i16(half %a, half %b) {
; CHECK-NOFULLFP16-NEXT: fcmeq s0, s0, s1
; CHECK-NOFULLFP16-NEXT: ret
;
-; CHECK-NONANS-LABEL: dup_v8i16:
-; CHECK-NONANS: // %bb.0: // %entry
-; CHECK-NONANS-NEXT: fcvt s1, h1
-; CHECK-NONANS-NEXT: fcvt s0, h0
-; CHECK-NONANS-NEXT: fcmeq s0, s0, s1
-; CHECK-NONANS-NEXT: ret
-;
; CHECK-FULLFP16-LABEL: dup_v8i16:
; CHECK-FULLFP16: // %bb.0: // %entry
; CHECK-FULLFP16-NEXT: fcmp h0, h1
>From 214ba1d970ba6e94496d74f5b61b4fe99019d245 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Sun, 30 Nov 2025 21:38:26 +0800
Subject: [PATCH 06/24] [DAGCombiner] Consider fast-math flags from SetCC in
getMinMaxOpcodeForFP
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 14 +++++++++++---
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index b8a61f0f63758..3e1aede08560c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6704,18 +6704,21 @@ static unsigned getMinMaxOpcodeForClamp(bool IsMin, SDValue Operand1,
// FIXME: use FMINIMUMNUM if possible, such as for RISC-V.
static unsigned getMinMaxOpcodeForCompareFold(
- SDValue Operand1, SDValue Operand2, ISD::CondCode CC, unsigned OrAndOpcode,
+ SDValue Operand1, SDValue Operand2, SDNodeFlags LHSSetCCFlags,
+ SDNodeFlags RHSSetCCFlags, ISD::CondCode CC, unsigned OrAndOpcode,
SelectionDAG &DAG, bool isFMAXNUMFMINNUM_IEEE, bool isFMAXNUMFMINNUM) {
// The optimization cannot be applied for all the predicates because
// of the way FMINNUM/FMAXNUM and FMINNUM_IEEE/FMAXNUM_IEEE handle
// NaNs. For FMINNUM_IEEE/FMAXNUM_IEEE, the optimization cannot be
// applied at all if one of the operands is a signaling NaN.
+ bool SetCCNoNaNs = LHSSetCCFlags.hasNoNaNs() && RHSSetCCFlags.hasNoNaNs();
+
// It is safe to use FMINNUM_IEEE/FMAXNUM_IEEE if all the operands
// are non NaN values.
if (((CC == ISD::SETLT || CC == ISD::SETLE) && (OrAndOpcode == ISD::OR)) ||
((CC == ISD::SETGT || CC == ISD::SETGE) && (OrAndOpcode == ISD::AND))) {
- return arebothOperandsNotNan(Operand1, Operand2, DAG) &&
+ return (SetCCNoNaNs || arebothOperandsNotNan(Operand1, Operand2, DAG)) &&
isFMAXNUMFMINNUM_IEEE
? ISD::FMINNUM_IEEE
: ISD::DELETED_NODE;
@@ -6723,7 +6726,7 @@ static unsigned getMinMaxOpcodeForCompareFold(
if (((CC == ISD::SETGT || CC == ISD::SETGE) && (OrAndOpcode == ISD::OR)) ||
((CC == ISD::SETLT || CC == ISD::SETLE) && (OrAndOpcode == ISD::AND))) {
- return arebothOperandsNotNan(Operand1, Operand2, DAG) &&
+ return (SetCCNoNaNs || arebothOperandsNotNan(Operand1, Operand2, DAG)) &&
isFMAXNUMFMINNUM_IEEE
? ISD::FMAXNUM_IEEE
: ISD::DELETED_NODE;
@@ -6772,6 +6775,8 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
!LHS->hasOneUse() || !RHS->hasOneUse())
return SDValue();
+ SDNodeFlags LHSSetCCFlags = LHS->getFlags();
+ SDNodeFlags RHSSetCCFlags = RHS->getFlags();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
AndOrSETCCFoldKind TargetPreference = TLI.isDesirableToCombineLogicOpOfSETCC(
LogicOp, LHS.getNode(), RHS.getNode());
@@ -6868,6 +6873,9 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
isFMAXNUMFMINNUM_IEEE, isFMAXNUMFMINNUM);
if (NewOpcode != ISD::DELETED_NODE) {
+ // Propagate fast-math flags from setcc.
+ SelectionDAG::FlagInserter FlagInserter(DAG, LHS->getFlags() &
+ RHS->getFlags());
SDValue MinMaxValue =
DAG.getNode(NewOpcode, DL, OpVT, Operand1, Operand2);
return DAG.getSetCC(DL, VT, MinMaxValue, CommonValue, CC);
>From f855db4a06a6d812127654de3229efd65314afd8 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Sun, 30 Nov 2025 21:39:30 +0800
Subject: [PATCH 07/24] [AMDGPU] Consider fast-math flags from source in
isCanonicalized
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 3 ++-
llvm/lib/Target/AMDGPU/SIISelLowering.h | 2 +-
llvm/lib/Target/AMDGPU/SIInstrInfo.td | 6 ++++--
llvm/lib/Target/AMDGPU/SIInstructions.td | 5 +----
4 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index b82000e3c1968..f959c69ae9b6a 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -14629,6 +14629,7 @@ SDValue SITargetLowering::performRcpCombine(SDNode *N,
}
bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
+ SDNodeFlags UserFlags,
unsigned MaxDepth) const {
unsigned Opcode = Op.getOpcode();
if (Opcode == ISD::FCANONICALIZE)
@@ -14828,7 +14829,7 @@ bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
// FIXME: denormalsEnabledForType is broken for dynamic
return denormalsEnabledForType(DAG, Op.getValueType()) &&
- DAG.isKnownNeverSNaN(Op);
+ (UserFlags.hasNoNaNs() || DAG.isKnownNeverSNaN(Op));
}
bool SITargetLowering::isCanonicalized(Register Reg, const MachineFunction &MF,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index d56e5ea1f9685..d8c487529e54a 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -559,7 +559,7 @@ class SITargetLowering final : public AMDGPUTargetLowering {
Register N1) const override;
bool isCanonicalized(SelectionDAG &DAG, SDValue Op,
- unsigned MaxDepth = 5) const;
+ SDNodeFlags UserFlags = {}, unsigned MaxDepth = 5) const;
bool isCanonicalized(Register Reg, const MachineFunction &MF,
unsigned MaxDepth = 5) const;
bool denormalsEnabledForType(const SelectionDAG &DAG, EVT VT) const;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 41074dd75b90a..ce0f1778df2b3 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -1004,11 +1004,13 @@ def MFMALdScaleXForm : SDNodeXForm<timm, [{
return CurDAG->getTargetConstant(New, SDLoc(N), MVT::i32);
}]>;
-def is_canonicalized : PatLeaf<(fAny srcvalue:$src), [{
+def fcanonicalize_canonicalized
+ : PatFrag<(ops node:$op), (fcanonicalize node:$op), [{
const SITargetLowering &Lowering =
*static_cast<const SITargetLowering *>(getTargetLowering());
- return Lowering.isCanonicalized(*CurDAG, Op);
+ return Lowering.isCanonicalized(*CurDAG, Op->getOperand(0), N->getFlags());
}]> {
+ // FIXME: This predicate for GlobalISel is dead code.
let GISelPredicateCode = [{
const SITargetLowering *TLI = static_cast<const SITargetLowering *>(
MF.getSubtarget().getTargetLowering());
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index ac98ca881d388..bc5c0b9207a44 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -3576,10 +3576,7 @@ def : GCNPat<
// If fcanonicalize's operand is implicitly canonicalized, we only need a copy.
let AddedComplexity = 8 in {
foreach vt = [f16, v2f16, f32, v2f32, f64] in {
- def : GCNPat<
- (fcanonicalize (vt is_canonicalized:$src)),
- (COPY vt:$src)
- >;
+ def : GCNPat<(fcanonicalize_canonicalized vt:$src), (COPY vt:$src)>;
}
}
>From 8de6337a9162db3046dfce7d5682ab47c112e87e Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Sun, 30 Nov 2025 21:40:19 +0800
Subject: [PATCH 08/24] Fix AMDGPU tests
---
.../CodeGen/AMDGPU/combine_andor_with_cmps.ll | 1197 +++++---------
.../AMDGPU/combine_andor_with_cmps_nnan.ll | 1449 +++++++++++++++++
llvm/test/CodeGen/AMDGPU/fmax_legacy.ll | 4 +-
llvm/test/CodeGen/AMDGPU/fmin_legacy.ll | 4 +-
llvm/test/CodeGen/AMDGPU/fold-fabs.ll | 10 +-
.../transform-block-with-return-to-epilog.ll | 2 +-
6 files changed, 1862 insertions(+), 804 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps_nnan.ll
diff --git a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
index ec92edbe2bf65..114392c688f94 100644
--- a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck %s -check-prefixes=GCN,GFX11,GFX11-TRUE16
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck %s -check-prefixes=GCN,GFX11,GFX11-FAKE16
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 -enable-no-nans-fp-math < %s | FileCheck %s -check-prefixes=GCN,GFX11NONANS,GCN-TRUE16,GFX11NONANS-TRUE16
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 -enable-no-nans-fp-math < %s | FileCheck %s -check-prefixes=GCN,GFX11NONANS,GCN-FAKE16,GFX11NONANS-FAKE16
; The tests check the following optimization of DAGCombiner:
; CMP(A,C)||CMP(B,C) => CMP(MIN/MAX(A,B), C)
@@ -855,21 +853,13 @@ define i1 @test57(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test58(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test58:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test58:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test58:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ugt double %arg1, %arg3
%cmp2 = fcmp ugt double %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -877,21 +867,13 @@ define i1 @test58(double %arg1, double %arg2, double %arg3) #0 {
}
define i1 @test59(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test59:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test59:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test59:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp uge float %arg1, %arg3
%cmp2 = fcmp uge float %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -899,21 +881,13 @@ define i1 @test59(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test60(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test60:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test60:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test60:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ule float %arg1, %arg3
%cmp2 = fcmp ule float %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -921,21 +895,13 @@ define i1 @test60(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test61(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test61:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test61:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test61:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult double %arg1, %arg3
%cmp2 = fcmp ult double %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -1083,22 +1049,14 @@ define i1 @test69(double %arg1, double %arg2, double %arg3) {
}
define i1 @test70(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test70:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test70:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test70:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp olt float %var1, %arg3
@@ -1144,22 +1102,14 @@ define i1 @test72(double %arg1, double %arg2, double %arg3) {
}
define i1 @test73(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test73:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test73:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test73:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp oge float %var1, %arg3
@@ -1169,25 +1119,15 @@ define i1 @test73(float %arg1, float %arg2, float %arg3) {
}
define i1 @test74(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test74:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test74:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test74:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
%cmp1 = fcmp ugt double %var1, %arg3
@@ -1197,22 +1137,14 @@ define i1 @test74(double %arg1, double %arg2, double %arg3) {
}
define i1 @test75(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test75:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test75:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test75:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp uge float %var1, %arg3
@@ -1222,22 +1154,14 @@ define i1 @test75(float %arg1, float %arg2, float %arg3) {
}
define i1 @test76(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test76:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test76:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test76:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp ule float %var1, %arg3
@@ -1247,25 +1171,15 @@ define i1 @test76(float %arg1, float %arg2, float %arg3) {
}
define i1 @test77(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test77:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test77:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test77:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
%cmp1 = fcmp ult double %var1, %arg3
@@ -1289,21 +1203,13 @@ define i1 @test78(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test79(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test79:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_nge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test79:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test79:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_nge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult float %arg1, %arg3
%cmp2 = fcmp ugt float %arg3, %arg2
%and1 = and i1 %cmp1, %cmp2
@@ -1364,22 +1270,14 @@ define i1 @test82(double %arg1, double %arg2, double %arg3) {
}
define i1 @test83(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test83:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test83:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test83:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp ule float %var1, %arg3
@@ -1408,7 +1306,6 @@ define i1 @test84(half %arg1, half %arg2, half %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v0, v2
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test84:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1416,7 +1313,6 @@ define i1 @test84(half %arg1, half %arg2, half %arg3) {
; GCN-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v0.l, v2.l
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test84:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1458,7 +1354,6 @@ define <2 x i1> @test85(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v3, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test85:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1468,7 +1363,6 @@ define <2 x i1> @test85(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
; GCN-TRUE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v1.h, v2.h
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test85:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1514,7 +1408,6 @@ define <2 x i1> @test86(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v3, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test86:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1524,7 +1417,6 @@ define <2 x i1> @test86(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
; GCN-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v1.h, v2.h
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test86:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1564,7 +1456,6 @@ define i1 @test87(half %arg1, half %arg2, half %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0, v2
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test87:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1572,7 +1463,6 @@ define i1 @test87(half %arg1, half %arg2, half %arg3) {
; GCN-TRUE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test87:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1614,26 +1504,24 @@ define <2 x i1> @test88(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test88:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-TRUE16-NEXT: v_pk_min_f16 v1, v0, v1
-; GCN-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v1.l, v2.l
+; GCN-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1.l, v2.l
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v1.h, v2.h
+; GCN-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1.h, v2.h
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test88:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-FAKE16-NEXT: v_pk_min_f16 v0, v0, v1
; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GCN-FAKE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2
; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v3, v1
+; GCN-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v1
; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
%var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
@@ -1664,20 +1552,18 @@ define i1 @test89(half %arg1, half %arg2, half %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_nlt_f16_e32 vcc_lo, v0, v2
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test89:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l
-; GCN-TRUE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
+; GCN-TRUE16-NEXT: v_cmp_nlt_f16_e32 vcc_lo, v0.l, v2.l
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test89:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1
-; GCN-FAKE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cmp_nlt_f16_e32 vcc_lo, v0, v2
; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
%var1 = call half @llvm.canonicalize.f16(half %arg1)
@@ -1708,20 +1594,18 @@ define i1 @test90(half %arg1, half %arg2, half %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test90:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l
-; GCN-TRUE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v0.l, v2.l
+; GCN-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.l, v2.l
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test90:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1
-; GCN-FAKE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2
; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
%var1 = call half @llvm.canonicalize.f16(half %arg1)
@@ -1758,26 +1642,24 @@ define <2 x i1> @test91(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_nge_f16_e32 vcc_lo, v3, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test91:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-TRUE16-NEXT: v_pk_max_f16 v1, v0, v1
-; GCN-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v1.l, v2.l
+; GCN-TRUE16-NEXT: v_cmp_nge_f16_e32 vcc_lo, v1.l, v2.l
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v1.h, v2.h
+; GCN-TRUE16-NEXT: v_cmp_nge_f16_e32 vcc_lo, v1.h, v2.h
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test91:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-FAKE16-NEXT: v_pk_max_f16 v0, v0, v1
; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GCN-FAKE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cmp_nge_f16_e32 vcc_lo, v0, v2
; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v3, v1
+; GCN-FAKE16-NEXT: v_cmp_nge_f16_e32 vcc_lo, v3, v1
; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
%var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
@@ -2175,21 +2057,13 @@ define i1 @test107(float %arg1, float %arg2, float %arg3, float %C) {
}
define i1 @test108(float %arg1, float %arg2, float %arg3, float %C) {
-; GFX11-LABEL: test108:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max3_f32 v0, v0, v1, v2
-; GFX11-NEXT: v_cmp_nge_f32_e32 vcc_lo, v0, v3
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test108:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max3_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v3
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test108:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_nge_f32_e32 vcc_lo, v0, v3
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult float %arg1, %C
%cmp2 = fcmp ult float %arg2, %C
%cmp3 = fcmp ult float %arg3, %C
@@ -2199,27 +2073,17 @@ define i1 @test108(float %arg1, float %arg2, float %arg3, float %C) {
}
define i1 @test109(float %arg1, float %arg2, float %arg3, float %arg4, float %C) {
-; GFX11-LABEL: test109:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GFX11-NEXT: v_cmp_gt_f32_e64 s0, v1, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test109:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GFX11NONANS-NEXT: v_cmp_gt_f32_e64 s0, v1, v4
-; GFX11NONANS-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test109:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
+; GCN-NEXT: v_cmp_gt_f32_e64 s0, v1, v4
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %C
%cmp2 = fcmp olt float %arg2, %C
%cmp3 = fcmp ogt float %arg3, %C
@@ -2257,28 +2121,17 @@ define i1 @test110(float %arg1, float %arg2, float %arg3, float %arg4, float %C1
}
define i1 @test111(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %C) {
-; GFX11-LABEL: test111:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT: v_dual_min_f32 v2, v2, v3 :: v_dual_max_f32 v3, v4, v4
-; GFX11-NEXT: v_min3_f32 v0, v0, v1, v2
-; GFX11-NEXT: v_min_f32_e32 v0, v0, v3
-; GFX11-NEXT: v_min3_f32 v0, v5, v6, v0
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test111:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v2, v2, v3
-; GFX11NONANS-NEXT: v_min3_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v4
-; GFX11NONANS-NEXT: v_min3_f32 v0, v5, v6, v0
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test111:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT: v_dual_min_f32 v2, v2, v3 :: v_dual_max_f32 v3, v4, v4
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_min_f32_e32 v0, v0, v3
+; GCN-NEXT: v_min3_f32 v0, v5, v6, v0
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %C
%cmp2 = fcmp olt float %arg2, %C
%or1 = or i1 %cmp1, %cmp2
@@ -2298,30 +2151,19 @@ define i1 @test111(float %arg1, float %arg2, float %arg3, float %arg4, float %ar
}
define i1 @test112(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %C) {
-; GFX11-LABEL: test112:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT: v_cmp_nge_f32_e32 vcc_lo, v4, v8
-; GFX11-NEXT: v_dual_max_f32 v5, v5, v5 :: v_dual_min_f32 v2, v2, v3
-; GFX11-NEXT: v_max_f32_e32 v3, v6, v6
-; GFX11-NEXT: v_min3_f32 v0, v0, v1, v2
-; GFX11-NEXT: v_min3_f32 v0, v0, v5, v3
-; GFX11-NEXT: v_cmp_lt_f32_e64 s0, v0, v8
-; GFX11-NEXT: s_or_b32 s0, s0, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test112:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v2, v2, v3
-; GFX11NONANS-NEXT: v_min3_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v4
-; GFX11NONANS-NEXT: v_min3_f32 v0, v5, v6, v0
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test112:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT: v_cmp_nge_f32_e32 vcc_lo, v4, v8
+; GCN-NEXT: v_dual_max_f32 v5, v5, v5 :: v_dual_min_f32 v2, v2, v3
+; GCN-NEXT: v_max_f32_e32 v3, v6, v6
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_min3_f32 v0, v0, v5, v3
+; GCN-NEXT: v_cmp_lt_f32_e64 s0, v0, v8
+; GCN-NEXT: s_or_b32 s0, s0, vcc_lo
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %C
%cmp2 = fcmp olt float %arg2, %C
%or1 = or i1 %cmp1, %cmp2
@@ -2341,24 +2183,16 @@ define i1 @test112(float %arg1, float %arg2, float %arg3, float %arg4, float %ar
}
define i1 @test113(float %arg1, float %arg2, float %arg3, float %C) {
-; GFX11-LABEL: test113:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_nge_f32_e64 s0, v0, v3
-; GFX11-NEXT: s_or_b32 s0, s0, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test113:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_maxmin_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v3
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test113:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v2, v3
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_nge_f32_e64 s0, v0, v3
+; GCN-NEXT: s_or_b32 s0, s0, vcc_lo
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult float %arg1, %C
%cmp2 = fcmp ult float %arg2, %C
%cmp3 = fcmp olt float %arg3, %C
@@ -2368,26 +2202,16 @@ define i1 @test113(float %arg1, float %arg2, float %arg3, float %C) {
}
define i1 @test114(float %arg1, float %arg2, float %arg3, float %C) {
-; GFX11-LABEL: test114:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT: v_cmp_nge_f32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_gt_f32_e64 s0, v0, v3
-; GFX11-NEXT: s_and_b32 s0, s0, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test114:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v2, v3
-; GFX11NONANS-NEXT: v_cmp_gt_f32_e64 s0, v0, v3
-; GFX11NONANS-NEXT: s_and_b32 s0, s0, vcc_lo
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test114:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT: v_cmp_nge_f32_e32 vcc_lo, v2, v3
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_gt_f32_e64 s0, v0, v3
+; GCN-NEXT: s_and_b32 s0, s0, vcc_lo
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ogt float %arg1, %C
%cmp2 = fcmp ogt float %arg2, %C
%cmp3 = fcmp ult float %arg3, %C
@@ -2397,26 +2221,17 @@ define i1 @test114(float %arg1, float %arg2, float %arg3, float %C) {
}
define i1 @test115(float %arg1, float %arg2, float %arg3, float %arg4, float %C) {
-; GFX11-LABEL: test115:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v3, v3, v3
-; GFX11-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GFX11-NEXT: v_cmp_nge_f32_e64 s0, v1, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test115:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v2, v2, v3
-; GFX11NONANS-NEXT: v_min3_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test115:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v3, v3, v3
+; GCN-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
+; GCN-NEXT: v_cmp_nge_f32_e64 s0, v1, v4
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %C
%cmp2 = fcmp olt float %arg2, %C
%var3 = call float @llvm.canonicalize.f32(float %arg3)
@@ -2430,44 +2245,27 @@ define i1 @test115(float %arg1, float %arg2, float %arg3, float %arg4, float %C)
}
define i1 @test116(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %arg9, float %arg10, float %C) {
-; GFX11-LABEL: test116:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v9, v9, v9 :: v_dual_max_f32 v8, v8, v8
-; GFX11-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT: v_dual_max_f32 v5, v5, v5 :: v_dual_max_f32 v4, v4, v4
-; GFX11-NEXT: v_dual_max_f32 v7, v7, v7 :: v_dual_max_f32 v6, v6, v6
-; GFX11-NEXT: v_min_f32_e32 v8, v8, v9
-; GFX11-NEXT: v_dual_max_f32 v2, v2, v3 :: v_dual_min_f32 v3, v4, v5
-; GFX11-NEXT: v_max_f32_e32 v4, v6, v7
-; GFX11-NEXT: v_min3_f32 v0, v0, v1, v8
-; GFX11-NEXT: v_cmp_gt_f32_e32 vcc_lo, v2, v10
-; GFX11-NEXT: v_cmp_lt_f32_e64 s0, v3, v10
-; GFX11-NEXT: v_cmp_gt_f32_e64 s1, v4, v10
-; GFX11-NEXT: v_cmp_lt_f32_e64 s2, v0, v10
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_or_b32 s1, s2, vcc_lo
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test116:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v8, v8, v9
-; GFX11NONANS-NEXT: v_dual_max_f32 v2, v2, v3 :: v_dual_min_f32 v3, v4, v5
-; GFX11NONANS-NEXT: v_max_f32_e32 v4, v6, v7
-; GFX11NONANS-NEXT: v_min3_f32 v0, v0, v1, v8
-; GFX11NONANS-NEXT: v_cmp_gt_f32_e32 vcc_lo, v2, v10
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e64 s0, v3, v10
-; GFX11NONANS-NEXT: v_cmp_gt_f32_e64 s1, v4, v10
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e64 s2, v0, v10
-; GFX11NONANS-NEXT: s_or_b32 s0, s0, s1
-; GFX11NONANS-NEXT: s_or_b32 s1, s2, vcc_lo
-; GFX11NONANS-NEXT: s_or_b32 s0, s0, s1
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test116:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v9, v9, v9 :: v_dual_max_f32 v8, v8, v8
+; GCN-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT: v_dual_max_f32 v5, v5, v5 :: v_dual_max_f32 v4, v4, v4
+; GCN-NEXT: v_dual_max_f32 v7, v7, v7 :: v_dual_max_f32 v6, v6, v6
+; GCN-NEXT: v_min_f32_e32 v8, v8, v9
+; GCN-NEXT: v_dual_max_f32 v2, v2, v3 :: v_dual_min_f32 v3, v4, v5
+; GCN-NEXT: v_max_f32_e32 v4, v6, v7
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v8
+; GCN-NEXT: v_cmp_gt_f32_e32 vcc_lo, v2, v10
+; GCN-NEXT: v_cmp_lt_f32_e64 s0, v3, v10
+; GCN-NEXT: v_cmp_gt_f32_e64 s1, v4, v10
+; GCN-NEXT: v_cmp_lt_f32_e64 s2, v0, v10
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: s_or_b32 s1, s2, vcc_lo
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %C
%cmp2 = fcmp olt float %arg2, %C
%cmp3 = fcmp ogt float %arg3, %C
@@ -2491,45 +2289,27 @@ define i1 @test116(float %arg1, float %arg2, float %arg3, float %arg4, float %ar
}
define i1 @test117(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %arg9, float %arg10, float %arg11, float %arg12, float %C1, float %C2) {
-; GFX11-LABEL: test117:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v6, v6, v6
-; GFX11-NEXT: v_dual_max_f32 v7, v7, v7 :: v_dual_max_f32 v10, v10, v10
-; GFX11-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT: v_dual_max_f32 v11, v11, v11 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT: v_min_f32_e32 v6, v6, v7
-; GFX11-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v10, v11
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v3
-; GFX11-NEXT: v_min3_f32 v3, v4, v5, v6
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v12
-; GFX11-NEXT: v_min3_f32 v0, v8, v9, v1
-; GFX11-NEXT: v_cmp_lt_f32_e64 s0, v2, v13
-; GFX11-NEXT: v_cmp_lt_f32_e64 s1, v3, v13
-; GFX11-NEXT: v_cmp_lt_f32_e64 s2, v0, v12
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_or_b32 s0, s2, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test117:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v6, v6, v7
-; GFX11NONANS-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v10, v11
-; GFX11NONANS-NEXT: v_min_f32_e32 v2, v2, v3
-; GFX11NONANS-NEXT: v_min3_f32 v3, v4, v5, v6
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v12
-; GFX11NONANS-NEXT: v_min3_f32 v0, v8, v9, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e64 s0, v2, v13
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e64 s1, v3, v13
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e64 s2, v0, v12
-; GFX11NONANS-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11NONANS-NEXT: s_or_b32 s0, s0, s1
-; GFX11NONANS-NEXT: s_or_b32 s0, s2, s0
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test117:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v6, v6, v6
+; GCN-NEXT: v_dual_max_f32 v7, v7, v7 :: v_dual_max_f32 v10, v10, v10
+; GCN-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT: v_dual_max_f32 v11, v11, v11 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT: v_min_f32_e32 v6, v6, v7
+; GCN-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v10, v11
+; GCN-NEXT: v_min_f32_e32 v2, v2, v3
+; GCN-NEXT: v_min3_f32 v3, v4, v5, v6
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v12
+; GCN-NEXT: v_min3_f32 v0, v8, v9, v1
+; GCN-NEXT: v_cmp_lt_f32_e64 s0, v2, v13
+; GCN-NEXT: v_cmp_lt_f32_e64 s1, v3, v13
+; GCN-NEXT: v_cmp_lt_f32_e64 s2, v0, v12
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: s_or_b32 s0, s2, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %C1
%cmp2 = fcmp olt float %arg2, %C1
%cmp3 = fcmp olt float %arg3, %C2
@@ -2661,9 +2441,10 @@ define i1 @test122(double %arg1, double %arg2, double %arg3) #1 {
; GCN-LABEL: test122:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult double %arg1, %arg3
%cmp2 = fcmp ult double %arg2, %arg3
@@ -2677,9 +2458,10 @@ define i1 @test123(double %arg1, double %arg2, double %arg3) #1 {
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
@@ -2814,7 +2596,6 @@ define i1 @test131(i16 %arg1, i32 %arg2) {
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test131:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -2823,7 +2604,6 @@ define i1 @test131(i16 %arg1, i32 %arg2) {
; GCN-TRUE16-NEXT: s_or_b32 s0, s0, vcc_lo
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test131:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -2875,22 +2655,14 @@ define i1 @test133(i32 %arg1, i32 %arg2) {
}
define i1 @test134(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test134:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_gt_f32_e64 s0, v2, v1
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test134:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test134:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_gt_f32_e64 s0, v2, v1
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %arg3
%cmp2 = fcmp ogt float %arg3, %arg2
%and1 = and i1 %cmp1, %cmp2
@@ -2898,22 +2670,14 @@ define i1 @test134(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test135(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test135:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_nge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_nle_f32_e64 s0, v2, v1
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test135:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test135:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_nge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_nle_f32_e64 s0, v2, v1
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult float %arg1, %arg3
%cmp2 = fcmp ugt float %arg3, %arg2
%or1 = or i1 %cmp1, %cmp2
@@ -2921,26 +2685,16 @@ define i1 @test135(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test136(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test136:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_ge_f64_e64 s0, v[4:5], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test136:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test136:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_ge_f64_e64 s0, v[4:5], v[2:3]
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
%cmp1 = fcmp ole double %var1, %arg3
@@ -2950,23 +2704,15 @@ define i1 @test136(double %arg1, double %arg2, double %arg3) {
}
define i1 @test137(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test137:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_nlt_f32_e64 s0, v2, v1
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test137:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test137:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_nlt_f32_e64 s0, v2, v1
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp ule float %var1, %arg3
@@ -2976,22 +2722,14 @@ define i1 @test137(float %arg1, float %arg2, float %arg3) {
}
define i1 @test138(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test138:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_lt_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test138:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test138:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_lt_f32_e64 s0, v1, v2
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %arg3
%cmp2 = fcmp olt float %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -2999,22 +2737,14 @@ define i1 @test138(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test139(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test139:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test139:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test139:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ole double %arg1, %arg3
%cmp2 = fcmp ole double %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -3022,22 +2752,14 @@ define i1 @test139(double %arg1, double %arg2, double %arg3) #0 {
}
define i1 @test140(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test140:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test140:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test140:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ogt double %arg1, %arg3
%cmp2 = fcmp ogt double %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -3045,22 +2767,14 @@ define i1 @test140(double %arg1, double %arg2, double %arg3) #0 {
}
define i1 @test141(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test141:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_ge_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test141:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test141:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_ge_f32_e64 s0, v1, v2
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp oge float %arg1, %arg3
%cmp2 = fcmp oge float %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -3068,22 +2782,14 @@ define i1 @test141(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test142(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test142:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test142:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test142:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ugt double %arg1, %arg3
%cmp2 = fcmp ugt double %arg2, %arg3
%or1 = or i1 %cmp1, %cmp2
@@ -3091,22 +2797,14 @@ define i1 @test142(double %arg1, double %arg2, double %arg3) #0 {
}
define i1 @test143(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test143:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_nlt_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test143:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test143:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_nlt_f32_e64 s0, v1, v2
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp uge float %arg1, %arg3
%cmp2 = fcmp uge float %arg2, %arg3
%or1 = or i1 %cmp1, %cmp2
@@ -3114,22 +2812,14 @@ define i1 @test143(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test144(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test144:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_ngt_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test144:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test144:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_ngt_f32_e64 s0, v1, v2
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ule float %arg1, %arg3
%cmp2 = fcmp ule float %arg2, %arg3
%or1 = or i1 %cmp1, %cmp2
@@ -3137,22 +2827,14 @@ define i1 @test144(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test145(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test145:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test145:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test145:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult double %arg1, %arg3
%cmp2 = fcmp ult double %arg2, %arg3
%or1 = or i1 %cmp1, %cmp2
@@ -3160,23 +2842,15 @@ define i1 @test145(double %arg1, double %arg2, double %arg3) #0 {
}
define i1 @test146(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test146:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_lt_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test146:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test146:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_lt_f32_e64 s0, v1, v2
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp olt float %var1, %arg3
@@ -3186,26 +2860,16 @@ define i1 @test146(float %arg1, float %arg2, float %arg3) {
}
define i1 @test147(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test147:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test147:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test147:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
%cmp1 = fcmp ole double %var1, %arg3
@@ -3215,26 +2879,16 @@ define i1 @test147(double %arg1, double %arg2, double %arg3) {
}
define i1 @test148(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test148:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test148:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test148:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
%cmp1 = fcmp ogt double %var1, %arg3
@@ -3244,23 +2898,15 @@ define i1 @test148(double %arg1, double %arg2, double %arg3) {
}
define i1 @test149(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test149:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_ge_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test149:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test149:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_ge_f32_e64 s0, v1, v2
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp oge float %var1, %arg3
@@ -3270,26 +2916,16 @@ define i1 @test149(float %arg1, float %arg2, float %arg3) {
}
define i1 @test150(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test150:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test150:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test150:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
%cmp1 = fcmp ugt double %var1, %arg3
@@ -3299,23 +2935,15 @@ define i1 @test150(double %arg1, double %arg2, double %arg3) {
}
define i1 @test151(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test151:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_nlt_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test151:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test151:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_nlt_f32_e64 s0, v1, v2
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp uge float %var1, %arg3
@@ -3325,23 +2953,15 @@ define i1 @test151(float %arg1, float %arg2, float %arg3) {
}
define i1 @test152(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test152:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_ngt_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test152:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test152:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_ngt_f32_e64 s0, v1, v2
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp ule float %var1, %arg3
@@ -3351,26 +2971,16 @@ define i1 @test152(float %arg1, float %arg2, float %arg3) {
}
define i1 @test153(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test153:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test153:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test153:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
%cmp1 = fcmp ult double %var1, %arg3
@@ -3387,5 +2997,4 @@ declare <2 x half> @llvm.canonicalize.v2f16(<2 x half>)
attributes #0 = { nounwind "amdgpu-ieee"="false" }
attributes #1 = { nounwind "no-nans-fp-math"="true" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX11NONANS-FAKE16: {{.*}}
-; GFX11NONANS-TRUE16: {{.*}}
+; GFX11: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps_nnan.ll b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps_nnan.ll
new file mode 100644
index 0000000000000..37ef7949fe5c9
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps_nnan.ll
@@ -0,0 +1,1449 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck %s -check-prefixes=GCN,GCN-TRUE16
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck %s -check-prefixes=GCN,GCN-FAKE16
+
+; The tests check the following optimization of DAGCombiner:
+; CMP(A,C)||CMP(B,C) => CMP(MIN/MAX(A,B), C)
+; CMP(A,C)&&CMP(B,C) => CMP(MIN/MAX(A,B), C)
+
+define i1 @test54(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test54:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %arg3
+ %cmp2 = fcmp nnan olt float %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test55(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test55:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ole double %arg1, %arg3
+ %cmp2 = fcmp nnan ole double %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test56(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test56:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ogt double %arg1, %arg3
+ %cmp2 = fcmp nnan ogt double %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test57(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test57:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan oge float %arg1, %arg3
+ %cmp2 = fcmp nnan oge float %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test58(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test58:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ugt double %arg1, %arg3
+ %cmp2 = fcmp nnan ugt double %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test59(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test59:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan uge float %arg1, %arg3
+ %cmp2 = fcmp nnan uge float %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test60(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test60:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ule float %arg1, %arg3
+ %cmp2 = fcmp nnan ule float %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test61(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test61:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ult double %arg1, %arg3
+ %cmp2 = fcmp nnan ult double %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test62(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test62:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, 1.0
+ %add2 = fadd nnan float %arg2, 2.0
+ %cmp1 = fcmp nnan olt float %add1, %arg3
+ %cmp2 = fcmp nnan olt float %add2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test63(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test63:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GCN-NEXT: v_add_f64 v[2:3], v[2:3], 2.0
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan double %arg1, 1.0
+ %add2 = fadd nnan double %arg2, 2.0
+ %cmp1 = fcmp nnan ole double %add1, %arg3
+ %cmp2 = fcmp nnan ole double %add2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test64(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GCN-NEXT: v_add_f64 v[2:3], v[2:3], 2.0
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan double %arg1, 1.0
+ %add2 = fadd nnan double %arg2, 2.0
+ %cmp1 = fcmp nnan ogt double %add1, %arg3
+ %cmp2 = fcmp nnan ogt double %add2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test65(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test65:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, 1.0
+ %add2 = fadd nnan float %arg2, 2.0
+ %cmp1 = fcmp nnan oge float %add1, %arg3
+ %cmp2 = fcmp nnan oge float %add2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test66(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test66:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GCN-NEXT: v_add_f64 v[2:3], v[2:3], 2.0
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan double %arg1, 1.0
+ %add2 = fadd nnan double %arg2, 2.0
+ %cmp1 = fcmp nnan ugt double %add1, %arg3
+ %cmp2 = fcmp nnan ugt double %add2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test67(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test67:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, 1.0
+ %add2 = fadd nnan float %arg2, 2.0
+ %cmp1 = fcmp nnan uge float %add1, %arg3
+ %cmp2 = fcmp nnan uge float %add2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test68(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test68:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, 1.0
+ %add2 = fadd nnan float %arg2, 2.0
+ %cmp1 = fcmp nnan ule float %add1, %arg3
+ %cmp2 = fcmp nnan ule float %add2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test69(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test69:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GCN-NEXT: v_add_f64 v[2:3], v[2:3], 2.0
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan double %arg1, 1.0
+ %add2 = fadd nnan double %arg2, 2.0
+ %cmp1 = fcmp nnan ult double %add1, %arg3
+ %cmp2 = fcmp nnan ult double %add2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+; test70: (nnan olt a, C) | (nnan olt b, C) folds to a single v_min_f32 + one v_cmp_lt.
+define i1 @test70(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test70:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan olt float %var1, %arg3
+ %cmp2 = fcmp nnan olt float %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+; test71: f64 ole OR; operands are canonicalized (v_max_f64 x,x) before v_min_f64 + v_cmp_le.
+define i1 @test71(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test71:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ole double %var1, %arg3
+ %cmp2 = fcmp nnan ole double %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+; test72: f64 ogt OR folds to v_max_f64 + one v_cmp_gt (after canonicalization).
+define i1 @test72(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test72:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ogt double %var1, %arg3
+ %cmp2 = fcmp nnan ogt double %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+; test73: f32 oge OR folds to v_max_f32 + one v_cmp_ge.
+define i1 @test73(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test73:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan oge float %var1, %arg3
+ %cmp2 = fcmp nnan oge float %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+; test74: AND of unordered (ugt) compares — with nnan the u-predicates behave like the
+; ordered ones, so this folds to v_min_f64 + one v_cmp_gt.
+define i1 @test74(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test74:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ugt double %var1, %arg3
+ %cmp2 = fcmp nnan ugt double %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+; test75: f32 uge AND folds to v_min_f32 + one v_cmp_ge.
+define i1 @test75(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test75:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan uge float %var1, %arg3
+ %cmp2 = fcmp nnan uge float %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+; test76: f32 ule AND folds to v_max_f32 + one v_cmp_le.
+define i1 @test76(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test76:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan ule float %var1, %arg3
+ %cmp2 = fcmp nnan ule float %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+; test77: f64 ult AND folds to v_max_f64 + one v_cmp_lt (after canonicalization).
+define i1 @test77(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test77:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ult double %var1, %arg3
+ %cmp2 = fcmp nnan ult double %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+; test78: second compare has swapped operands ((a < C) | (C > b)); still folds to
+; v_min_f32 + one v_cmp_lt. Uses attribute set #0 (defined outside this hunk).
+define i1 @test78(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test78:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %arg3
+ %cmp2 = fcmp nnan ogt float %arg3, %arg2
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+; test79: swapped-operand AND of unordered compares ((a < C) & (C > b), u-preds)
+; folds to v_max_f32 + one v_cmp_lt.
+define i1 @test79(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test79:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ult float %arg1, %arg3
+ %cmp2 = fcmp nnan ugt float %arg3, %arg2
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+; test80: compare operands come from nnan fadds; the adds survive and the
+; oge/swapped-ole OR still folds to v_max_f32 + one v_cmp_ge.
+define i1 @test80(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test80:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, 1.0
+ %add2 = fadd nnan float %arg2, 2.0
+ %cmp1 = fcmp nnan oge float %add1, %arg3
+ %cmp2 = fcmp nnan ole float %arg3, %add2
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+; test81: f64 variant of test80 with unordered predicates and AND;
+; folds to v_min_f64 + one v_cmp_gt after the adds.
+define i1 @test81(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test81:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GCN-NEXT: v_add_f64 v[2:3], v[2:3], 2.0
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan double %arg1, 1.0
+ %add2 = fadd nnan double %arg2, 2.0
+ %cmp1 = fcmp nnan ugt double %add1, %arg3
+ %cmp2 = fcmp nnan ult double %arg3, %add2
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+; test82: ole + swapped oge OR (f64, canonicalized) folds to v_min_f64 + one v_cmp_le.
+define i1 @test82(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test82:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ole double %var1, %arg3
+ %cmp2 = fcmp nnan oge double %arg3, %var2
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+; test83: ule + swapped uge AND (f32) folds to v_max_f32 + one v_cmp_le.
+define i1 @test83(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test83:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan ule float %var1, %arg3
+ %cmp2 = fcmp nnan uge float %arg3, %var2
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+; test84: f16 olt OR folds to v_min_f16 + one v_cmp_lt (true16/fake16 register variants).
+define i1 @test84(half %arg1, half %arg2, half %arg3) {
+; GCN-TRUE16-LABEL: test84:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l
+; GCN-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v0.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test84:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1
+; GCN-FAKE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan half @llvm.canonicalize.f16(half %arg1)
+ %var2 = call nnan half @llvm.canonicalize.f16(half %arg2)
+ %cmp1 = fcmp nnan olt half %var1, %arg3
+ %cmp2 = fcmp nnan olt half %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+; test85: v2f16 ole OR — note the canonicalize calls here carry no nnan, only the fcmps do;
+; vector folds to v_pk_min_f16 then one per-lane v_cmp_le.
+define <2 x i1> @test85(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; GCN-TRUE16-LABEL: test85:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-TRUE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-TRUE16-NEXT: v_pk_min_f16 v1, v0, v1
+; GCN-TRUE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v1.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v1.h, v2.h
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test85:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-FAKE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-FAKE16-NEXT: v_pk_min_f16 v0, v0, v1
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GCN-FAKE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v3, v1
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+ %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+ %cmp1 = fcmp nnan ole <2 x half> %var1, %arg3
+ %cmp2 = fcmp nnan ole <2 x half> %var2, %arg3
+ %or1 = or <2 x i1> %cmp1, %cmp2
+ ret <2 x i1> %or1
+}
+
+; test86: v2f16 ogt OR folds to v_pk_max_f16 + per-lane v_cmp_gt.
+define <2 x i1> @test86(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; GCN-TRUE16-LABEL: test86:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-TRUE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-TRUE16-NEXT: v_pk_max_f16 v1, v0, v1
+; GCN-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v1.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v1.h, v2.h
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test86:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-FAKE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-FAKE16-NEXT: v_pk_max_f16 v0, v0, v1
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GCN-FAKE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v3, v1
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+ %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+ %cmp1 = fcmp nnan ogt <2 x half> %var1, %arg3
+ %cmp2 = fcmp nnan ogt <2 x half> %var2, %arg3
+ %or1 = or <2 x i1> %cmp1, %cmp2
+ ret <2 x i1> %or1
+}
+
+; test87: f16 oge OR folds to v_max_f16 + one v_cmp_ge.
+define i1 @test87(half %arg1, half %arg2, half %arg3) {
+; GCN-TRUE16-LABEL: test87:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l
+; GCN-TRUE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test87:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1
+; GCN-FAKE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan half @llvm.canonicalize.f16(half %arg1)
+ %var2 = call nnan half @llvm.canonicalize.f16(half %arg2)
+ %cmp1 = fcmp nnan oge half %var1, %arg3
+ %cmp2 = fcmp nnan oge half %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+; test88: v2f16 ugt AND folds to v_pk_min_f16 + per-lane v_cmp_gt.
+define <2 x i1> @test88(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; GCN-TRUE16-LABEL: test88:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-TRUE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-TRUE16-NEXT: v_pk_min_f16 v1, v0, v1
+; GCN-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v1.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v1.h, v2.h
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test88:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-FAKE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-FAKE16-NEXT: v_pk_min_f16 v0, v0, v1
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GCN-FAKE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v3, v1
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+ %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+ %cmp1 = fcmp nnan ugt <2 x half> %var1, %arg3
+ %cmp2 = fcmp nnan ugt <2 x half> %var2, %arg3
+ %and1 = and <2 x i1> %cmp1, %cmp2
+ ret <2 x i1> %and1
+}
+
+; test89: f16 uge AND folds to v_min_f16 + one v_cmp_ge.
+define i1 @test89(half %arg1, half %arg2, half %arg3) {
+; GCN-TRUE16-LABEL: test89:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l
+; GCN-TRUE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test89:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1
+; GCN-FAKE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan half @llvm.canonicalize.f16(half %arg1)
+ %var2 = call nnan half @llvm.canonicalize.f16(half %arg2)
+ %cmp1 = fcmp nnan uge half %var1, %arg3
+ %cmp2 = fcmp nnan uge half %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+; test90: f16 ule AND folds to v_max_f16 + one v_cmp_le.
+define i1 @test90(half %arg1, half %arg2, half %arg3) {
+; GCN-TRUE16-LABEL: test90:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l
+; GCN-TRUE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v0.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test90:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1
+; GCN-FAKE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan half @llvm.canonicalize.f16(half %arg1)
+ %var2 = call nnan half @llvm.canonicalize.f16(half %arg2)
+ %cmp1 = fcmp nnan ule half %var1, %arg3
+ %cmp2 = fcmp nnan ule half %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+; test91: v2f16 ult AND folds to v_pk_max_f16 + per-lane v_cmp_lt.
+define <2 x i1> @test91(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; GCN-TRUE16-LABEL: test91:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-TRUE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-TRUE16-NEXT: v_pk_max_f16 v1, v0, v1
+; GCN-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v1.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v1.h, v2.h
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test91:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-FAKE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-FAKE16-NEXT: v_pk_max_f16 v0, v0, v1
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GCN-FAKE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v3, v1
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+ %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+ %cmp1 = fcmp nnan ult <2 x half> %var1, %arg3
+ %cmp2 = fcmp nnan ult <2 x half> %var2, %arg3
+ %and1 = and <2 x i1> %cmp1, %cmp2
+ ret <2 x i1> %and1
+}
+
+; test107: three-way olt OR collapses into a single v_min3_f32 + one v_cmp_lt.
+define i1 @test107(float %arg1, float %arg2, float %arg3, float %C) {
+; GCN-LABEL: test107:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v3
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %C
+ %cmp2 = fcmp nnan olt float %arg2, %C
+ %cmp3 = fcmp nnan olt float %arg3, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %or2 = or i1 %or1, %cmp3
+ ret i1 %or2
+}
+
+; test108: three-way ult AND collapses into v_max3_f32 + one v_cmp_lt.
+define i1 @test108(float %arg1, float %arg2, float %arg3, float %C) {
+; GCN-LABEL: test108:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v3
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ult float %arg1, %C
+ %cmp2 = fcmp nnan ult float %arg2, %C
+ %cmp3 = fcmp nnan ult float %arg3, %C
+ %and1 = and i1 %cmp1, %cmp2
+ %and2 = and i1 %and1, %cmp3
+ ret i1 %and2
+}
+
+; test109: mixed groups — olt pair folds to min, ogt pair to max; the two compares
+; are then OR'd (s_or_b32), no further folding possible across opposite predicates.
+define i1 @test109(float %arg1, float %arg2, float %arg3, float %arg4, float %C) {
+; GCN-LABEL: test109:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
+; GCN-NEXT: v_cmp_gt_f32_e64 s0, v1, v4
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %C
+ %cmp2 = fcmp nnan olt float %arg2, %C
+ %cmp3 = fcmp nnan ogt float %arg3, %C
+ %cmp4 = fcmp nnan ogt float %arg4, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %or2 = or i1 %cmp3, %cmp4
+ %or3 = or i1 %or1, %or2
+ ret i1 %or3
+}
+
+; test110: AND tree of ult/ugt groups over nnan fadds — ult pair folds to max,
+; ugt pair to min, then one s_and_b32.
+; NOTE(review): %or1-%or3 are `and` results; the names are misleading — consider
+; renaming to %and1-%and3 (value names do not affect the generated checks).
+define i1 @test110(float %arg1, float %arg2, float %arg3, float %arg4, float %C1, float %C2, float %C3, float %C4, float %C) #0 {
+; GCN-LABEL: test110:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v5
+; GCN-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v3, v3, v7
+; GCN-NEXT: v_dual_max_f32 v0, v0, v1 :: v_dual_min_f32 v1, v2, v3
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cmp_gt_f32_e64 s0, v1, v8
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, %C1
+ %add2 = fadd nnan float %arg2, %C2
+ %add3 = fadd nnan float %arg3, %C3
+ %add4 = fadd nnan float %arg4, %C4
+ %cmp1 = fcmp nnan ult float %add1, %C
+ %cmp2 = fcmp nnan ult float %add2, %C
+ %cmp3 = fcmp nnan ugt float %add3, %C
+ %cmp4 = fcmp nnan ugt float %add4, %C
+ %or1 = and i1 %cmp1, %cmp2
+ %or2 = and i1 %cmp3, %cmp4
+ %or3 = and i1 %or1, %or2
+ ret i1 %or3
+}
+
+; test111: deep OR tree of seven olt compares folds into chained v_min/v_min3.
+; NOTE(review): %or7 (and thus %cmp8/%arg8) is dead — the function returns %or6.
+; Confirm whether `ret i1 %or7` was intended; the generated checks below match
+; the current %or6 behavior (only seven inputs participate in the min chain).
+define i1 @test111(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %C) {
+; GCN-LABEL: test111:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v2, v2, v3
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_min_f32_e32 v0, v0, v4
+; GCN-NEXT: v_min3_f32 v0, v5, v6, v0
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %C
+ %cmp2 = fcmp nnan olt float %arg2, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %cmp3 = fcmp nnan olt float %arg3, %C
+ %cmp4 = fcmp nnan olt float %arg4, %C
+ %or2 = or i1 %cmp3, %cmp4
+ %cmp5 = fcmp nnan olt float %arg5, %C
+ %or3 = or i1 %or1, %or2
+ %or4 = or i1 %or3, %cmp5
+ %cmp6 = fcmp nnan olt float %arg6, %C
+ %cmp7 = fcmp nnan olt float %arg7, %C
+ %or5 = or i1 %cmp6, %cmp7
+ %cmp8 = fcmp nnan olt float %arg8, %C
+ %or6 = or i1 %or5, %or4
+ %or7 = or i1 %or6, %cmp8
+ ret i1 %or6
+}
+
+; test112: same shape as test111 but with ult for %cmp5/%cmp8 — under nnan the
+; u-predicates behave like olt, so the output is identical to test111.
+; NOTE(review): as in test111, %or7 (and thus %cmp8/%arg8) is dead — `ret i1 %or6`.
+; Confirm intent; the checks match %or6.
+define i1 @test112(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %C) {
+; GCN-LABEL: test112:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v2, v2, v3
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_min_f32_e32 v0, v0, v4
+; GCN-NEXT: v_min3_f32 v0, v5, v6, v0
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %C
+ %cmp2 = fcmp nnan olt float %arg2, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %cmp3 = fcmp nnan olt float %arg3, %C
+ %cmp4 = fcmp nnan olt float %arg4, %C
+ %or2 = or i1 %cmp3, %cmp4
+ %cmp5 = fcmp nnan ult float %arg5, %C
+ %or3 = or i1 %or1, %or2
+ %or4 = or i1 %or3, %cmp5
+ %cmp6 = fcmp nnan olt float %arg6, %C
+ %cmp7 = fcmp nnan olt float %arg7, %C
+ %or5 = or i1 %cmp6, %cmp7
+ %cmp8 = fcmp nnan ult float %arg8, %C
+ %or6 = or i1 %or5, %or4
+ %or7 = or i1 %or6, %cmp8
+ ret i1 %or6
+}
+
+; test113: (ult AND ult) OR olt — the inner AND becomes max, the outer OR min,
+; fusing into a single v_maxmin_f32.
+define i1 @test113(float %arg1, float %arg2, float %arg3, float %C) {
+; GCN-LABEL: test113:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_maxmin_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v3
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ult float %arg1, %C
+ %cmp2 = fcmp nnan ult float %arg2, %C
+ %cmp3 = fcmp nnan olt float %arg3, %C
+ %and1 = and i1 %cmp1, %cmp2
+ %or1 = or i1 %and1, %cmp3
+ ret i1 %or1
+}
+
+; test114: (ogt OR ogt) AND ult — the OR folds to max + cmp_gt; the ult compare
+; stays separate and the two results are AND'd (s_and_b32).
+define i1 @test114(float %arg1, float %arg2, float %arg3, float %C) {
+; GCN-LABEL: test114:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v2, v3
+; GCN-NEXT: v_cmp_gt_f32_e64 s0, v0, v3
+; GCN-NEXT: s_and_b32 s0, s0, vcc_lo
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ogt float %arg1, %C
+ %cmp2 = fcmp nnan ogt float %arg2, %C
+ %cmp3 = fcmp nnan ult float %arg3, %C
+ %and1 = or i1 %cmp1, %cmp2
+ %or1 = and i1 %and1, %cmp3
+ ret i1 %or1
+}
+
+; test115: olt OR pair plus (ult AND ult) over canonicalized values — the AND
+; becomes max, then everything merges into one v_min3_f32 + v_cmp_lt.
+define i1 @test115(float %arg1, float %arg2, float %arg3, float %arg4, float %C) {
+; GCN-LABEL: test115:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v2, v2, v3
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %C
+ %cmp2 = fcmp nnan olt float %arg2, %C
+ %var3 = call nnan float @llvm.canonicalize.f32(float %arg3)
+ %var4 = call nnan float @llvm.canonicalize.f32(float %arg4)
+ %cmp3 = fcmp nnan ult float %var3, %C
+ %cmp4 = fcmp nnan ult float %var4, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %and1 = and i1 %cmp3, %cmp4
+ %or2 = or i1 %or1, %and1
+ ret i1 %or2
+}
+
+; test116: ten inputs, alternating olt/ogt pairs — like-predicate groups fold to
+; min/min3/max, unlike groups remain separate compares OR'd with s_or_b32.
+define i1 @test116(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %arg9, float %arg10, float %C) {
+; GCN-LABEL: test116:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v8, v8, v9
+; GCN-NEXT: v_dual_max_f32 v2, v2, v3 :: v_dual_min_f32 v3, v4, v5
+; GCN-NEXT: v_max_f32_e32 v4, v6, v7
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v8
+; GCN-NEXT: v_cmp_gt_f32_e32 vcc_lo, v2, v10
+; GCN-NEXT: v_cmp_lt_f32_e64 s0, v3, v10
+; GCN-NEXT: v_cmp_gt_f32_e64 s1, v4, v10
+; GCN-NEXT: v_cmp_lt_f32_e64 s2, v0, v10
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: s_or_b32 s1, s2, vcc_lo
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %C
+ %cmp2 = fcmp nnan olt float %arg2, %C
+ %cmp3 = fcmp nnan ogt float %arg3, %C
+ %cmp4 = fcmp nnan ogt float %arg4, %C
+ %cmp5 = fcmp nnan olt float %arg5, %C
+ %cmp6 = fcmp nnan olt float %arg6, %C
+ %cmp7 = fcmp nnan ogt float %arg7, %C
+ %cmp8 = fcmp nnan ogt float %arg8, %C
+ %cmp9 = fcmp nnan olt float %arg9, %C
+ %cmp10 = fcmp nnan olt float %arg10, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %or2 = or i1 %cmp3, %cmp4
+ %or3 = or i1 %cmp5, %cmp6
+ %or4 = or i1 %cmp7, %cmp8
+ %or5 = or i1 %cmp9, %cmp10
+ %or6 = or i1 %or1, %or2
+ %or7 = or i1 %or3, %or4
+ %or8 = or i1 %or5, %or6
+ %or9 = or i1 %or7, %or8
+ ret i1 %or9
+}
+
+; test117: twelve olt compares against two distinct thresholds (%C1, %C2) — values
+; sharing a threshold fold into min/min3 chains; four compares remain, OR'd together.
+define i1 @test117(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %arg9, float %arg10, float %arg11, float %arg12, float %C1, float %C2) {
+; GCN-LABEL: test117:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v6, v6, v7
+; GCN-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v10, v11
+; GCN-NEXT: v_min_f32_e32 v2, v2, v3
+; GCN-NEXT: v_min3_f32 v3, v4, v5, v6
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v12
+; GCN-NEXT: v_min3_f32 v0, v8, v9, v1
+; GCN-NEXT: v_cmp_lt_f32_e64 s0, v2, v13
+; GCN-NEXT: v_cmp_lt_f32_e64 s1, v3, v13
+; GCN-NEXT: v_cmp_lt_f32_e64 s2, v0, v12
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: s_or_b32 s0, s2, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %C1
+ %cmp2 = fcmp nnan olt float %arg2, %C1
+ %cmp3 = fcmp nnan olt float %arg3, %C2
+ %cmp4 = fcmp nnan olt float %arg4, %C2
+ %cmp5 = fcmp nnan olt float %arg5, %C2
+ %cmp6 = fcmp nnan olt float %arg6, %C2
+ %cmp7 = fcmp nnan olt float %arg7, %C2
+ %cmp8 = fcmp nnan olt float %arg8, %C2
+ %cmp9 = fcmp nnan olt float %arg9, %C1
+ %cmp10 = fcmp nnan olt float %arg10, %C1
+ %cmp11 = fcmp nnan olt float %arg11, %C1
+ %cmp12 = fcmp nnan olt float %arg12, %C1
+ %or1 = or i1 %cmp1, %cmp2
+ %or2 = or i1 %cmp3, %cmp4
+ %or3 = or i1 %cmp5, %cmp6
+ %or4 = or i1 %cmp7, %cmp8
+ %or5 = or i1 %cmp9, %cmp10
+ %or6 = or i1 %cmp11, %cmp12
+ %or7 = or i1 %or1, %or2
+ %or8 = or i1 %or3, %or4
+ %or9 = or i1 %or5, %or6
+ %or10 = or i1 %or7, %or8
+ %or11 = or i1 %or9, %or10
+ ret i1 %or11
+}
+
+
+; test118: (ult OR ult) AND (ult AND ult) over nnan fadds — the OR side becomes
+; min, the AND side max, fusing into v_min + v_max3.
+define i1 @test118(float %arg1, float %arg2, float %arg3, float %arg4, float %C1, float %C2, float %C3, float %C4, float %C) #0 {
+; GCN-LABEL: test118:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v5
+; GCN-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v3, v3, v7
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_max3_f32 v0, v0, v2, v3
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, %C1
+ %add2 = fadd nnan float %arg2, %C2
+ %add3 = fadd nnan float %arg3, %C3
+ %add4 = fadd nnan float %arg4, %C4
+ %cmp1 = fcmp nnan ult float %add1, %C
+ %cmp2 = fcmp nnan ult float %add2, %C
+ %cmp3 = fcmp nnan ult float %add3, %C
+ %cmp4 = fcmp nnan ult float %add4, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %and1 = and i1 %cmp3, %cmp4
+ %and2 = and i1 %or1, %and1
+ ret i1 %and2
+}
+
+; test119: (ult OR ult) AND (ult OR ult) — both OR sides become mins, then the
+; outer AND fuses as v_minmax_f32.
+define i1 @test119(float %arg1, float %arg2, float %arg3, float %arg4, float %C1, float %C2, float %C3, float %C4, float %C) #0 {
+; GCN-LABEL: test119:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v3, v3, v7
+; GCN-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v5
+; GCN-NEXT: v_min_f32_e32 v2, v2, v3
+; GCN-NEXT: v_minmax_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, %C1
+ %add2 = fadd nnan float %arg2, %C2
+ %add3 = fadd nnan float %arg3, %C3
+ %add4 = fadd nnan float %arg4, %C4
+ %cmp1 = fcmp nnan ult float %add1, %C
+ %cmp2 = fcmp nnan ult float %add2, %C
+ %cmp3 = fcmp nnan ult float %add3, %C
+ %cmp4 = fcmp nnan ult float %add4, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %and1 = or i1 %cmp3, %cmp4
+ %and2 = and i1 %or1, %and1
+ ret i1 %and2
+}
+
+; test120: (ult OR ult) OR (ult AND ult) — the AND side becomes max, then the
+; whole OR tree folds into v_max + v_min3.
+define i1 @test120(float %arg1, float %arg2, float %arg3, float %arg4, float %C1, float %C2, float %C3, float %C4, float %C) #0 {
+; GCN-LABEL: test120:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v3, v3, v7
+; GCN-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v5
+; GCN-NEXT: v_max_f32_e32 v2, v2, v3
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, %C1
+ %add2 = fadd nnan float %arg2, %C2
+ %add3 = fadd nnan float %arg3, %C3
+ %add4 = fadd nnan float %arg4, %C4
+ %cmp1 = fcmp nnan ult float %add1, %C
+ %cmp2 = fcmp nnan ult float %add2, %C
+ %cmp3 = fcmp nnan ult float %add3, %C
+ %cmp4 = fcmp nnan ult float %add4, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %and1 = and i1 %cmp3, %cmp4
+ %and2 = or i1 %or1, %and1
+ ret i1 %and2
+}
+
+; test121: (ult AND ult) OR (ult AND ult) — both AND sides become maxes, the outer
+; OR fuses as v_maxmin_f32 over the second max.
+define i1 @test121(float %arg1, float %arg2, float %arg3, float %arg4, float %C1, float %C2, float %C3, float %C4, float %C) #0 {
+; GCN-LABEL: test121:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v3, v3, v7
+; GCN-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v5
+; GCN-NEXT: v_max_f32_e32 v2, v2, v3
+; GCN-NEXT: v_maxmin_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, %C1
+ %add2 = fadd nnan float %arg2, %C2
+ %add3 = fadd nnan float %arg3, %C3
+ %add4 = fadd nnan float %arg4, %C4
+ %cmp1 = fcmp nnan ult float %add1, %C
+ %cmp2 = fcmp nnan ult float %add2, %C
+ %cmp3 = fcmp nnan ult float %add3, %C
+ %cmp4 = fcmp nnan ult float %add4, %C
+ %or1 = and i1 %cmp1, %cmp2
+ %and1 = and i1 %cmp3, %cmp4
+ %and2 = or i1 %or1, %and1
+ ret i1 %and2
+}
+
+; test122: f64 ult OR under attribute set #1 (defined outside this hunk) folds
+; directly to v_min_f64 + v_cmp_lt with no canonicalization emitted.
+define i1 @test122(double %arg1, double %arg2, double %arg3) #1 {
+; GCN-LABEL: test122:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ult double %arg1, %arg3
+ %cmp2 = fcmp nnan ult double %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+; test123: f64 ogt AND over canonicalized operands folds to v_min_f64 + v_cmp_gt.
+; NOTE(review): %or1 holds an `and` result — name is slightly misleading.
+define i1 @test123(double %arg1, double %arg2, double %arg3) #1 {
+; GCN-LABEL: test123:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ogt double %var1, %arg3
+ %cmp2 = fcmp nnan ogt double %var2, %arg3
+ %or1 = and i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+; test134: AND of olt + swapped ogt ((a < C) & (C > b)) folds to v_max_f32 + one v_cmp_lt.
+define i1 @test134(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test134:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %arg3
+ %cmp2 = fcmp nnan ogt float %arg3, %arg2
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+; test135: OR of ult + swapped ugt folds to v_min_f32 + one v_cmp_lt.
+define i1 @test135(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test135:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ult float %arg1, %arg3
+ %cmp2 = fcmp nnan ugt float %arg3, %arg2
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+; test136: f64 AND of ole + swapped oge (canonicalized) folds to v_max_f64 + v_cmp_le.
+define i1 @test136(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test136:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ole double %var1, %arg3
+ %cmp2 = fcmp nnan oge double %arg3, %var2
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test137(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test137:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan ule float %var1, %arg3
+ %cmp2 = fcmp nnan uge float %arg3, %var2
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test138(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test138:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %arg3
+ %cmp2 = fcmp nnan olt float %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test139(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test139:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ole double %arg1, %arg3
+ %cmp2 = fcmp nnan ole double %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test140(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test140:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ogt double %arg1, %arg3
+ %cmp2 = fcmp nnan ogt double %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test141(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test141:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan oge float %arg1, %arg3
+ %cmp2 = fcmp nnan oge float %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test142(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test142:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ugt double %arg1, %arg3
+ %cmp2 = fcmp nnan ugt double %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test143(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test143:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan uge float %arg1, %arg3
+ %cmp2 = fcmp nnan uge float %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test144(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test144:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ule float %arg1, %arg3
+ %cmp2 = fcmp nnan ule float %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test145(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test145:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ult double %arg1, %arg3
+ %cmp2 = fcmp nnan ult double %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test146(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test146:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan olt float %var1, %arg3
+ %cmp2 = fcmp nnan olt float %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test147(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test147:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ole double %var1, %arg3
+ %cmp2 = fcmp nnan ole double %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test148(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test148:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ogt double %var1, %arg3
+ %cmp2 = fcmp nnan ogt double %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test149(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test149:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan oge float %var1, %arg3
+ %cmp2 = fcmp nnan oge float %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test150(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test150:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ugt double %var1, %arg3
+ %cmp2 = fcmp nnan ugt double %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test151(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test151:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan uge float %var1, %arg3
+ %cmp2 = fcmp nnan uge float %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test152(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test152:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan ule float %var1, %arg3
+ %cmp2 = fcmp nnan ule float %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test153(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test153:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ult double %var1, %arg3
+ %cmp2 = fcmp nnan ult double %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+declare double @llvm.canonicalize.f64(double)
+declare float @llvm.canonicalize.f32(float)
+declare half @llvm.canonicalize.f16(half)
+declare <2 x half> @llvm.canonicalize.v2f16(<2 x half>)
+
+attributes #0 = { nounwind "amdgpu-ieee"="false" }
+attributes #1 = { nounwind "no-nans-fp-math"="true" }
diff --git a/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll
index f3a84e6e45260..0537e8c2ed59b 100644
--- a/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll
@@ -57,9 +57,9 @@ define amdgpu_kernel void @test_fmax_legacy_uge_f32_fast(ptr addrspace(1) %out,
; GCN-DAG: v_add_f32_e32 [[ADD_A:v[0-9]+]], 1.0, [[A]]
; GCN-DAG: v_add_f32_e32 [[ADD_B:v[0-9]+]], 2.0, [[B]]
-; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]]
+; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]]
-; VI: v_cmp_nlt_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
+; VI: v_cmp_ge_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[ADD_B]], [[ADD_A]]
diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
index 39eefa1879870..b478e9a0830eb 100644
--- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
@@ -81,9 +81,9 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out
; VI-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0
; VI-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0
-; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]]
+; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]]
-; VI: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
+; VI: v_cmp_le_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
; VI: v_cndmask_b32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]], vcc
define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1) %out, float %a, float %b) #0 {
%a.nnan = fadd nnan float %a, 1.0
diff --git a/llvm/test/CodeGen/AMDGPU/fold-fabs.ll b/llvm/test/CodeGen/AMDGPU/fold-fabs.ll
index bb1f01b641aee..8e65f9a5e7f70 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-fabs.ll
+++ b/llvm/test/CodeGen/AMDGPU/fold-fabs.ll
@@ -9,7 +9,7 @@ define float @fold_abs_in_branch(float %arg1, float %arg2) {
; GFX10-NEXT: s_mov_b32 s4, exec_lo
; GFX10-NEXT: v_add_f32_e32 v1, v0, v1
; GFX10-NEXT: v_add_f32_e64 v0, |v1|, |v1|
-; GFX10-NEXT: v_cmpx_nlt_f32_e32 1.0, v0
+; GFX10-NEXT: v_cmpx_ge_f32_e32 1.0, v0
; GFX10-NEXT: ; %bb.1: ; %if
; GFX10-NEXT: v_mul_f32_e64 v0, 0x3e4ccccd, |v1|
; GFX10-NEXT: ; %bb.2: ; %exit
@@ -40,7 +40,7 @@ define float @fold_abs_in_branch_multiple_users(float %arg1, float %arg2) {
; GFX10-NEXT: s_mov_b32 s4, exec_lo
; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
; GFX10-NEXT: v_add_f32_e64 v1, |v0|, |v0|
-; GFX10-NEXT: v_cmpx_nlt_f32_e32 1.0, v1
+; GFX10-NEXT: v_cmpx_ge_f32_e32 1.0, v1
; GFX10-NEXT: ; %bb.1: ; %if
; GFX10-NEXT: v_mul_f32_e64 v1, 0x3e4ccccd, |v0|
; GFX10-NEXT: ; %bb.2: ; %exit
@@ -126,7 +126,7 @@ define float @fold_abs_in_branch_fabs(float %arg1, float %arg2) {
; GFX10-NEXT: s_mov_b32 s4, exec_lo
; GFX10-NEXT: v_add_f32_e32 v1, v0, v1
; GFX10-NEXT: v_add_f32_e64 v0, |v1|, |v1|
-; GFX10-NEXT: v_cmpx_nlt_f32_e32 1.0, v0
+; GFX10-NEXT: v_cmpx_ge_f32_e32 1.0, v0
; GFX10-NEXT: ; %bb.1: ; %if
; GFX10-NEXT: v_mul_f32_e64 v0, 0x3e4ccccd, |v1|
; GFX10-NEXT: ; %bb.2: ; %exit
@@ -158,7 +158,7 @@ define float @fold_abs_in_branch_phi(float %arg1, float %arg2) {
; GFX10-NEXT: s_mov_b32 s4, exec_lo
; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
; GFX10-NEXT: v_add_f32_e64 v0, |v0|, |v0|
-; GFX10-NEXT: v_cmpx_nlt_f32_e32 1.0, v0
+; GFX10-NEXT: v_cmpx_ge_f32_e32 1.0, v0
; GFX10-NEXT: s_cbranch_execz .LBB5_3
; GFX10-NEXT: ; %bb.1: ; %header.preheader
; GFX10-NEXT: ; implicit-def: $vgpr0
@@ -202,7 +202,7 @@ define float @fold_neg_in_branch(float %arg1, float %arg2) {
; GFX10-NEXT: s_mov_b32 s4, exec_lo
; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
; GFX10-NEXT: v_mov_b32_e32 v1, v0
-; GFX10-NEXT: v_cmpx_nlt_f32_e32 1.0, v0
+; GFX10-NEXT: v_cmpx_ge_f32_e32 1.0, v0
; GFX10-NEXT: ; %bb.1: ; %if
; GFX10-NEXT: v_rcp_f32_e64 v1, -v0
; GFX10-NEXT: v_mul_f32_e64 v1, |v0|, v1
diff --git a/llvm/test/CodeGen/AMDGPU/transform-block-with-return-to-epilog.ll b/llvm/test/CodeGen/AMDGPU/transform-block-with-return-to-epilog.ll
index 0cf26be3ac24f..77d62a3a9a8cd 100644
--- a/llvm/test/CodeGen/AMDGPU/transform-block-with-return-to-epilog.ll
+++ b/llvm/test/CodeGen/AMDGPU/transform-block-with-return-to-epilog.ll
@@ -105,7 +105,7 @@ define amdgpu_ps { <4 x float> } @test_return_to_epilog_with_optimized_kill(floa
; GCN-NEXT: {{ $}}
; GCN-NEXT: renamable $vgpr1 = nofpexcept V_RCP_F32_e32 $vgpr0, implicit $mode, implicit $exec
; GCN-NEXT: $sgpr0_sgpr1 = S_MOV_B64 $exec
- ; GCN-NEXT: nofpexcept V_CMP_NGT_F32_e32 0, killed $vgpr1, implicit-def $vcc, implicit $mode, implicit $exec
+ ; GCN-NEXT: nofpexcept V_CMP_LE_F32_e32 0, killed $vgpr1, implicit-def $vcc, implicit $mode, implicit $exec
; GCN-NEXT: $sgpr2_sgpr3 = S_AND_SAVEEXEC_B64 killed $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
; GCN-NEXT: renamable $sgpr2_sgpr3 = S_XOR_B64 $exec, killed renamable $sgpr2_sgpr3, implicit-def dead $scc
; GCN-NEXT: S_CBRANCH_EXECNZ %bb.3, implicit $exec
>From 8348d7ea568d557aa9c090be83b72ed8e8fcf093 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Mon, 1 Dec 2025 10:27:43 +0800
Subject: [PATCH 09/24] fix setcc-fp in rv
---
llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll | 224 ++++++++++++------------
1 file changed, 112 insertions(+), 112 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
index 0b16eb2a17a8f..a17fe2eeff6dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
@@ -82,7 +82,7 @@ define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; ZVFBFMIN-LABEL: fcmp_oeq_vv_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -101,7 +101,7 @@ define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; ZVFBFMIN-LABEL: fcmp_oeq_vf_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
@@ -183,7 +183,7 @@ define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; ZVFBFMIN-LABEL: fcmp_ogt_vv_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -202,7 +202,7 @@ define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; ZVFBFMIN-LABEL: fcmp_ogt_vf_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
@@ -284,7 +284,7 @@ define <vscale x 8 x i1> @fcmp_oge_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; ZVFBFMIN-LABEL: fcmp_oge_vv_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -303,7 +303,7 @@ define <vscale x 8 x i1> @fcmp_oge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; ZVFBFMIN-LABEL: fcmp_oge_vf_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
@@ -385,7 +385,7 @@ define <vscale x 8 x i1> @fcmp_olt_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; ZVFBFMIN-LABEL: fcmp_olt_vv_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -404,7 +404,7 @@ define <vscale x 8 x i1> @fcmp_olt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; ZVFBFMIN-LABEL: fcmp_olt_vf_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
@@ -486,7 +486,7 @@ define <vscale x 8 x i1> @fcmp_ole_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; ZVFBFMIN-LABEL: fcmp_ole_vv_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @fcmp_ole_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; ZVFBFMIN-LABEL: fcmp_ole_vf_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
@@ -599,7 +599,7 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; ZVFBFMIN-LABEL: fcmp_one_vv_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -618,7 +618,7 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; ZVFBFMIN-LABEL: fcmp_one_vf_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
@@ -720,7 +720,7 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; ZVFBFMIN-LABEL: fcmp_ord_vv_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -745,7 +745,7 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; ZVFBFMIN-LABEL: fcmp_ord_vf_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
@@ -846,7 +846,7 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; ZVFBFMIN-LABEL: fcmp_ueq_vv_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -865,7 +865,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; ZVFBFMIN-LABEL: fcmp_ueq_vf_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
@@ -953,7 +953,7 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; ZVFBFMIN-LABEL: fcmp_ugt_vv_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -972,7 +972,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; ZVFBFMIN-LABEL: fcmp_ugt_vf_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
@@ -1060,7 +1060,7 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; ZVFBFMIN-LABEL: fcmp_uge_vv_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1079,7 +1079,7 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; ZVFBFMIN-LABEL: fcmp_uge_vf_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
@@ -1167,7 +1167,7 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; ZVFBFMIN-LABEL: fcmp_ult_vv_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1186,7 +1186,7 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; ZVFBFMIN-LABEL: fcmp_ult_vf_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
@@ -1274,7 +1274,7 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; ZVFBFMIN-LABEL: fcmp_ule_vv_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1293,7 +1293,7 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; ZVFBFMIN-LABEL: fcmp_ule_vf_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
@@ -1375,7 +1375,7 @@ define <vscale x 8 x i1> @fcmp_une_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; ZVFBFMIN-LABEL: fcmp_une_vv_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1394,7 +1394,7 @@ define <vscale x 8 x i1> @fcmp_une_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; ZVFBFMIN-LABEL: fcmp_une_vf_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
@@ -1496,7 +1496,7 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; ZVFBFMIN-LABEL: fcmp_uno_vv_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1521,7 +1521,7 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; ZVFBFMIN-LABEL: fcmp_uno_vf_nxv8bf16_nonans:
; ZVFBFMIN: # %bb.0:
; ZVFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
@@ -1610,7 +1610,7 @@ define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_oeq_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1629,7 +1629,7 @@ define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_oeq_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1711,7 +1711,7 @@ define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_ogt_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1730,7 +1730,7 @@ define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_ogt_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1812,7 +1812,7 @@ define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_oge_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1831,7 +1831,7 @@ define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_oge_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1913,7 +1913,7 @@ define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_olt_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1932,7 +1932,7 @@ define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_olt_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2014,7 +2014,7 @@ define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_ole_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2033,7 +2033,7 @@ define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_ole_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2127,7 +2127,7 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_one_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2146,7 +2146,7 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_one_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2248,7 +2248,7 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_ord_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2273,7 +2273,7 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_ord_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2374,7 +2374,7 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_ueq_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2393,7 +2393,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_ueq_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2481,7 +2481,7 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_ugt_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2500,7 +2500,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_ugt_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2588,7 +2588,7 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_uge_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2607,7 +2607,7 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_uge_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2695,7 +2695,7 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_ult_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2714,7 +2714,7 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_ult_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2802,7 +2802,7 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_ule_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2821,7 +2821,7 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_ule_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2903,7 +2903,7 @@ define <vscale x 8 x i1> @fcmp_une_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_une_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2922,7 +2922,7 @@ define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_une_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -3024,7 +3024,7 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_uno_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -3049,7 +3049,7 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_uno_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -3111,7 +3111,7 @@ define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_oeq_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3121,7 +3121,7 @@ define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_oeq_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3167,7 +3167,7 @@ define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ogt_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3177,7 +3177,7 @@ define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ogt_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3223,7 +3223,7 @@ define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_oge_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3233,7 +3233,7 @@ define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_oge_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3279,7 +3279,7 @@ define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_olt_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3289,7 +3289,7 @@ define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_olt_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3335,7 +3335,7 @@ define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ole_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3345,7 +3345,7 @@ define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ole_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3397,7 +3397,7 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_one_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3407,7 +3407,7 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_one_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3461,7 +3461,7 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ord_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3473,7 +3473,7 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ord_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3528,7 +3528,7 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ueq_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3538,7 +3538,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ueq_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3587,7 +3587,7 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ugt_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3597,7 +3597,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ugt_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3646,7 +3646,7 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_uge_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3656,7 +3656,7 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_uge_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3705,7 +3705,7 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ult_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3715,7 +3715,7 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ult_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3764,7 +3764,7 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ule_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3774,7 +3774,7 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ule_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3820,7 +3820,7 @@ define <vscale x 8 x i1> @fcmp_une_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_une_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3830,7 +3830,7 @@ define <vscale x 8 x i1> @fcmp_une_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_une_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3884,7 +3884,7 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_uno_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3896,7 +3896,7 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_uno_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3945,7 +3945,7 @@ define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_oeq_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3955,7 +3955,7 @@ define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_oeq_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4001,7 +4001,7 @@ define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ogt_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4011,7 +4011,7 @@ define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ogt_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4057,7 +4057,7 @@ define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_oge_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4067,7 +4067,7 @@ define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_oge_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4113,7 +4113,7 @@ define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_olt_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4123,7 +4123,7 @@ define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_olt_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4169,7 +4169,7 @@ define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ole_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4179,7 +4179,7 @@ define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ole_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4231,7 +4231,7 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_one_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4241,7 +4241,7 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_one_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4295,7 +4295,7 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ord_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4307,7 +4307,7 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ord_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4362,7 +4362,7 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ueq_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4372,7 +4372,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ueq_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4421,7 +4421,7 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ugt_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4431,7 +4431,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ugt_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4480,7 +4480,7 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_uge_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4490,7 +4490,7 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_uge_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4539,7 +4539,7 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ult_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4549,7 +4549,7 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ult_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4598,7 +4598,7 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ule_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4608,7 +4608,7 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ule_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4654,7 +4654,7 @@ define <vscale x 8 x i1> @fcmp_une_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_une_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4664,7 +4664,7 @@ define <vscale x 8 x i1> @fcmp_une_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_une_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4718,7 +4718,7 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_uno_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4730,7 +4730,7 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_uno_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
>From de5dd2b8ff861d13b5fa308c19553d7208936f98 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Mon, 1 Dec 2025 10:47:47 +0800
Subject: [PATCH 10/24] Regenerate test/CodeGen/X86/avx10_2-cmp.ll
---
llvm/test/CodeGen/X86/avx10_2-cmp.ll | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx10_2-cmp.ll b/llvm/test/CodeGen/X86/avx10_2-cmp.ll
index 566ce533683f7..8117345d9de04 100644
--- a/llvm/test/CodeGen/X86/avx10_2-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx10_2-cmp.ll
@@ -281,14 +281,14 @@ define i1 @constrained_fcmp() {
; X64-LABEL: constrained_fcmp:
; X64: # %bb.0: # %entry
; X64-NEXT: vxorpd %xmm0, %xmm0, %xmm0
-; X64-NEXT: vucomxsd %xmm0, %xmm0
+; X64-NEXT: vcomisd %xmm0, %xmm0
; X64-NEXT: setne %al
; X64-NEXT: retq
;
; X86-LABEL: constrained_fcmp:
; X86: # %bb.0: # %entry
; X86-NEXT: vxorpd %xmm0, %xmm0, %xmm0
-; X86-NEXT: vucomxsd %xmm0, %xmm0
+; X86-NEXT: vcomisd %xmm0, %xmm0
; X86-NEXT: setne %al
; X86-NEXT: retl
entry:
>From d91f7f0593e097153f354574f15fcb644fdf4ab8 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Mon, 1 Dec 2025 11:16:06 +0800
Subject: [PATCH 11/24] fix tests
---
llvm/test/CodeGen/AArch64/known-never-nan.ll | 2 +-
llvm/test/CodeGen/AMDGPU/dagcombine-select.ll | 4 ++--
llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll | 4 ++--
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/known-never-nan.ll b/llvm/test/CodeGen/AArch64/known-never-nan.ll
index d024f713a86ca..d522449484981 100644
--- a/llvm/test/CodeGen/AArch64/known-never-nan.ll
+++ b/llvm/test/CodeGen/AArch64/known-never-nan.ll
@@ -12,7 +12,7 @@ define float @fmaxnm(i32 %i1, i32 %i2) #0 {
; CHECK-NEXT: fadd s0, s0, s2
; CHECK-NEXT: fadd s1, s1, s3
; CHECK-NEXT: fcmp s0, s1
-; CHECK-NEXT: fcsel s0, s0, s1, pl
+; CHECK-NEXT: fcsel s0, s0, s1, ge
; CHECK-NEXT: ret
%f1 = uitofp i32 %i1 to float
%fadd1 = fadd float %f1, 11.0
diff --git a/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll b/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
index 3327ef0514fcd..65a1443f83c6e 100644
--- a/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
+++ b/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
@@ -591,7 +591,7 @@ define amdgpu_kernel void @frem_constant_sel_constants(ptr addrspace(1) %p, i1 %
; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT: v_cndmask_b32_e64 v1, v0, -4.0, s[0:1]
; GFX9-NEXT: s_mov_b32 s0, 0x40a00000
-; GFX9-NEXT: v_cmp_nlt_f32_e64 s[2:3], |v1|, s0
+; GFX9-NEXT: v_cmp_ge_f32_e64 s[2:3], |v1|, s0
; GFX9-NEXT: s_and_b64 vcc, exec, s[2:3]
; GFX9-NEXT: s_cbranch_vccz .LBB26_2
; GFX9-NEXT: ; %bb.1: ; %frem.else
@@ -665,7 +665,7 @@ define amdgpu_kernel void @frem_constant_sel_constants(ptr addrspace(1) %p, i1 %
; GFX942-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX942-NEXT: v_cndmask_b32_e64 v1, v0, -4.0, s[0:1]
; GFX942-NEXT: s_mov_b32 s0, 0x40a00000
-; GFX942-NEXT: v_cmp_nlt_f32_e64 s[2:3], |v1|, s0
+; GFX942-NEXT: v_cmp_ge_f32_e64 s[2:3], |v1|, s0
; GFX942-NEXT: s_and_b64 vcc, exec, s[2:3]
; GFX942-NEXT: s_cbranch_vccz .LBB26_2
; GFX942-NEXT: ; %bb.1: ; %frem.else
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
index 9b44acd5c0716..64431cb31ea6e 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
@@ -1680,7 +1680,7 @@ define amdgpu_kernel void @fnge_select_f32_multi_use_regression(float %.i2369) {
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_cmp_nlt_f32_e64 s[0:1], s0, 0
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; GCN-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc, 0, v0
; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v0, vcc
; GCN-NEXT: v_mul_f32_e64 v0, -v0, v1
; GCN-NEXT: v_cmp_le_f32_e32 vcc, 0, v0
@@ -1694,7 +1694,7 @@ define amdgpu_kernel void @fnge_select_f32_multi_use_regression(float %.i2369) {
; GFX11-NEXT: v_cmp_nlt_f32_e64 s0, s0, 0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: v_cmp_nge_f32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, 0, v0
; GFX11-NEXT: v_cndmask_b32_e32 v1, 0, v0, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mul_f32_e64 v0, -v0, v1
>From 399a3b9dd7659a9af44577293106b00aff0faf91 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Mon, 1 Dec 2025 16:55:10 +0800
Subject: [PATCH 12/24] Fix CodeGen/ARM/fp16-vminmaxnm-safe.ll
---
llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll b/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll
index 52fe5ce1a8a5f..3dcc828167d16 100644
--- a/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll
+++ b/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll
@@ -483,9 +483,9 @@ define half @fp16_vminmaxnm_neg0(half %a) {
; CHECK-NEXT: vldr.16 s0, .LCPI23_0
; CHECK-NEXT: vmov.f16 s2, r0
; CHECK-NEXT: vminnm.f16 s2, s2, s0
-; CHECK-NEXT: vcmp.f16 s0, s2
+; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vselge.f16 s0, s0, s2
+; CHECK-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
>From 5ec547829e163039bd8b9759ed67e8e884d0555c Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Thu, 4 Dec 2025 18:50:31 +0800
Subject: [PATCH 13/24] use nofpclass in setcc-fp.ll
---
llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll | 48 ++++++++++++-------------
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
index a17fe2eeff6dd..7fbbfb39a8253 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
@@ -635,7 +635,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofp
; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan one <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp one <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -882,7 +882,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofp
; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ueq <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp ueq <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -989,7 +989,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofp
; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ugt <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp ugt <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -1096,7 +1096,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofp
; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan uge <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp uge <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -1203,7 +1203,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofp
; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ult <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp ult <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -1310,7 +1310,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofp
; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ule <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp ule <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2163,7 +2163,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16_nonans(<vscale x 8 x half> nofpcla
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan one <vscale x 8 x half> %va, %splat
+ %vc = fcmp one <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2410,7 +2410,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16_nonans(<vscale x 8 x half> nofpcla
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ueq <vscale x 8 x half> %va, %splat
+ %vc = fcmp ueq <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2517,7 +2517,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16_nonans(<vscale x 8 x half> nofpcla
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ugt <vscale x 8 x half> %va, %splat
+ %vc = fcmp ugt <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2624,7 +2624,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16_nonans(<vscale x 8 x half> nofpcla
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan uge <vscale x 8 x half> %va, %splat
+ %vc = fcmp uge <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2731,7 +2731,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16_nonans(<vscale x 8 x half> nofpcla
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ult <vscale x 8 x half> %va, %splat
+ %vc = fcmp ult <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2838,7 +2838,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16_nonans(<vscale x 8 x half> nofpcla
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ule <vscale x 8 x half> %va, %splat
+ %vc = fcmp ule <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3415,7 +3415,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32_nonans(<vscale x 8 x float> nofpcl
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan one <vscale x 8 x float> %va, %splat
+ %vc = fcmp one <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3546,7 +3546,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32_nonans(<vscale x 8 x float> nofpcl
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ueq <vscale x 8 x float> %va, %splat
+ %vc = fcmp ueq <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3605,7 +3605,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32_nonans(<vscale x 8 x float> nofpcl
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ugt <vscale x 8 x float> %va, %splat
+ %vc = fcmp ugt <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3664,7 +3664,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32_nonans(<vscale x 8 x float> nofpcl
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan uge <vscale x 8 x float> %va, %splat
+ %vc = fcmp uge <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3723,7 +3723,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32_nonans(<vscale x 8 x float> nofpcl
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ult <vscale x 8 x float> %va, %splat
+ %vc = fcmp ult <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3782,7 +3782,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32_nonans(<vscale x 8 x float> nofpcl
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ule <vscale x 8 x float> %va, %splat
+ %vc = fcmp ule <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4249,7 +4249,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64_nonans(<vscale x 8 x double> nofpc
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan one <vscale x 8 x double> %va, %splat
+ %vc = fcmp one <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4380,7 +4380,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64_nonans(<vscale x 8 x double> nofpc
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ueq <vscale x 8 x double> %va, %splat
+ %vc = fcmp ueq <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4439,7 +4439,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64_nonans(<vscale x 8 x double> nofpc
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ugt <vscale x 8 x double> %va, %splat
+ %vc = fcmp ugt <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4498,7 +4498,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64_nonans(<vscale x 8 x double> nofpc
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan uge <vscale x 8 x double> %va, %splat
+ %vc = fcmp uge <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4557,7 +4557,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64_nonans(<vscale x 8 x double> nofpc
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ult <vscale x 8 x double> %va, %splat
+ %vc = fcmp ult <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4616,7 +4616,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64_nonans(<vscale x 8 x double> nofpc
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp nnan ule <vscale x 8 x double> %va, %splat
+ %vc = fcmp ule <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
>From 1bbf320eb5cf8f86bae617031ac4ff1a0d3109b2 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Thu, 4 Dec 2025 20:04:39 +0800
Subject: [PATCH 14/24] Use single bool argument parameter
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 3e1aede08560c..9e28d930164fe 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6704,16 +6704,14 @@ static unsigned getMinMaxOpcodeForClamp(bool IsMin, SDValue Operand1,
// FIXME: use FMINIMUMNUM if possible, such as for RISC-V.
static unsigned getMinMaxOpcodeForCompareFold(
- SDValue Operand1, SDValue Operand2, SDNodeFlags LHSSetCCFlags,
- SDNodeFlags RHSSetCCFlags, ISD::CondCode CC, unsigned OrAndOpcode,
- SelectionDAG &DAG, bool isFMAXNUMFMINNUM_IEEE, bool isFMAXNUMFMINNUM) {
+ SDValue Operand1, SDValue Operand2, bool SetCCNoNaNs, ISD::CondCode CC,
+ unsigned OrAndOpcode, SelectionDAG &DAG, bool isFMAXNUMFMINNUM_IEEE,
+ bool isFMAXNUMFMINNUM) {
// The optimization cannot be applied for all the predicates because
// of the way FMINNUM/FMAXNUM and FMINNUM_IEEE/FMAXNUM_IEEE handle
// NaNs. For FMINNUM_IEEE/FMAXNUM_IEEE, the optimization cannot be
// applied at all if one of the operands is a signaling NaN.
- bool SetCCNoNaNs = LHSSetCCFlags.hasNoNaNs() && RHSSetCCFlags.hasNoNaNs();
-
// It is safe to use FMINNUM_IEEE/FMAXNUM_IEEE if all the operands
// are non NaN values.
if (((CC == ISD::SETLT || CC == ISD::SETLE) && (OrAndOpcode == ISD::OR)) ||
@@ -6869,8 +6867,9 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
NewOpcode = IsSigned ? ISD::SMAX : ISD::UMAX;
} else if (OpVT.isFloatingPoint())
NewOpcode = getMinMaxOpcodeForCompareFold(
- Operand1, Operand2, CC, LogicOp->getOpcode(), DAG,
- isFMAXNUMFMINNUM_IEEE, isFMAXNUMFMINNUM);
+ Operand1, Operand2,
+ LHSSetCCFlags.hasNoNaNs() && RHSSetCCFlags.hasNoNaNs(), CC,
+ LogicOp->getOpcode(), DAG, isFMAXNUMFMINNUM_IEEE, isFMAXNUMFMINNUM);
if (NewOpcode != ISD::DELETED_NODE) {
// Propagate fast-math flags from setcc.
>From ff59c585ded9cc63bb3b5a732d001a62bcee5fbb Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Sat, 3 Jan 2026 20:40:37 +0800
Subject: [PATCH 15/24] start with lower case in `areFCmpOperandsNonNaN`
---
llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 644487864e8ca..7105234addd49 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2447,7 +2447,7 @@ static bool InBlock(const Value *V, const BasicBlock *BB) {
return true;
}
-static bool AreFCmpOperandsNonNaN(const Instruction *Inst,
+static bool areFCmpOperandsNonNaN(const Instruction *Inst,
const SelectionDAG &DAG) {
assert(
(isa<FCmpInst>(Inst) || isa<ConstrainedFPCmpIntrinsic>(Inst) ||
@@ -2500,7 +2500,7 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
FCmpInst::Predicate Pred =
InvertCond ? FC->getInversePredicate() : FC->getPredicate();
Condition = getFCmpCondCode(Pred);
- if (AreFCmpOperandsNonNaN(FC, DAG))
+ if (areFCmpOperandsNonNaN(FC, DAG))
Condition = getFCmpCodeWithoutNaN(Condition);
}
@@ -3813,7 +3813,7 @@ void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) {
ISD::CondCode Condition = getFCmpCondCode(predicate);
auto *FPMO = cast<FPMathOperator>(&I);
- if (AreFCmpOperandsNonNaN(&I, DAG))
+ if (areFCmpOperandsNonNaN(&I, DAG))
Condition = getFCmpCodeWithoutNaN(Condition);
SDNodeFlags Flags;
@@ -8554,7 +8554,7 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
case ISD::STRICT_FSETCCS: {
auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
- if (AreFCmpOperandsNonNaN(FPCmp, DAG))
+ if (areFCmpOperandsNonNaN(FPCmp, DAG))
Condition = getFCmpCodeWithoutNaN(Condition);
Opers.push_back(DAG.getCondCode(Condition));
break;
@@ -8838,7 +8838,7 @@ void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
if (IsFP) {
Condition = getFCmpCondCode(CondCode);
- if (AreFCmpOperandsNonNaN(&VPIntrin, DAG))
+ if (areFCmpOperandsNonNaN(&VPIntrin, DAG))
Condition = getFCmpCodeWithoutNaN(Condition);
} else {
Condition = getICmpCondCode(CondCode);
>From 5c200e93ffa5ab5c8b31f3b906ca7fb6121913b1 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Sat, 3 Jan 2026 20:54:33 +0800
Subject: [PATCH 16/24] use SDNodeFlags directly as possible
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 9e28d930164fe..07ce4c1658ed3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6873,10 +6873,10 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
if (NewOpcode != ISD::DELETED_NODE) {
// Propagate fast-math flags from setcc.
- SelectionDAG::FlagInserter FlagInserter(DAG, LHS->getFlags() &
- RHS->getFlags());
+ SDNodeFlags Flags = LHS->getFlags() & RHS->getFlags();
SDValue MinMaxValue =
- DAG.getNode(NewOpcode, DL, OpVT, Operand1, Operand2);
+ DAG.getNode(NewOpcode, DL, OpVT, Operand1, Operand2, Flags);
+ SelectionDAG::FlagInserter FlagInserter(DAG, Flags);
return DAG.getSetCC(DL, VT, MinMaxValue, CommonValue, CC);
}
}
>From 340969ac5752ecb1c5e629193fb8dc2f1c8be57e Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Mon, 5 Jan 2026 18:37:11 +0800
Subject: [PATCH 17/24] use analysis from codegen as possible
---
.../SelectionDAG/SelectionDAGBuilder.cpp | 42 ++++++-------------
1 file changed, 12 insertions(+), 30 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 7105234addd49..30d9dc3110103 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2447,26 +2447,6 @@ static bool InBlock(const Value *V, const BasicBlock *BB) {
return true;
}
-static bool areFCmpOperandsNonNaN(const Instruction *Inst,
- const SelectionDAG &DAG) {
- assert(
- (isa<FCmpInst>(Inst) || isa<ConstrainedFPCmpIntrinsic>(Inst) ||
- (isa<VPIntrinsic>(Inst) &&
- dyn_cast<VPIntrinsic>(Inst)->getIntrinsicID() == Intrinsic::vp_fcmp)) &&
- "Not fcmp instruction or its intrinsic variants!");
-
- if (const auto *FPOp = dyn_cast<FPMathOperator>(Inst))
- if (FPOp->hasNoNaNs())
- return true;
-
- for (int I = 0; I != 2; ++I)
- if (!isKnownNeverNaN(Inst->getOperand(I),
- SimplifyQuery(DAG.getDataLayout(), Inst)))
- return false;
-
- return true;
-}
-
/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
@@ -2500,7 +2480,12 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
FCmpInst::Predicate Pred =
InvertCond ? FC->getInversePredicate() : FC->getPredicate();
Condition = getFCmpCondCode(Pred);
- if (areFCmpOperandsNonNaN(FC, DAG))
+
+ if (FC->hasNoNaNs() ||
+ (isKnownNeverNaN(FC->getOperand(0),
+ SimplifyQuery(DAG.getDataLayout(), FC)) &&
+ isKnownNeverNaN(FC->getOperand(1),
+ SimplifyQuery(DAG.getDataLayout(), FC))))
Condition = getFCmpCodeWithoutNaN(Condition);
}
@@ -3813,7 +3798,8 @@ void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) {
ISD::CondCode Condition = getFCmpCondCode(predicate);
auto *FPMO = cast<FPMathOperator>(&I);
- if (areFCmpOperandsNonNaN(&I, DAG))
+ if (FPMO->hasNoNaNs() ||
+ (DAG.isKnownNeverNaN(Op1) && DAG.isKnownNeverNaN(Op2)))
Condition = getFCmpCodeWithoutNaN(Condition);
SDNodeFlags Flags;
@@ -8554,7 +8540,7 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
case ISD::STRICT_FSETCCS: {
auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
- if (areFCmpOperandsNonNaN(FPCmp, DAG))
+ if (DAG.isKnownNeverNaN(Opers[1]) && DAG.isKnownNeverNaN(Opers[2]))
Condition = getFCmpCodeWithoutNaN(Condition);
Opers.push_back(DAG.getCondCode(Condition));
break;
@@ -8836,13 +8822,7 @@ void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
ISD::CondCode Condition;
CmpInst::Predicate CondCode = VPIntrin.getPredicate();
bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
- if (IsFP) {
- Condition = getFCmpCondCode(CondCode);
- if (areFCmpOperandsNonNaN(&VPIntrin, DAG))
- Condition = getFCmpCodeWithoutNaN(Condition);
- } else {
- Condition = getICmpCondCode(CondCode);
- }
+ Condition = IsFP ? getFCmpCondCode(CondCode) : getICmpCondCode(CondCode);
SDValue Op1 = getValue(VPIntrin.getOperand(0));
SDValue Op2 = getValue(VPIntrin.getOperand(1));
@@ -8856,6 +8836,8 @@ void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
VPIntrin.getType());
+ if (DAG.isKnownNeverNaN(Op1) && DAG.isKnownNeverNaN(Op2))
+ Condition = getFCmpCodeWithoutNaN(Condition);
setValue(&VPIntrin,
DAG.getSetCCVP(DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
}
>From c157e72418f5f4933dde51022e534d0caa7acfdd Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Mon, 5 Jan 2026 19:48:19 +0800
Subject: [PATCH 18/24] handle SPLAT_VECTOR in isKnownNeverNaN
---
llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 2 ++
1 file changed, 2 insertions(+)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 4823f77a61c4a..250b3d2702311 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6046,6 +6046,8 @@ bool SelectionDAG::isKnownNeverNaN(SDValue Op, const APInt &DemandedElts,
return false;
return true;
}
+ case ISD::SPLAT_VECTOR:
+ return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
case ISD::AssertNoFPClass: {
FPClassTest NoFPClass =
static_cast<FPClassTest>(Op.getConstantOperandVal(1));
>From 9bd766f240dc8c0396959b15c1185985a3fe0b43 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Mon, 5 Jan 2026 19:48:33 +0800
Subject: [PATCH 19/24] fix tests
---
.../SelectionDAG/SelectionDAGBuilder.cpp | 1 -
llvm/test/CodeGen/AArch64/known-never-nan.ll | 2 +-
.../CodeGen/AMDGPU/combine_andor_with_cmps.ll | 12 ++--
.../CodeGen/AMDGPU/fneg-modifier-casting.ll | 4 +-
llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll | 16 ++---
llvm/test/CodeGen/PowerPC/scalar_cmp.ll | 72 +++++++------------
6 files changed, 42 insertions(+), 65 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 30d9dc3110103..d4131a9151740 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2480,7 +2480,6 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
FCmpInst::Predicate Pred =
InvertCond ? FC->getInversePredicate() : FC->getPredicate();
Condition = getFCmpCondCode(Pred);
-
if (FC->hasNoNaNs() ||
(isKnownNeverNaN(FC->getOperand(0),
SimplifyQuery(DAG.getDataLayout(), FC)) &&
diff --git a/llvm/test/CodeGen/AArch64/known-never-nan.ll b/llvm/test/CodeGen/AArch64/known-never-nan.ll
index d522449484981..d024f713a86ca 100644
--- a/llvm/test/CodeGen/AArch64/known-never-nan.ll
+++ b/llvm/test/CodeGen/AArch64/known-never-nan.ll
@@ -12,7 +12,7 @@ define float @fmaxnm(i32 %i1, i32 %i2) #0 {
; CHECK-NEXT: fadd s0, s0, s2
; CHECK-NEXT: fadd s1, s1, s3
; CHECK-NEXT: fcmp s0, s1
-; CHECK-NEXT: fcsel s0, s0, s1, ge
+; CHECK-NEXT: fcsel s0, s0, s1, pl
; CHECK-NEXT: ret
%f1 = uitofp i32 %i1 to float
%fadd1 = fadd float %f1, 11.0
diff --git a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
index 114392c688f94..42245e3d7013d 100644
--- a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
@@ -2441,10 +2441,9 @@ define i1 @test122(double %arg1, double %arg2, double %arg3) #1 {
; GCN-LABEL: test122:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GCN-NEXT: v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
-; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
-; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult double %arg1, %arg3
%cmp2 = fcmp ult double %arg2, %arg3
@@ -2458,10 +2457,9 @@ define i1 @test123(double %arg1, double %arg2, double %arg3) #1 {
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GCN-NEXT: v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
-; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
-; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
index 64431cb31ea6e..9b44acd5c0716 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
@@ -1680,7 +1680,7 @@ define amdgpu_kernel void @fnge_select_f32_multi_use_regression(float %.i2369) {
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_cmp_nlt_f32_e64 s[0:1], s0, 0
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; GCN-NEXT: v_cmp_lt_f32_e32 vcc, 0, v0
+; GCN-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0
; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v0, vcc
; GCN-NEXT: v_mul_f32_e64 v0, -v0, v1
; GCN-NEXT: v_cmp_le_f32_e32 vcc, 0, v0
@@ -1694,7 +1694,7 @@ define amdgpu_kernel void @fnge_select_f32_multi_use_regression(float %.i2369) {
; GFX11-NEXT: v_cmp_nlt_f32_e64 s0, s0, 0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cmp_nge_f32_e32 vcc_lo, 0, v0
; GFX11-NEXT: v_cndmask_b32_e32 v1, 0, v0, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mul_f32_e64 v0, -v0, v1
diff --git a/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll b/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll
index 3dcc828167d16..2185bd8a2a138 100644
--- a/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll
+++ b/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll
@@ -253,9 +253,9 @@ define half @fp16_vminnm_NNNu(half %b) {
; CHECK-NEXT: vmov.f16 s2, #1.200000e+01
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vldr.16 s2, .LCPI14_0
-; CHECK-NEXT: vcmp.f16 s0, s2
+; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vselge.f16 s0, s2, s0
+; CHECK-NEXT: vselgt.f16 s0, s0, s2
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
@@ -277,9 +277,9 @@ define half @fp16_vminnm_NNNule(half %b) {
; CHECK-NEXT: vmov.f16 s0, r0
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vldr.16 s2, .LCPI15_1
-; CHECK-NEXT: vcmp.f16 s0, s2
+; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vselgt.f16 s0, s2, s0
+; CHECK-NEXT: vselge.f16 s0, s0, s2
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
@@ -434,9 +434,9 @@ define half @fp16_vmaxnm_NNNu(half %b) {
; CHECK-NEXT: vmov.f16 s2, #1.200000e+01
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vldr.16 s2, .LCPI21_0
-; CHECK-NEXT: vcmp.f16 s2, s0
+; CHECK-NEXT: vcmp.f16 s0, s2
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vselge.f16 s0, s2, s0
+; CHECK-NEXT: vselgt.f16 s0, s0, s2
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
@@ -458,9 +458,9 @@ define half @fp16_vmaxnm_NNNuge(half %b) {
; CHECK-NEXT: vmov.f16 s0, r0
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vldr.16 s2, .LCPI22_1
-; CHECK-NEXT: vcmp.f16 s2, s0
+; CHECK-NEXT: vcmp.f16 s0, s2
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vselgt.f16 s0, s2, s0
+; CHECK-NEXT: vselge.f16 s0, s0, s2
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
diff --git a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
index 878b7f2449141..46351782b5dc2 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
@@ -139,8 +139,7 @@ define float @select_one_float(float %a, float %b, float %c, float %d) {
; P8-LABEL: select_one_float:
; P8: # %bb.0: # %entry
; P8-NEXT: fcmpu cr0, f1, f2
-; P8-NEXT: crnor 4*cr5+lt, un, eq
-; P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; P8-NEXT: bne cr0, .LBB5_2
; P8-NEXT: # %bb.1: # %entry
; P8-NEXT: fmr f3, f4
; P8-NEXT: .LBB5_2: # %entry
@@ -150,8 +149,7 @@ define float @select_one_float(float %a, float %b, float %c, float %d) {
; P9-LABEL: select_one_float:
; P9: # %bb.0: # %entry
; P9-NEXT: fcmpu cr0, f1, f2
-; P9-NEXT: crnor 4*cr5+lt, un, eq
-; P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; P9-NEXT: bne cr0, .LBB5_2
; P9-NEXT: # %bb.1: # %entry
; P9-NEXT: fmr f3, f4
; P9-NEXT: .LBB5_2: # %entry
@@ -167,8 +165,7 @@ define float @select_one_float_nsz(float %a, float %b, float %c, float %d) {
; P8-LABEL: select_one_float_nsz:
; P8: # %bb.0: # %entry
; P8-NEXT: fcmpu cr0, f1, f2
-; P8-NEXT: crnor 4*cr5+lt, un, eq
-; P8-NEXT: bc 12, 4*cr5+lt, .LBB6_2
+; P8-NEXT: bne cr0, .LBB6_2
; P8-NEXT: # %bb.1: # %entry
; P8-NEXT: fmr f3, f4
; P8-NEXT: .LBB6_2: # %entry
@@ -178,8 +175,7 @@ define float @select_one_float_nsz(float %a, float %b, float %c, float %d) {
; P9-LABEL: select_one_float_nsz:
; P9: # %bb.0: # %entry
; P9-NEXT: fcmpu cr0, f1, f2
-; P9-NEXT: crnor 4*cr5+lt, un, eq
-; P9-NEXT: bc 12, 4*cr5+lt, .LBB6_2
+; P9-NEXT: bne cr0, .LBB6_2
; P9-NEXT: # %bb.1: # %entry
; P9-NEXT: fmr f3, f4
; P9-NEXT: .LBB6_2: # %entry
@@ -194,9 +190,8 @@ entry:
define double @select_one_double(double %a, double %b, double %c, double %d) {
; P8-LABEL: select_one_double:
; P8: # %bb.0: # %entry
-; P8-NEXT: fcmpu cr0, f1, f2
-; P8-NEXT: crnor 4*cr5+lt, un, eq
-; P8-NEXT: bc 12, 4*cr5+lt, .LBB7_2
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: bne cr0, .LBB7_2
; P8-NEXT: # %bb.1: # %entry
; P8-NEXT: fmr f3, f4
; P8-NEXT: .LBB7_2: # %entry
@@ -205,9 +200,8 @@ define double @select_one_double(double %a, double %b, double %c, double %d) {
;
; P9-LABEL: select_one_double:
; P9: # %bb.0: # %entry
-; P9-NEXT: fcmpu cr0, f1, f2
-; P9-NEXT: crnor 4*cr5+lt, un, eq
-; P9-NEXT: bc 12, 4*cr5+lt, .LBB7_2
+; P9-NEXT: xscmpudp cr0, f1, f2
+; P9-NEXT: bne cr0, .LBB7_2
; P9-NEXT: # %bb.1: # %entry
; P9-NEXT: fmr f3, f4
; P9-NEXT: .LBB7_2: # %entry
@@ -269,8 +263,7 @@ define float @select_oge_float(float %a, float %b, float %c, float %d) {
; P8-LABEL: select_oge_float:
; P8: # %bb.0: # %entry
; P8-NEXT: fcmpu cr0, f1, f2
-; P8-NEXT: crnor 4*cr5+lt, un, lt
-; P8-NEXT: bc 12, 4*cr5+lt, .LBB10_2
+; P8-NEXT: bge cr0, .LBB10_2
; P8-NEXT: # %bb.1: # %entry
; P8-NEXT: fmr f3, f4
; P8-NEXT: .LBB10_2: # %entry
@@ -280,8 +273,7 @@ define float @select_oge_float(float %a, float %b, float %c, float %d) {
; P9-LABEL: select_oge_float:
; P9: # %bb.0: # %entry
; P9-NEXT: fcmpu cr0, f1, f2
-; P9-NEXT: crnor 4*cr5+lt, un, lt
-; P9-NEXT: bc 12, 4*cr5+lt, .LBB10_2
+; P9-NEXT: bge cr0, .LBB10_2
; P9-NEXT: # %bb.1: # %entry
; P9-NEXT: fmr f3, f4
; P9-NEXT: .LBB10_2: # %entry
@@ -296,9 +288,8 @@ entry:
define double @select_oge_double(double %a, double %b, double %c, double %d) {
; P8-LABEL: select_oge_double:
; P8: # %bb.0: # %entry
-; P8-NEXT: fcmpu cr0, f1, f2
-; P8-NEXT: crnor 4*cr5+lt, un, lt
-; P8-NEXT: bc 12, 4*cr5+lt, .LBB11_2
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: bge cr0, .LBB11_2
; P8-NEXT: # %bb.1: # %entry
; P8-NEXT: fmr f3, f4
; P8-NEXT: .LBB11_2: # %entry
@@ -307,9 +298,8 @@ define double @select_oge_double(double %a, double %b, double %c, double %d) {
;
; P9-LABEL: select_oge_double:
; P9: # %bb.0: # %entry
-; P9-NEXT: fcmpu cr0, f1, f2
-; P9-NEXT: crnor 4*cr5+lt, un, lt
-; P9-NEXT: bc 12, 4*cr5+lt, .LBB11_2
+; P9-NEXT: xscmpudp cr0, f1, f2
+; P9-NEXT: bge cr0, .LBB11_2
; P9-NEXT: # %bb.1: # %entry
; P9-NEXT: fmr f3, f4
; P9-NEXT: .LBB11_2: # %entry
@@ -543,8 +533,7 @@ define float @select_ole_float(float %a, float %b, float %c, float %d) {
; P8-LABEL: select_ole_float:
; P8: # %bb.0: # %entry
; P8-NEXT: fcmpu cr0, f1, f2
-; P8-NEXT: crnor 4*cr5+lt, un, gt
-; P8-NEXT: bc 12, 4*cr5+lt, .LBB22_2
+; P8-NEXT: ble cr0, .LBB22_2
; P8-NEXT: # %bb.1: # %entry
; P8-NEXT: fmr f3, f4
; P8-NEXT: .LBB22_2: # %entry
@@ -554,8 +543,7 @@ define float @select_ole_float(float %a, float %b, float %c, float %d) {
; P9-LABEL: select_ole_float:
; P9: # %bb.0: # %entry
; P9-NEXT: fcmpu cr0, f1, f2
-; P9-NEXT: crnor 4*cr5+lt, un, gt
-; P9-NEXT: bc 12, 4*cr5+lt, .LBB22_2
+; P9-NEXT: ble cr0, .LBB22_2
; P9-NEXT: # %bb.1: # %entry
; P9-NEXT: fmr f3, f4
; P9-NEXT: .LBB22_2: # %entry
@@ -570,9 +558,8 @@ entry:
define double @select_ole_double(double %a, double %b, double %c, double %d) {
; P8-LABEL: select_ole_double:
; P8: # %bb.0: # %entry
-; P8-NEXT: fcmpu cr0, f1, f2
-; P8-NEXT: crnor 4*cr5+lt, un, gt
-; P8-NEXT: bc 12, 4*cr5+lt, .LBB23_2
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: ble cr0, .LBB23_2
; P8-NEXT: # %bb.1: # %entry
; P8-NEXT: fmr f3, f4
; P8-NEXT: .LBB23_2: # %entry
@@ -581,9 +568,8 @@ define double @select_ole_double(double %a, double %b, double %c, double %d) {
;
; P9-LABEL: select_ole_double:
; P9: # %bb.0: # %entry
-; P9-NEXT: fcmpu cr0, f1, f2
-; P9-NEXT: crnor 4*cr5+lt, un, gt
-; P9-NEXT: bc 12, 4*cr5+lt, .LBB23_2
+; P9-NEXT: xscmpudp cr0, f1, f2
+; P9-NEXT: ble cr0, .LBB23_2
; P9-NEXT: # %bb.1: # %entry
; P9-NEXT: fmr f3, f4
; P9-NEXT: .LBB23_2: # %entry
@@ -637,14 +623,11 @@ define double @onecmp1(double %a, double %y, double %z) {
; P8: # %bb.0: # %entry
; P8-NEXT: vspltisw v2, 1
; P8-NEXT: xvcvsxwdp vs0, vs34
-; P8-NEXT: fcmpu cr0, f1, f0
-; P8-NEXT: bc 12, lt, .LBB26_3
+; P8-NEXT: xscmpudp cr0, f1, f0
+; P8-NEXT: blt cr0, .LBB26_2
; P8-NEXT: # %bb.1: # %entry
-; P8-NEXT: fcmpu cr0, f1, f1
-; P8-NEXT: bc 12, un, .LBB26_3
-; P8-NEXT: # %bb.2: # %entry
; P8-NEXT: fmr f3, f2
-; P8-NEXT: .LBB26_3: # %entry
+; P8-NEXT: .LBB26_2: # %entry
; P8-NEXT: fmr f1, f3
; P8-NEXT: blr
;
@@ -652,14 +635,11 @@ define double @onecmp1(double %a, double %y, double %z) {
; P9: # %bb.0: # %entry
; P9-NEXT: vspltisw v2, 1
; P9-NEXT: xvcvsxwdp vs0, vs34
-; P9-NEXT: fcmpu cr0, f1, f0
-; P9-NEXT: bc 12, lt, .LBB26_3
+; P9-NEXT: xscmpudp cr0, f1, f0
+; P9-NEXT: blt cr0, .LBB26_2
; P9-NEXT: # %bb.1: # %entry
-; P9-NEXT: fcmpu cr0, f1, f1
-; P9-NEXT: bc 12, un, .LBB26_3
-; P9-NEXT: # %bb.2: # %entry
; P9-NEXT: fmr f3, f2
-; P9-NEXT: .LBB26_3: # %entry
+; P9-NEXT: .LBB26_2: # %entry
; P9-NEXT: fmr f1, f3
; P9-NEXT: blr
entry:
>From d90c1afd0984d6122800e3392913b13030163410 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Sat, 31 Jan 2026 08:55:44 +0800
Subject: [PATCH 20/24] support SDNodeFlags in getSetCC
---
llvm/include/llvm/CodeGen/SelectionDAG.h | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index a208481df0c70..a9793f6c233b7 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -1359,7 +1359,7 @@ class SelectionDAG {
/// ISD::CondCode instead of an SDValue.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
ISD::CondCode Cond, SDValue Chain = SDValue(),
- bool IsSignaling = false) {
+ bool IsSignaling = false, SDNodeFlags Flags = {}) {
assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&
"Vector/scalar operand type mismatch for setcc");
assert(LHS.getValueType().isVector() == VT.isVector() &&
@@ -1368,8 +1368,9 @@ class SelectionDAG {
"Cannot create a setCC of an invalid node.");
if (Chain)
return getNode(IsSignaling ? ISD::STRICT_FSETCCS : ISD::STRICT_FSETCC, DL,
- {VT, MVT::Other}, {Chain, LHS, RHS, getCondCode(Cond)});
- return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
+ {VT, MVT::Other}, {Chain, LHS, RHS, getCondCode(Cond)},
+ Flags);
+ return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond), Flags);
}
/// Helper function to make it easier to build VP_SETCCs if you just have an
>From 4d874c8939516b4233513a4b42dd0804bc979bcb Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Sat, 31 Jan 2026 08:56:05 +0800
Subject: [PATCH 21/24] pass SDNodeFlags to getSetCC directly
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 07ce4c1658ed3..5bb291007f1d6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6876,8 +6876,8 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
SDNodeFlags Flags = LHS->getFlags() & RHS->getFlags();
SDValue MinMaxValue =
DAG.getNode(NewOpcode, DL, OpVT, Operand1, Operand2, Flags);
- SelectionDAG::FlagInserter FlagInserter(DAG, Flags);
- return DAG.getSetCC(DL, VT, MinMaxValue, CommonValue, CC);
+ return DAG.getSetCC(DL, VT, MinMaxValue, CommonValue, CC, /*Chain=*/{},
+ /*IsSignaling=*/false, Flags);
}
}
}
>From 8e2ab51d9f0c97a81b49a1d3202242539403c141 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Sat, 31 Jan 2026 14:02:38 +0800
Subject: [PATCH 22/24] Set SDNodeFlags when building setcc
---
llvm/include/llvm/CodeGen/SelectionDAG.h | 2 +-
llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 9 ++++++---
llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 3 ++-
3 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index a9793f6c233b7..d176fdbe9e7a3 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -2093,7 +2093,7 @@ class SelectionDAG {
/// Constant fold a setcc to true or false.
LLVM_ABI SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond,
- const SDLoc &dl);
+ const SDLoc &dl, SDNodeFlags Flags = {});
/// Return true if the sign bit of Op is known to be zero.
/// We use this predicate to simplify operations downstream.
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 250b3d2702311..11d3ef2c966e3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2654,7 +2654,8 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
}
SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
- ISD::CondCode Cond, const SDLoc &dl) {
+ ISD::CondCode Cond, const SDLoc &dl,
+ SDNodeFlags Flags) {
EVT OpVT = N1.getValueType();
auto GetUndefBooleanConstant = [&]() {
@@ -2783,7 +2784,8 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
return SDValue();
- return getSetCC(dl, VT, N2, N1, SwappedCond);
+ return getSetCC(dl, VT, N2, N1, SwappedCond, /*Chain=*/{},
+ /*IsSignaling=*/false, Flags);
} else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
(OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
// If an operand is known to be a nan (or undef that could be a nan), we can
@@ -8320,7 +8322,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
N1.getValueType().getVectorElementCount()) &&
"SETCC vector element counts must match!");
// Use FoldSetCC to simplify SETCC's.
- if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
+ if (SDValue V =
+ FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL, Flags))
return V;
break;
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index d4131a9151740..9bf50d0a5152f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3807,7 +3807,8 @@ void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) {
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
- setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
+ setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition,
+ /*Chain=*/{}, /*IsSignaling=*/false, Flags));
}
// Check if the condition of the select has one use or two users that are both
>From 19843ce6fe2df716b16b34e33e2f00fc4872b297 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Sat, 31 Jan 2026 15:33:22 +0800
Subject: [PATCH 23/24] use flag inserter when flag is not available
---
llvm/include/llvm/CodeGen/SelectionDAG.h | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index d176fdbe9e7a3..c0ffe914102cf 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -45,6 +45,7 @@
#include <cstdint>
#include <functional>
#include <map>
+#include <optional>
#include <set>
#include <string>
#include <tuple>
@@ -1359,18 +1360,22 @@ class SelectionDAG {
/// ISD::CondCode instead of an SDValue.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
ISD::CondCode Cond, SDValue Chain = SDValue(),
- bool IsSignaling = false, SDNodeFlags Flags = {}) {
+ bool IsSignaling = false,
+ std::optional<SDNodeFlags> Flags = {}) {
assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&
"Vector/scalar operand type mismatch for setcc");
assert(LHS.getValueType().isVector() == VT.isVector() &&
"Vector/scalar result type mismatch for setcc");
assert(Cond != ISD::SETCC_INVALID &&
"Cannot create a setCC of an invalid node.");
+ // TODO: Get rid of FlagInserter when TargetLowering can handle SDNodeFlags.
+ SDNodeFlags NodeFlags =
+ Flags.value_or(Inserter ? Inserter->getFlags() : SDNodeFlags());
if (Chain)
return getNode(IsSignaling ? ISD::STRICT_FSETCCS : ISD::STRICT_FSETCC, DL,
{VT, MVT::Other}, {Chain, LHS, RHS, getCondCode(Cond)},
- Flags);
- return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond), Flags);
+ NodeFlags);
+ return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond), NodeFlags);
}
/// Helper function to make it easier to build VP_SETCCs if you just have an
>From 3a34632f9d2034947fda43135b3c52db3bd0b141 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Mon, 2 Feb 2026 08:13:37 +0800
Subject: [PATCH 24/24] propagate SDNodeFlags in getSqrtInputTest
---
llvm/include/llvm/CodeGen/SelectionDAG.h | 11 ++--
llvm/include/llvm/CodeGen/TargetLowering.h | 3 +-
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 50 +++++++++++--------
.../CodeGen/SelectionDAG/TargetLowering.cpp | 11 ++--
.../Target/AArch64/AArch64ISelLowering.cpp | 9 ++--
llvm/lib/Target/AArch64/AArch64ISelLowering.h | 3 +-
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 7 +--
llvm/lib/Target/PowerPC/PPCISelLowering.h | 3 +-
8 files changed, 55 insertions(+), 42 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index c0ffe914102cf..d176fdbe9e7a3 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -45,7 +45,6 @@
#include <cstdint>
#include <functional>
#include <map>
-#include <optional>
#include <set>
#include <string>
#include <tuple>
@@ -1360,22 +1359,18 @@ class SelectionDAG {
/// ISD::CondCode instead of an SDValue.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
ISD::CondCode Cond, SDValue Chain = SDValue(),
- bool IsSignaling = false,
- std::optional<SDNodeFlags> Flags = {}) {
+ bool IsSignaling = false, SDNodeFlags Flags = {}) {
assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&
"Vector/scalar operand type mismatch for setcc");
assert(LHS.getValueType().isVector() == VT.isVector() &&
"Vector/scalar result type mismatch for setcc");
assert(Cond != ISD::SETCC_INVALID &&
"Cannot create a setCC of an invalid node.");
- // TODO: Get rid of FlagInserter when TargetLowering can handle SDNodeFlags.
- SDNodeFlags NodeFlags =
- Flags.value_or(Inserter ? Inserter->getFlags() : SDNodeFlags());
if (Chain)
return getNode(IsSignaling ? ISD::STRICT_FSETCCS : ISD::STRICT_FSETCC, DL,
{VT, MVT::Other}, {Chain, LHS, RHS, getCondCode(Cond)},
- NodeFlags);
- return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond), NodeFlags);
+ Flags);
+ return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond), Flags);
}
/// Helper function to make it easier to build VP_SETCCs if you just have an
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 442225bdec01f..6c9713f7ac6c2 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -5425,7 +5425,8 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase {
/// comparison may check if the operand is NAN, INF, zero, normal, etc. The
/// result should be used as the condition operand for a select or branch.
virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
- const DenormalMode &Mode) const;
+ const DenormalMode &Mode,
+ SDNodeFlags Flags = {}) const;
/// Return a target-dependent result if the input operand is not suitable for
/// use with a square root estimate calculation.
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 5bb291007f1d6..951be2b466611 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -662,9 +662,9 @@ namespace {
bool InexpensiveOnly = false,
std::optional<EVT> OutVT = std::nullopt);
SDValue BuildDivEstimate(SDValue N, SDValue Op, SDNodeFlags Flags);
- SDValue buildRsqrtEstimate(SDValue Op);
- SDValue buildSqrtEstimate(SDValue Op);
- SDValue buildSqrtEstimateImpl(SDValue Op, bool Recip);
+ SDValue buildRsqrtEstimate(SDValue Op, SDNodeFlags Flags);
+ SDValue buildSqrtEstimate(SDValue Op, SDNodeFlags Flags);
+ SDValue buildSqrtEstimateImpl(SDValue Op, bool Recip, SDNodeFlags Flags);
SDValue buildSqrtNROneConst(SDValue Arg, SDValue Est, unsigned Iterations,
bool Reciprocal);
SDValue buildSqrtNRTwoConst(SDValue Arg, SDValue Est, unsigned Iterations,
@@ -14816,7 +14816,8 @@ SDValue DAGCombiner::foldSextSetcc(SDNode *N) {
SDLoc DL(N);
// Propagate fast-math-flags.
- SelectionDAG::FlagInserter FlagsInserter(DAG, N0->getFlags());
+ SDNodeFlags Flags = N0->getFlags();
+ SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
// On some architectures (such as SSE/NEON/etc) the SETCC result type is
// the same size as the compared operands. Try to optimize sext(setcc())
@@ -14834,14 +14835,16 @@ SDValue DAGCombiner::foldSextSetcc(SDNode *N) {
// we know that the element size of the sext'd result matches the
// element size of the compare operands.
if (VT.getSizeInBits() == SVT.getSizeInBits())
- return DAG.getSetCC(DL, VT, N00, N01, CC);
+ return DAG.getSetCC(DL, VT, N00, N01, CC, /*Chain=*/{},
+ /*IsSignaling=*/false, Flags);
// If the desired elements are smaller or larger than the source
// elements, we can use a matching integer vector type and then
// truncate/sign extend.
EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
if (SVT == MatchingVecType) {
- SDValue VsetCC = DAG.getSetCC(DL, MatchingVecType, N00, N01, CC);
+ SDValue VsetCC = DAG.getSetCC(DL, MatchingVecType, N00, N01, CC,
+ /*Chain=*/{}, /*IsSignaling=*/false, Flags);
return DAG.getSExtOrTrunc(VsetCC, DL, VT);
}
}
@@ -14887,7 +14890,8 @@ SDValue DAGCombiner::foldSextSetcc(SDNode *N) {
if (IsFreeToExtend(N00) && IsFreeToExtend(N01)) {
SDValue Ext0 = DAG.getNode(ExtOpcode, DL, VT, N00);
SDValue Ext1 = DAG.getNode(ExtOpcode, DL, VT, N01);
- return DAG.getSetCC(DL, VT, Ext0, Ext1, CC);
+ return DAG.getSetCC(DL, VT, Ext0, Ext1, CC, /*Chain=*/{},
+ /*IsSignaling=*/false, Flags);
}
}
}
@@ -14919,7 +14923,8 @@ SDValue DAGCombiner::foldSextSetcc(SDNode *N) {
// because a sext is likely cheaper than a select?
if (SetCCVT.getScalarSizeInBits() != 1 &&
(!LegalOperations || TLI.isOperationLegal(ISD::SETCC, N00VT))) {
- SDValue SetCC = DAG.getSetCC(DL, SetCCVT, N00, N01, CC);
+ SDValue SetCC = DAG.getSetCC(DL, SetCCVT, N00, N01, CC, /*Chain=*/{},
+ /*IsSignaling=*/false, Flags);
return DAG.getSelect(DL, VT, SetCC, ExtTrueVal, Zero);
}
}
@@ -18969,19 +18974,21 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
// into a target-specific square root estimate instruction.
bool N1AllowReciprocal = N1->getFlags().hasAllowReciprocal();
if (N1.getOpcode() == ISD::FSQRT) {
- if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0)))
+ if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0), N1->getFlags()))
return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
} else if (N1.getOpcode() == ISD::FP_EXTEND &&
N1.getOperand(0).getOpcode() == ISD::FSQRT &&
N1AllowReciprocal) {
- if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0))) {
+ if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0),
+ N1.getOperand(0)->getFlags())) {
RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N1), VT, RV);
AddToWorklist(RV.getNode());
return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
}
} else if (N1.getOpcode() == ISD::FP_ROUND &&
N1.getOperand(0).getOpcode() == ISD::FSQRT) {
- if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0))) {
+ if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0),
+ N1.getOperand(0)->getFlags())) {
RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N1), VT, RV, N1.getOperand(1));
AddToWorklist(RV.getNode());
return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
@@ -19013,7 +19020,7 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
SDValue AA = DAG.getNode(ISD::FMUL, DL, VT, A, A);
SDValue AAZ =
DAG.getNode(ISD::FMUL, DL, VT, AA, Sqrt.getOperand(0));
- if (SDValue Rsqrt = buildRsqrtEstimate(AAZ))
+ if (SDValue Rsqrt = buildRsqrtEstimate(AAZ, Sqrt->getFlags()))
return DAG.getNode(ISD::FMUL, DL, VT, N0, Rsqrt);
// Estimate creation failed. Clean up speculatively created nodes.
@@ -19023,7 +19030,8 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
// We found a FSQRT, so try to make this fold:
// X / (Y * sqrt(Z)) -> X * (rsqrt(Z) / Y)
- if (SDValue Rsqrt = buildRsqrtEstimate(Sqrt.getOperand(0))) {
+ if (SDValue Rsqrt =
+ buildRsqrtEstimate(Sqrt.getOperand(0), Sqrt->getFlags())) {
SDValue Div = DAG.getNode(ISD::FDIV, SDLoc(N1), VT, Rsqrt, Y);
AddToWorklist(Div.getNode());
return DAG.getNode(ISD::FMUL, DL, VT, N0, Div);
@@ -19127,7 +19135,7 @@ SDValue DAGCombiner::visitFSQRT(SDNode *N) {
// transform the fdiv, we may produce a sub-optimal estimate sequence
// because the reciprocal calculation may not have to filter out a
// 0.0 input.
- return buildSqrtEstimate(N0);
+ return buildSqrtEstimate(N0, Flags);
}
/// copysign(x, fp_extend(y)) -> copysign(x, y)
@@ -30366,7 +30374,8 @@ SDValue DAGCombiner::buildSqrtNRTwoConst(SDValue Arg, SDValue Est,
/// Build code to calculate either rsqrt(Op) or sqrt(Op). In the latter case
/// Op*rsqrt(Op) is actually computed, so additional postprocessing is needed if
/// Op can be zero.
-SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, bool Reciprocal) {
+SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, bool Reciprocal,
+ SDNodeFlags Flags) {
if (LegalDAG)
return SDValue();
@@ -30399,7 +30408,8 @@ SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, bool Reciprocal) {
if (!Reciprocal) {
SDLoc DL(Op);
// Try the target specific test first.
- SDValue Test = TLI.getSqrtInputTest(Op, DAG, DAG.getDenormalMode(VT));
+ SDValue Test =
+ TLI.getSqrtInputTest(Op, DAG, DAG.getDenormalMode(VT), Flags);
// The estimate is now completely wrong if the input was exactly 0.0 or
// possibly a denormal. Force the answer to 0.0 or value provided by
@@ -30413,12 +30423,12 @@ SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, bool Reciprocal) {
return SDValue();
}
-SDValue DAGCombiner::buildRsqrtEstimate(SDValue Op) {
- return buildSqrtEstimateImpl(Op, true);
+SDValue DAGCombiner::buildRsqrtEstimate(SDValue Op, SDNodeFlags Flags) {
+ return buildSqrtEstimateImpl(Op, true, Flags);
}
-SDValue DAGCombiner::buildSqrtEstimate(SDValue Op) {
- return buildSqrtEstimateImpl(Op, false);
+SDValue DAGCombiner::buildSqrtEstimate(SDValue Op, SDNodeFlags Flags) {
+ return buildSqrtEstimateImpl(Op, false, Flags);
}
/// Return true if there is any possibility that the two addresses overlap.
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 30a982b580620..ee2b222427eb1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -7501,7 +7501,8 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
}
SDValue TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
- const DenormalMode &Mode) const {
+ const DenormalMode &Mode,
+ SDNodeFlags Flags) const {
SDLoc DL(Op);
EVT VT = Op.getValueType();
EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
@@ -7512,7 +7513,8 @@ SDValue TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
if (Mode.Input == DenormalMode::PreserveSign ||
Mode.Input == DenormalMode::PositiveZero) {
// Test = X == 0.0
- return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
+ return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ, /*Chain=*/{},
+ /*IsSignaling=*/false, Flags);
}
// Testing it with denormal inputs to avoid wrong estimate.
@@ -7521,8 +7523,9 @@ SDValue TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
const fltSemantics &FltSem = VT.getFltSemantics();
APFloat SmallestNorm = APFloat::getSmallestNormalized(FltSem);
SDValue NormC = DAG.getConstantFP(SmallestNorm, DL, VT);
- SDValue Fabs = DAG.getNode(ISD::FABS, DL, VT, Op);
- return DAG.getSetCC(DL, CCVT, Fabs, NormC, ISD::SETLT);
+ SDValue Fabs = DAG.getNode(ISD::FABS, DL, VT, Op, Flags);
+ return DAG.getSetCC(DL, CCVT, Fabs, NormC, ISD::SETLT, /*Chain=*/{},
+ /*IsSignaling=*/false, Flags);
}
SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 3f2b73db8dee5..7f50174c6314d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -13130,14 +13130,15 @@ static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode,
return SDValue();
}
-SDValue
-AArch64TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
- const DenormalMode &Mode) const {
+SDValue AArch64TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
+ const DenormalMode &Mode,
+ SDNodeFlags Flags) const {
SDLoc DL(Op);
EVT VT = Op.getValueType();
EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
- return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
+ return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ, /*Chain=*/{},
+ /*Signaling=*/false, Flags);
}
SDValue
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index aa6110a4ce39d..252d8d32768a5 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -824,7 +824,8 @@ class AArch64TargetLowering : public TargetLowering {
SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
int &ExtraSteps) const override;
SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
- const DenormalMode &Mode) const override;
+ const DenormalMode &Mode,
+ SDNodeFlags Flags = {}) const override;
SDValue getSqrtResultForDenormInput(SDValue Operand,
SelectionDAG &DAG) const override;
unsigned combineRepeatedFPDivisors() const override;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 4a8b1998907cd..e7d6e5c54ab51 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -14716,17 +14716,18 @@ static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
}
SDValue PPCTargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
- const DenormalMode &Mode) const {
+ const DenormalMode &Mode,
+ SDNodeFlags Flags) const {
// We only have VSX Vector Test for software Square Root.
EVT VT = Op.getValueType();
if (!isTypeLegal(MVT::i1) ||
(VT != MVT::f64 &&
((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())))
- return TargetLowering::getSqrtInputTest(Op, DAG, Mode);
+ return TargetLowering::getSqrtInputTest(Op, DAG, Mode, Flags);
SDLoc DL(Op);
// The output register of FTSQRT is CR field.
- SDValue FTSQRT = DAG.getNode(PPCISD::FTSQRT, DL, MVT::i32, Op);
+ SDValue FTSQRT = DAG.getNode(PPCISD::FTSQRT, DL, MVT::i32, Op, Flags);
// ftsqrt BF,FRB
// Let e_b be the unbiased exponent of the double-precision
// floating-point operand in register FRB.
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 52e79469c78da..b9c07106ce027 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -890,7 +890,8 @@ namespace llvm {
SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
int &RefinementSteps) const override;
SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
- const DenormalMode &Mode) const override;
+ const DenormalMode &Mode,
+ SDNodeFlags Flags = {}) const override;
SDValue getSqrtResultForDenormInput(SDValue Operand,
SelectionDAG &DAG) const override;
unsigned combineRepeatedFPDivisors() const override;
More information about the llvm-commits
mailing list