[llvm] [SelectionDAGBuilder] Remove NoNaNsFPMath uses (PR #169904)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 1 00:55:32 PST 2025
https://github.com/paperchalice updated https://github.com/llvm/llvm-project/pull/169904
>From e2cbee684a83e3e6db12a0c9fd2cec3f3bbab7f0 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Fri, 28 Nov 2025 19:36:20 +0800
Subject: [PATCH 01/12] [SelectionDAG] Remove NoNaNsFPMath Replaced by checking
fast-math flags or nofpclass.
---
.../SelectionDAG/SelectionDAGBuilder.cpp | 31 +++++++++---
llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll | 50 +++++++++----------
2 files changed, 48 insertions(+), 33 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 53d73ad618bd1..9f9f69be6d8ce 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2446,6 +2446,26 @@ static bool InBlock(const Value *V, const BasicBlock *BB) {
return true;
}
+static bool AreFCmpOperandsNonNaN(const Instruction *Inst,
+ const SelectionDAG &DAG) {
+ assert(
+ (isa<FCmpInst>(Inst) || isa<ConstrainedFPCmpIntrinsic>(Inst) ||
+ (isa<VPIntrinsic>(Inst) &&
+ dyn_cast<VPIntrinsic>(Inst)->getIntrinsicID() == Intrinsic::vp_fcmp)) &&
+ "Not fcmp instruction or its intrinsic variants!");
+
+ if (const auto *FPOp = dyn_cast<FPMathOperator>(Inst))
+ if (FPOp->hasNoNaNs())
+ return true;
+
+ for (int I = 0; I != 2; ++I)
+ if (!isKnownNeverNaN(Inst->getOperand(I),
+ SimplifyQuery(DAG.getDataLayout(), Inst)))
+ return false;
+
+ return true;
+}
+
/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
@@ -2479,7 +2499,7 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
FCmpInst::Predicate Pred =
InvertCond ? FC->getInversePredicate() : FC->getPredicate();
Condition = getFCmpCondCode(Pred);
- if (TM.Options.NoNaNsFPMath)
+ if (AreFCmpOperandsNonNaN(FC, DAG))
Condition = getFCmpCodeWithoutNaN(Condition);
}
@@ -3754,7 +3774,7 @@ void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) {
ISD::CondCode Condition = getFCmpCondCode(predicate);
auto *FPMO = cast<FPMathOperator>(&I);
- if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
+ if (AreFCmpOperandsNonNaN(&I, DAG))
Condition = getFCmpCodeWithoutNaN(Condition);
SDNodeFlags Flags;
@@ -8496,7 +8516,7 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
case ISD::STRICT_FSETCCS: {
auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
- if (TM.Options.NoNaNsFPMath)
+ if (AreFCmpOperandsNonNaN(FPCmp, DAG))
Condition = getFCmpCodeWithoutNaN(Condition);
Opers.push_back(DAG.getCondCode(Condition));
break;
@@ -8779,11 +8799,8 @@ void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
CmpInst::Predicate CondCode = VPIntrin.getPredicate();
bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
if (IsFP) {
- // FIXME: Regular fcmps are FPMathOperators which may have fast-math (nnan)
- // flags, but calls that don't return floating-point types can't be
- // FPMathOperators, like vp.fcmp. This affects constrained fcmp too.
Condition = getFCmpCondCode(CondCode);
- if (TM.Options.NoNaNsFPMath)
+ if (AreFCmpOperandsNonNaN(&VPIntrin, DAG))
Condition = getFCmpCodeWithoutNaN(Condition);
} else {
Condition = getICmpCondCode(CondCode);
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
index ff923efe8eb43..5cf8ae5cb18f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
@@ -443,7 +443,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp one <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp nnan one <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -611,7 +611,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ueq <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp nnan ueq <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -685,7 +685,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ugt <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp nnan ugt <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -759,7 +759,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp uge <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp nnan uge <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -833,7 +833,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ult <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp nnan ult <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -907,7 +907,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ule <vscale x 8 x bfloat> %va, %splat
+ %vc = fcmp nnan ule <vscale x 8 x bfloat> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp one <vscale x 8 x half> %va, %splat
+ %vc = fcmp nnan one <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -1934,7 +1934,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ueq <vscale x 8 x half> %va, %splat
+ %vc = fcmp nnan ueq <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2041,7 +2041,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ugt <vscale x 8 x half> %va, %splat
+ %vc = fcmp nnan ugt <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2148,7 +2148,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp uge <vscale x 8 x half> %va, %splat
+ %vc = fcmp nnan uge <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2255,7 +2255,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ult <vscale x 8 x half> %va, %splat
+ %vc = fcmp nnan ult <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2362,7 +2362,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
; ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ule <vscale x 8 x half> %va, %splat
+ %vc = fcmp nnan ule <vscale x 8 x half> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -2939,7 +2939,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp one <vscale x 8 x float> %va, %splat
+ %vc = fcmp nnan one <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3070,7 +3070,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ueq <vscale x 8 x float> %va, %splat
+ %vc = fcmp nnan ueq <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3129,7 +3129,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ugt <vscale x 8 x float> %va, %splat
+ %vc = fcmp nnan ugt <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3188,7 +3188,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp uge <vscale x 8 x float> %va, %splat
+ %vc = fcmp nnan uge <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3247,7 +3247,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ult <vscale x 8 x float> %va, %splat
+ %vc = fcmp nnan ult <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3306,7 +3306,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ule <vscale x 8 x float> %va, %splat
+ %vc = fcmp nnan ule <vscale x 8 x float> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3773,7 +3773,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp one <vscale x 8 x double> %va, %splat
+ %vc = fcmp nnan one <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3904,7 +3904,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ueq <vscale x 8 x double> %va, %splat
+ %vc = fcmp nnan ueq <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -3963,7 +3963,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ugt <vscale x 8 x double> %va, %splat
+ %vc = fcmp nnan ugt <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4022,7 +4022,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp uge <vscale x 8 x double> %va, %splat
+ %vc = fcmp nnan uge <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4081,7 +4081,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ult <vscale x 8 x double> %va, %splat
+ %vc = fcmp nnan ult <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4140,7 +4140,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
- %vc = fcmp ule <vscale x 8 x double> %va, %splat
+ %vc = fcmp nnan ule <vscale x 8 x double> %va, %splat
ret <vscale x 8 x i1> %vc
}
@@ -4322,5 +4322,3 @@ define <vscale x 16 x i1> @fcmp_oeq_vf_nx16f64(<vscale x 16 x double> %va) {
%vc = fcmp oeq <vscale x 16 x double> %va, zeroinitializer
ret <vscale x 16 x i1> %vc
}
-
-attributes #0 = { "no-nans-fp-math"="true" }
>From 15378a36ddb58d85d43e02f85140945094d7ab47 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Fri, 28 Nov 2025 21:19:32 +0800
Subject: [PATCH 02/12] [X86] Consider fast-math flags when combining select
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1b0bf6823e390..83bf275b4270d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -48011,6 +48011,8 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
}
if (Opcode) {
+ // Propagate fast-math-flags.
+ SelectionDAG::FlagInserter FlagsInserter(DAG, N->getFlags());
if (IsStrict) {
SDValue Ret = DAG.getNode(Opcode == X86ISD::FMIN ? X86ISD::STRICT_FMIN
: X86ISD::STRICT_FMAX,
@@ -55532,8 +55534,9 @@ static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
// FMIN/FMAX are commutative if no NaNs and no negative zeros are allowed.
- if (!DAG.getTarget().Options.NoNaNsFPMath ||
- !DAG.getTarget().Options.NoSignedZerosFPMath)
+ if ((!DAG.getTarget().Options.NoNaNsFPMath && !N->getFlags().hasNoNaNs()) ||
+ (!DAG.getTarget().Options.NoSignedZerosFPMath &&
+ !N->getFlags().hasNoSignedZeros()))
return SDValue();
// If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
>From 6e0080b6e93a8f1c2b4cb43b06df5e60a69dda93 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Fri, 28 Nov 2025 21:36:44 +0800
Subject: [PATCH 03/12] Fix X86 tests
---
llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll | 16 +-
llvm/test/CodeGen/X86/avx-minmax.ll | 18 +-
llvm/test/CodeGen/X86/sse-minmax-finite.ll | 787 +++++++++++
llvm/test/CodeGen/X86/sse-minmax-unsafe.ll | 687 ++++++++++
llvm/test/CodeGen/X86/sse-minmax.ll | 1332 ++++++-------------
5 files changed, 1924 insertions(+), 916 deletions(-)
create mode 100644 llvm/test/CodeGen/X86/sse-minmax-finite.ll
create mode 100644 llvm/test/CodeGen/X86/sse-minmax-unsafe.ll
diff --git a/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll b/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
index 940fe8cf6ba75..9f072c6334c07 100644
--- a/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
+++ b/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
@@ -1,13 +1,17 @@
-; RUN: llc < %s -mtriple=i686-- -mattr=-sse | FileCheck %s -check-prefix=WITHNANS
-; RUN: llc < %s -mtriple=i686-- -mattr=-sse -enable-no-nans-fp-math | FileCheck %s -check-prefix=NONANS
+; RUN: llc < %s -mtriple=i686-- -mattr=-sse | FileCheck %s
-; WITHNANS-LABEL: test:
-; WITHNANS: setnp
-; NONANS-LABEL: test:
-; NONANS-NOT: setnp
define i32 @test(float %f) {
+; CHECK-LABEL: test:
+; CHECK: setnp
%tmp = fcmp oeq float %f, 0.000000e+00 ; <i1> [#uses=1]
%tmp.upgrd.1 = zext i1 %tmp to i32 ; <i32> [#uses=1]
ret i32 %tmp.upgrd.1
}
+define i32 @test_nnan(float %f) {
+; CHECK-LABEL: test_nnan:
+; CHECK-NOT: setnp
+ %tmp = fcmp nnan oeq float %f, 0.000000e+00 ; <i1> [#uses=1]
+ %tmp.upgrd.1 = zext i1 %tmp to i32 ; <i32> [#uses=1]
+ ret i32 %tmp.upgrd.1
+}
diff --git a/llvm/test/CodeGen/X86/avx-minmax.ll b/llvm/test/CodeGen/X86/avx-minmax.ll
index 8e4b6c6af4cb1..27864a9eefa8e 100644
--- a/llvm/test/CodeGen/X86/avx-minmax.ll
+++ b/llvm/test/CodeGen/X86/avx-minmax.ll
@@ -1,12 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -enable-no-nans-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s
define <2 x double> @maxpd(<2 x double> %x, <2 x double> %y) {
; CHECK-LABEL: maxpd:
; CHECK: # %bb.0:
; CHECK-NEXT: vmaxpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
- %max_is_x = fcmp oge <2 x double> %x, %y
+ %max_is_x = fcmp nnan oge <2 x double> %x, %y
%max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
ret <2 x double> %max
}
@@ -16,7 +16,7 @@ define <2 x double> @minpd(<2 x double> %x, <2 x double> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vminpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
- %min_is_x = fcmp ole <2 x double> %x, %y
+ %min_is_x = fcmp nnan ole <2 x double> %x, %y
%min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
ret <2 x double> %min
}
@@ -26,7 +26,7 @@ define <4 x float> @maxps(<4 x float> %x, <4 x float> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vmaxps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
- %max_is_x = fcmp oge <4 x float> %x, %y
+ %max_is_x = fcmp nnan oge <4 x float> %x, %y
%max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
ret <4 x float> %max
}
@@ -36,7 +36,7 @@ define <4 x float> @minps(<4 x float> %x, <4 x float> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vminps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
- %min_is_x = fcmp ole <4 x float> %x, %y
+ %min_is_x = fcmp nnan ole <4 x float> %x, %y
%min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
ret <4 x float> %min
}
@@ -46,7 +46,7 @@ define <4 x double> @vmaxpd(<4 x double> %x, <4 x double> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vmaxpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
- %max_is_x = fcmp oge <4 x double> %x, %y
+ %max_is_x = fcmp nnan oge <4 x double> %x, %y
%max = select <4 x i1> %max_is_x, <4 x double> %x, <4 x double> %y
ret <4 x double> %max
}
@@ -56,7 +56,7 @@ define <4 x double> @vminpd(<4 x double> %x, <4 x double> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vminpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
- %min_is_x = fcmp ole <4 x double> %x, %y
+ %min_is_x = fcmp nnan ole <4 x double> %x, %y
%min = select <4 x i1> %min_is_x, <4 x double> %x, <4 x double> %y
ret <4 x double> %min
}
@@ -66,7 +66,7 @@ define <8 x float> @vmaxps(<8 x float> %x, <8 x float> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vmaxps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
- %max_is_x = fcmp oge <8 x float> %x, %y
+ %max_is_x = fcmp nnan oge <8 x float> %x, %y
%max = select <8 x i1> %max_is_x, <8 x float> %x, <8 x float> %y
ret <8 x float> %max
}
@@ -76,7 +76,7 @@ define <8 x float> @vminps(<8 x float> %x, <8 x float> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vminps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
- %min_is_x = fcmp ole <8 x float> %x, %y
+ %min_is_x = fcmp nnan ole <8 x float> %x, %y
%min = select <8 x i1> %min_is_x, <8 x float> %x, <8 x float> %y
ret <8 x float> %min
}
diff --git a/llvm/test/CodeGen/X86/sse-minmax-finite.ll b/llvm/test/CodeGen/X86/sse-minmax-finite.ll
new file mode 100644
index 0000000000000..469637964d849
--- /dev/null
+++ b/llvm/test/CodeGen/X86/sse-minmax-finite.ll
@@ -0,0 +1,787 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s
+
+; Some of these patterns can be matched as SSE min or max. Some of
+; them can be matched provided that the operands are swapped.
+; Some of them can't be matched at all and require a comparison
+; and a conditional branch.
+
+; The naming convention is {,x_,y_}{o,u}{gt,lt,ge,le}{,_inverse}
+; _x: use 0.0 instead of %y
+; _y: use -0.0 instead of %y
+; _inverse : swap the arms of the select.
+
+define double @ogt(double %x, double %y) {
+; CHECK-LABEL: ogt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ogt double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @olt(double %x, double %y) {
+; CHECK-LABEL: olt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan olt double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ogt_inverse(double %x, double %y) {
+; CHECK-LABEL: ogt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ogt double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @olt_inverse(double %x, double %y) {
+; CHECK-LABEL: olt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan olt double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @oge(double %x, double %y) {
+; CHECK-LABEL: oge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan oge double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ole(double %x, double %y) {
+; CHECK-LABEL: ole:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ole double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @oge_inverse(double %x, double %y) {
+; RELAX-LABEL: oge_inverse:
+; RELAX: # %bb.0:
+; RELAX-NEXT: minsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: oge_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan oge double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ole_inverse(double %x, double %y) {
+; RELAX-LABEL: ole_inverse:
+; RELAX: # %bb.0:
+; RELAX-NEXT: maxsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: ole_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ole double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ogt_x(double %x) {
+; CHECK-LABEL: ogt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ogt double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @olt_x(double %x) {
+; CHECK-LABEL: olt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan olt double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ogt_inverse_x(double %x) {
+; CHECK-LABEL: ogt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ogt double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @olt_inverse_x(double %x) {
+; CHECK-LABEL: olt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan olt double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @oge_x(double %x) {
+; CHECK-LABEL: oge_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan oge double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ole_x(double %x) {
+; CHECK-LABEL: ole_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ole double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @oge_inverse_x(double %x) {
+; RELAX-LABEL: oge_inverse_x:
+; RELAX: # %bb.0:
+; RELAX-NEXT: xorpd %xmm1, %xmm1
+; RELAX-NEXT: minsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: oge_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan oge double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ole_inverse_x(double %x) {
+; RELAX-LABEL: ole_inverse_x:
+; RELAX: # %bb.0:
+; RELAX-NEXT: xorpd %xmm1, %xmm1
+; RELAX-NEXT: maxsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: ole_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ole double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ugt(double %x, double %y) {
+; RELAX-LABEL: ugt:
+; RELAX: # %bb.0:
+; RELAX-NEXT: maxsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: ugt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ugt double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ult(double %x, double %y) {
+; RELAX-LABEL: ult:
+; RELAX: # %bb.0:
+; RELAX-NEXT: minsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: ult:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ult double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ugt_inverse(double %x, double %y) {
+; CHECK-LABEL: ugt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ugt double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ult_inverse(double %x, double %y) {
+; CHECK-LABEL: ult_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ult double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @uge(double %x, double %y) {
+; CHECK-LABEL: uge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan uge double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ule(double %x, double %y) {
+; CHECK-LABEL: ule:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ule double %x, %y
+ %d = select i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @uge_inverse(double %x, double %y) {
+; CHECK-LABEL: uge_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan uge double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ule_inverse(double %x, double %y) {
+; CHECK-LABEL: ule_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ule double %x, %y
+ %d = select i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ugt_x(double %x) {
+; RELAX-LABEL: ugt_x:
+; RELAX: # %bb.0:
+; RELAX-NEXT: xorpd %xmm1, %xmm1
+; RELAX-NEXT: maxsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: ugt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ugt double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ult_x(double %x) {
+; RELAX-LABEL: ult_x:
+; RELAX: # %bb.0:
+; RELAX-NEXT: xorpd %xmm1, %xmm1
+; RELAX-NEXT: minsd %xmm1, %xmm0
+; RELAX-NEXT: retq
+; CHECK-LABEL: ult_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ult double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ugt_inverse_x(double %x) {
+; CHECK-LABEL: ugt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ugt double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ult_inverse_x(double %x) {
+; CHECK-LABEL: ult_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ult double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @uge_x(double %x) {
+; CHECK-LABEL: uge_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan uge double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ule_x(double %x) {
+; CHECK-LABEL: ule_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ule double %x, 0.000000e+00
+ %d = select i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @uge_inverse_x(double %x) {
+; CHECK-LABEL: uge_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan uge double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ule_inverse_x(double %x) {
+; CHECK-LABEL: ule_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ule double %x, 0.000000e+00
+ %d = select i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ogt_y(double %x) {
+; CHECK-LABEL: ogt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ogt double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @olt_y(double %x) {
+; CHECK-LABEL: olt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan olt double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ogt_inverse_y(double %x) {
+; CHECK-LABEL: ogt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ogt double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @olt_inverse_y(double %x) {
+; CHECK-LABEL: olt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan olt double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @oge_y(double %x) {
+; CHECK-LABEL: oge_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan oge double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ole_y(double %x) {
+; CHECK-LABEL: ole_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ole double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @oge_inverse_y(double %x) {
+; NOTE(review): removed stale RELAX-prefixed checks here. No RUN line in this
+; file invokes FileCheck with the RELAX prefix after dropping the
+; -enable-no-nans-fp-math run configurations, so they were dead (and disagreed
+; with the CHECK lines below). Regenerate with utils/update_llc_test_checks.py.
+; CHECK-LABEL: oge_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan oge double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ole_inverse_y(double %x) {
+; NOTE(review): removed stale RELAX-prefixed checks here. No RUN line in this
+; file invokes FileCheck with the RELAX prefix after dropping the
+; -enable-no-nans-fp-math run configurations, so they were dead (and disagreed
+; with the CHECK lines below). Regenerate with utils/update_llc_test_checks.py.
+; CHECK-LABEL: ole_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ole double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ugt_y(double %x) {
+; NOTE(review): removed stale RELAX-prefixed checks here. No RUN line in this
+; file invokes FileCheck with the RELAX prefix after dropping the
+; -enable-no-nans-fp-math run configurations, so they were dead duplicates of
+; the CHECK lines below. Regenerate with utils/update_llc_test_checks.py.
+; CHECK-LABEL: ugt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ugt double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ult_y(double %x) {
+; NOTE(review): removed stale RELAX-prefixed checks here. No RUN line in this
+; file invokes FileCheck with the RELAX prefix after dropping the
+; -enable-no-nans-fp-math run configurations, so they were dead duplicates of
+; the CHECK lines below. Regenerate with utils/update_llc_test_checks.py.
+; CHECK-LABEL: ult_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ult double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ugt_inverse_y(double %x) {
+; CHECK-LABEL: ugt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ugt double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ult_inverse_y(double %x) {
+; CHECK-LABEL: ult_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ult double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @uge_y(double %x) {
+; CHECK-LABEL: uge_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan uge double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ule_y(double %x) {
+; CHECK-LABEL: ule_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ule double %x, -0.000000e+00
+ %d = select i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @uge_inverse_y(double %x) {
+; CHECK-LABEL: uge_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan uge double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ule_inverse_y(double %x) {
+; CHECK-LABEL: ule_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nnan ule double %x, -0.000000e+00
+ %d = select i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+; Test a few more misc. cases.
+
+define double @clampTo3k_a(double %x) {
+; CHECK-LABEL: clampTo3k_a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan ogt double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_b(double %x) {
+; CHECK-LABEL: clampTo3k_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan uge double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_c(double %x) {
+; CHECK-LABEL: clampTo3k_c:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan olt double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_d(double %x) {
+; CHECK-LABEL: clampTo3k_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan ule double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_e(double %x) {
+; CHECK-LABEL: clampTo3k_e:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan olt double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_f(double %x) {
+; CHECK-LABEL: clampTo3k_f:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan ule double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_g(double %x) {
+; CHECK-LABEL: clampTo3k_g:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan ogt double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_h(double %x) {
+; CHECK-LABEL: clampTo3k_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nnan uge double %x, 3.000000e+03
+ %y = select i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_maxpd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxpd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nnan oge <2 x double> %x, %y
+ %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %max
+}
+
+define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_minpd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minpd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nnan ole <2 x double> %x, %y
+ %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %min
+}
+
+define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_maxps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nnan oge <4 x float> %x, %y
+ %max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %max
+}
+
+define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_minps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nnan ole <4 x float> %x, %y
+ %min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %min
+}
+
+define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
+; CHECK-LABEL: test_maxps_illegal_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nnan oge <2 x float> %x, %y
+ %max = select <2 x i1> %max_is_x, <2 x float> %x, <2 x float> %y
+ ret <2 x float> %max
+}
+
+define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
+; CHECK-LABEL: test_minps_illegal_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nnan ole <2 x float> %x, %y
+ %min = select <2 x i1> %min_is_x, <2 x float> %x, <2 x float> %y
+ ret <2 x float> %min
+}
+
+define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
+; CHECK-LABEL: test_maxps_illegal_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nnan oge <3 x float> %x, %y
+ %max = select <3 x i1> %max_is_x, <3 x float> %x, <3 x float> %y
+ ret <3 x float> %max
+}
+
+define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
+; CHECK-LABEL: test_minps_illegal_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nnan ole <3 x float> %x, %y
+ %min = select <3 x i1> %min_is_x, <3 x float> %x, <3 x float> %y
+ ret <3 x float> %min
+}
+
+; OSS-Fuzz #13838
+; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13838
+define float @ossfuzz13838(float %x) {
+; CHECK-LABEL: ossfuzz13838:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: retq
+bb:
+ %cmp2 = fcmp nnan fast olt float %x, 2.550000e+02
+ %B1 = urem i1 %cmp2, %cmp2
+ %min = select i1 %B1, float %x, float 2.550000e+02
+ %B = frem float %min, 0x47EFFFFFE0000000
+ %cmp1 = fcmp nnan fast olt float %B, 1.000000e+00
+ %r = select i1 %cmp1, float 1.000000e+00, float %min
+ ret float %r
+}
diff --git a/llvm/test/CodeGen/X86/sse-minmax-unsafe.ll b/llvm/test/CodeGen/X86/sse-minmax-unsafe.ll
new file mode 100644
index 0000000000000..88167250a5314
--- /dev/null
+++ b/llvm/test/CodeGen/X86/sse-minmax-unsafe.ll
@@ -0,0 +1,687 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s
+
+; Every pattern in this file carries the nsz and nnan fast-math flags, so
+; each one can be matched as an SSE min or max instruction, in some cases
+; with the operands swapped. Unlike the strict variants in sse-minmax.ll,
+; none of them requires a comparison and a conditional branch.
+
+; The naming convention is {,x_,y_}{o,u}{gt,lt,ge,le}{,_inverse}
+; _x: use 0.0 instead of %y
+; _y: use -0.0 instead of %y
+; _inverse : swap the arms of the select.
+
+define double @ogt(double %x, double %y) {
+; CHECK-LABEL: ogt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ogt double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @olt(double %x, double %y) {
+; CHECK-LABEL: olt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan olt double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ogt_inverse(double %x, double %y) {
+; CHECK-LABEL: ogt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ogt double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @olt_inverse(double %x, double %y) {
+; CHECK-LABEL: olt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan olt double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @oge(double %x, double %y) {
+; CHECK-LABEL: oge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan oge double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ole(double %x, double %y) {
+; CHECK-LABEL: ole:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ole double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @oge_inverse(double %x, double %y) {
+; CHECK-LABEL: oge_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan oge double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ole_inverse(double %x, double %y) {
+; CHECK-LABEL: ole_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ole double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ogt_x(double %x) {
+; CHECK-LABEL: ogt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ogt double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @olt_x(double %x) {
+; CHECK-LABEL: olt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan olt double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ogt_inverse_x(double %x) {
+; CHECK-LABEL: ogt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ogt double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @olt_inverse_x(double %x) {
+; CHECK-LABEL: olt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan olt double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @oge_x(double %x) {
+; CHECK-LABEL: oge_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan oge double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ole_x(double %x) {
+; CHECK-LABEL: ole_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ole double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @oge_inverse_x(double %x) {
+; CHECK-LABEL: oge_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan oge double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ole_inverse_x(double %x) {
+; CHECK-LABEL: ole_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ole double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ugt(double %x, double %y) {
+; CHECK-LABEL: ugt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ugt double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ult(double %x, double %y) {
+; CHECK-LABEL: ult:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ult double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ugt_inverse(double %x, double %y) {
+; CHECK-LABEL: ugt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ugt double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ult_inverse(double %x, double %y) {
+; CHECK-LABEL: ult_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ult double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @uge(double %x, double %y) {
+; CHECK-LABEL: uge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan uge double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @ule(double %x, double %y) {
+; CHECK-LABEL: ule:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ule double %x, %y
+ %d = select nsz nnan i1 %c, double %x, double %y
+ ret double %d
+}
+
+define double @uge_inverse(double %x, double %y) {
+; CHECK-LABEL: uge_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan uge double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ule_inverse(double %x, double %y) {
+; CHECK-LABEL: ule_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ule double %x, %y
+ %d = select nsz nnan i1 %c, double %y, double %x
+ ret double %d
+}
+
+define double @ugt_x(double %x) {
+; CHECK-LABEL: ugt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ugt double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ult_x(double %x) {
+; CHECK-LABEL: ult_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ult double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ugt_inverse_x(double %x) {
+; CHECK-LABEL: ugt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ugt double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ult_inverse_x(double %x) {
+; CHECK-LABEL: ult_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ult double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @uge_x(double %x) {
+; CHECK-LABEL: uge_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan uge double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @ule_x(double %x) {
+; CHECK-LABEL: ule_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ule double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double 0.000000e+00
+ ret double %d
+}
+
+define double @uge_inverse_x(double %x) {
+; CHECK-LABEL: uge_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan uge double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ule_inverse_x(double %x) {
+; CHECK-LABEL: ule_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ule double %x, 0.000000e+00
+ %d = select nsz nnan i1 %c, double 0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ogt_y(double %x) {
+; CHECK-LABEL: ogt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ogt double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @olt_y(double %x) {
+; CHECK-LABEL: olt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan olt double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ogt_inverse_y(double %x) {
+; CHECK-LABEL: ogt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ogt double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @olt_inverse_y(double %x) {
+; CHECK-LABEL: olt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan olt double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @oge_y(double %x) {
+; CHECK-LABEL: oge_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan oge double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ole_y(double %x) {
+; CHECK-LABEL: ole_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ole double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @oge_inverse_y(double %x) {
+; CHECK-LABEL: oge_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan oge double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ole_inverse_y(double %x) {
+; CHECK-LABEL: ole_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ole double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ugt_y(double %x) {
+; CHECK-LABEL: ugt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ugt double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ult_y(double %x) {
+; CHECK-LABEL: ult_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ult double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ugt_inverse_y(double %x) {
+; CHECK-LABEL: ugt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ugt double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ult_inverse_y(double %x) {
+; CHECK-LABEL: ult_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ult double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @uge_y(double %x) {
+; CHECK-LABEL: uge_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan uge double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @ule_y(double %x) {
+; CHECK-LABEL: ule_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ule double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double %x, double -0.000000e+00
+ ret double %d
+}
+
+define double @uge_inverse_y(double %x) {
+; CHECK-LABEL: uge_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan uge double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+define double @ule_inverse_y(double %x) {
+; CHECK-LABEL: ule_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %c = fcmp nsz nnan ule double %x, -0.000000e+00
+ %d = select nsz nnan i1 %c, double -0.000000e+00, double %x
+ ret double %d
+}
+
+; Test a few more misc. cases.
+
+define double @clampTo3k_a(double %x) {
+; CHECK-LABEL: clampTo3k_a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan ogt double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_b(double %x) {
+; CHECK-LABEL: clampTo3k_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan uge double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_c(double %x) {
+; CHECK-LABEL: clampTo3k_c:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan olt double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_d(double %x) {
+; CHECK-LABEL: clampTo3k_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan ule double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_e(double %x) {
+; CHECK-LABEL: clampTo3k_e:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan olt double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_f(double %x) {
+; CHECK-LABEL: clampTo3k_f:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan ule double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_g(double %x) {
+; CHECK-LABEL: clampTo3k_g:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan ogt double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define double @clampTo3k_h(double %x) {
+; CHECK-LABEL: clampTo3k_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
+ %t0 = fcmp nsz nnan uge double %x, 3.000000e+03
+ %y = select nsz nnan i1 %t0, double 3.000000e+03, double %x
+ ret double %y
+}
+
+define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_maxpd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxpd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nsz nnan oge <2 x double> %x, %y
+ %max = select nsz nnan <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %max
+}
+
+define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_minpd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minpd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nsz nnan ole <2 x double> %x, %y
+ %min = select nsz nnan <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %min
+}
+
+define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_maxps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nsz nnan oge <4 x float> %x, %y
+ %max = select nsz nnan <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %max
+}
+
+define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_minps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nsz nnan ole <4 x float> %x, %y
+ %min = select nsz nnan <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %min
+}
+
+define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
+; CHECK-LABEL: test_maxps_illegal_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nsz nnan oge <2 x float> %x, %y
+ %max = select nsz nnan <2 x i1> %max_is_x, <2 x float> %x, <2 x float> %y
+ ret <2 x float> %max
+}
+
+define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
+; CHECK-LABEL: test_minps_illegal_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nsz nnan ole <2 x float> %x, %y
+ %min = select nsz nnan <2 x i1> %min_is_x, <2 x float> %x, <2 x float> %y
+ ret <2 x float> %min
+}
+
+define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
+; CHECK-LABEL: test_maxps_illegal_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %max_is_x = fcmp nsz nnan oge <3 x float> %x, %y
+ %max = select nsz nnan <3 x i1> %max_is_x, <3 x float> %x, <3 x float> %y
+ ret <3 x float> %max
+}
+
+define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
+; CHECK-LABEL: test_minps_illegal_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minps %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %min_is_x = fcmp nsz nnan ole <3 x float> %x, %y
+ %min = select nsz nnan <3 x i1> %min_is_x, <3 x float> %x, <3 x float> %y
+ ret <3 x float> %min
+}
+
+; OSS-Fuzz #13838
+; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13838
+define float @ossfuzz13838(float %x) {
+; CHECK-LABEL: ossfuzz13838:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: retq
+bb:
+ %cmp2 = fcmp nsz nnan fast olt float %x, 2.550000e+02
+ %B1 = urem i1 %cmp2, %cmp2
+ %min = select nsz nnan i1 %B1, float %x, float 2.550000e+02
+ %B = frem float %min, 0x47EFFFFFE0000000
+ %cmp1 = fcmp nsz nnan fast olt float %B, 1.000000e+00
+ %r = select nsz nnan i1 %cmp1, float 1.000000e+00, float %min
+ ret float %r
+}
diff --git a/llvm/test/CodeGen/X86/sse-minmax.ll b/llvm/test/CodeGen/X86/sse-minmax.ll
index 7904b21a3b1fa..2b97f98450973 100644
--- a/llvm/test/CodeGen/X86/sse-minmax.ll
+++ b/llvm/test/CodeGen/X86/sse-minmax.ll
@@ -1,7 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s --check-prefix=ALL --check-prefix=STRICT
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 -enable-no-signed-zeros-fp-math -enable-no-nans-fp-math | FileCheck %s --check-prefix=ALL --check-prefix=RELAX --check-prefix=UNSAFE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 -enable-no-nans-fp-math | FileCheck %s --check-prefix=ALL --check-prefix=RELAX --check-prefix=FINITE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s
; Some of these patterns can be matched as SSE min or max. Some of
; them can be matched provided that the operands are swapped.
@@ -14,972 +12,640 @@
; _inverse : swap the arms of the select.
define double @ogt(double %x, double %y) {
-; ALL-LABEL: ogt:
-; ALL: # %bb.0:
-; ALL-NEXT: maxsd %xmm1, %xmm0
-; ALL-NEXT: retq
+; CHECK-LABEL: ogt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ogt double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @olt(double %x, double %y) {
-; ALL-LABEL: olt:
-; ALL: # %bb.0:
-; ALL-NEXT: minsd %xmm1, %xmm0
-; ALL-NEXT: retq
+; CHECK-LABEL: olt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp olt double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @ogt_inverse(double %x, double %y) {
-; STRICT-LABEL: ogt_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ogt_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ogt_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ogt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ogt double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @olt_inverse(double %x, double %y) {
-; STRICT-LABEL: olt_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: olt_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: olt_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: olt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp olt double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @oge(double %x, double %y) {
-; STRICT-LABEL: oge:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: cmplesd %xmm2, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: oge:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: oge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: cmplesd %xmm2, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp oge double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @ole(double %x, double %y) {
-; STRICT-LABEL: ole:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: cmplesd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ole:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ole:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: cmplesd %xmm1, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ole double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @oge_inverse(double %x, double %y) {
-; STRICT-LABEL: oge_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: cmplesd %xmm2, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: oge_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: oge_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: oge_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: cmplesd %xmm2, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp oge double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @ole_inverse(double %x, double %y) {
-; STRICT-LABEL: ole_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: cmplesd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ole_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ole_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ole_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: cmplesd %xmm1, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ole double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @ogt_x(double %x) {
-; ALL-LABEL: ogt_x:
-; ALL: # %bb.0:
-; ALL-NEXT: xorpd %xmm1, %xmm1
-; ALL-NEXT: maxsd %xmm1, %xmm0
-; ALL-NEXT: retq
+; CHECK-LABEL: ogt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ogt double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @olt_x(double %x) {
-; ALL-LABEL: olt_x:
-; ALL: # %bb.0:
-; ALL-NEXT: xorpd %xmm1, %xmm1
-; ALL-NEXT: minsd %xmm1, %xmm0
-; ALL-NEXT: retq
+; CHECK-LABEL: olt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp olt double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @ogt_inverse_x(double %x) {
-; STRICT-LABEL: ogt_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ogt_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ogt_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ogt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ogt double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @olt_inverse_x(double %x) {
-; STRICT-LABEL: olt_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: olt_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: olt_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: olt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp olt double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @oge_x(double %x) {
-; STRICT-LABEL: oge_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: cmplesd %xmm0, %xmm1
-; STRICT-NEXT: andpd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: oge_x:
-; RELAX: # %bb.0:
-; RELAX-NEXT: xorpd %xmm1, %xmm1
-; RELAX-NEXT: maxsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: oge_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: cmplesd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp oge double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @ole_x(double %x) {
-; STRICT-LABEL: ole_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: cmplesd %xmm1, %xmm2
-; STRICT-NEXT: andpd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ole_x:
-; RELAX: # %bb.0:
-; RELAX-NEXT: xorpd %xmm1, %xmm1
-; RELAX-NEXT: minsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ole_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: cmplesd %xmm1, %xmm2
+; CHECK-NEXT: andpd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ole double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @oge_inverse_x(double %x) {
-; STRICT-LABEL: oge_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: cmplesd %xmm0, %xmm1
-; STRICT-NEXT: andnpd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: oge_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: oge_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: oge_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: cmplesd %xmm0, %xmm1
+; CHECK-NEXT: andnpd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp oge double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @ole_inverse_x(double %x) {
-; STRICT-LABEL: ole_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm2, %xmm2
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: cmplesd %xmm2, %xmm1
-; STRICT-NEXT: andnpd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ole_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ole_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ole_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm2, %xmm2
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: cmplesd %xmm2, %xmm1
+; CHECK-NEXT: andnpd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ole double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @ugt(double %x, double %y) {
-; STRICT-LABEL: ugt:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: cmpnlesd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ugt:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ugt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ugt double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @ult(double %x, double %y) {
-; STRICT-LABEL: ult:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: cmpnlesd %xmm2, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ult:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ult:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: cmpnlesd %xmm2, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ult double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @ugt_inverse(double %x, double %y) {
-; STRICT-LABEL: ugt_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: cmpnlesd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ugt_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ugt_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ugt_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ugt double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @ult_inverse(double %x, double %y) {
-; STRICT-LABEL: ult_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: cmpnlesd %xmm2, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ult_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ult_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ult_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: cmpnlesd %xmm2, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ult double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @uge(double %x, double %y) {
-; STRICT-LABEL: uge:
-; STRICT: # %bb.0:
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: uge:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: uge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp uge double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @ule(double %x, double %y) {
-; STRICT-LABEL: ule:
-; STRICT: # %bb.0:
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ule:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ule:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ule double %x, %y
%d = select i1 %c, double %x, double %y
ret double %d
}
define double @uge_inverse(double %x, double %y) {
-; STRICT-LABEL: uge_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: minsd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: uge_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: uge_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: uge_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp uge double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @ule_inverse(double %x, double %y) {
-; STRICT-LABEL: ule_inverse:
-; STRICT: # %bb.0:
-; STRICT-NEXT: maxsd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ule_inverse:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ule_inverse:
-; FINITE: # %bb.0:
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ule_inverse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ule double %x, %y
%d = select i1 %c, double %y, double %x
ret double %d
}
define double @ugt_x(double %x) {
-; STRICT-LABEL: ugt_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: cmpnlesd %xmm1, %xmm2
-; STRICT-NEXT: andpd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ugt_x:
-; RELAX: # %bb.0:
-; RELAX-NEXT: xorpd %xmm1, %xmm1
-; RELAX-NEXT: maxsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ugt_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: cmpnlesd %xmm1, %xmm2
+; CHECK-NEXT: andpd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ugt double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @ult_x(double %x) {
-; STRICT-LABEL: ult_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: cmpnlesd %xmm0, %xmm1
-; STRICT-NEXT: andpd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ult_x:
-; RELAX: # %bb.0:
-; RELAX-NEXT: xorpd %xmm1, %xmm1
-; RELAX-NEXT: minsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ult_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: cmpnlesd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ult double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @ugt_inverse_x(double %x) {
-; STRICT-LABEL: ugt_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm2, %xmm2
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
-; STRICT-NEXT: andnpd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ugt_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ugt_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ugt_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm2, %xmm2
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: cmpnlesd %xmm2, %xmm1
+; CHECK-NEXT: andnpd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ugt double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @ult_inverse_x(double %x) {
-; STRICT-LABEL: ult_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: cmpnlesd %xmm0, %xmm1
-; STRICT-NEXT: andnpd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ult_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ult_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ult_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: cmpnlesd %xmm0, %xmm1
+; CHECK-NEXT: andnpd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ult double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @uge_x(double %x) {
-; STRICT-LABEL: uge_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: uge_x:
-; RELAX: # %bb.0:
-; RELAX-NEXT: xorpd %xmm1, %xmm1
-; RELAX-NEXT: maxsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: uge_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp uge double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @ule_x(double %x) {
-; STRICT-LABEL: ule_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ule_x:
-; RELAX: # %bb.0:
-; RELAX-NEXT: xorpd %xmm1, %xmm1
-; RELAX-NEXT: minsd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ule_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ule double %x, 0.000000e+00
%d = select i1 %c, double %x, double 0.000000e+00
ret double %d
}
define double @uge_inverse_x(double %x) {
-; STRICT-LABEL: uge_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: minsd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: uge_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: uge_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: uge_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp uge double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @ule_inverse_x(double %x) {
-; STRICT-LABEL: ule_inverse_x:
-; STRICT: # %bb.0:
-; STRICT-NEXT: xorpd %xmm1, %xmm1
-; STRICT-NEXT: maxsd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ule_inverse_x:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: xorpd %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ule_inverse_x:
-; FINITE: # %bb.0:
-; FINITE-NEXT: xorpd %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ule_inverse_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ule double %x, 0.000000e+00
%d = select i1 %c, double 0.000000e+00, double %x
ret double %d
}
define double @ogt_y(double %x) {
-; ALL-LABEL: ogt_y:
-; ALL: # %bb.0:
-; ALL-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; ALL-NEXT: retq
+; CHECK-LABEL: ogt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%c = fcmp ogt double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @olt_y(double %x) {
-; ALL-LABEL: olt_y:
-; ALL: # %bb.0:
-; ALL-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; ALL-NEXT: retq
+; CHECK-LABEL: olt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%c = fcmp olt double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @ogt_inverse_y(double %x) {
-; STRICT-LABEL: ogt_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ogt_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ogt_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ogt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ogt double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
define double @olt_inverse_y(double %x) {
-; STRICT-LABEL: olt_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: olt_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: olt_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: olt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp olt double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
define double @oge_y(double %x) {
-; STRICT-LABEL: oge_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: cmplesd %xmm1, %xmm0
-; STRICT-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: oge_y:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: oge_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: cmplesd %xmm1, %xmm0
+; CHECK-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp oge double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @ole_y(double %x) {
-; STRICT-LABEL: ole_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: cmplesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ole_y:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ole_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: cmplesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ole double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @oge_inverse_y(double %x) {
-; STRICT-LABEL: oge_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: cmplesd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: oge_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: oge_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: oge_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: cmplesd %xmm1, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp oge double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
define double @ole_inverse_y(double %x) {
-; STRICT-LABEL: ole_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: cmplesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ole_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ole_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ole_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: cmplesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ole double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
define double @ugt_y(double %x) {
-; STRICT-LABEL: ugt_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: cmpnlesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ugt_y:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ugt_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: cmpnlesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ugt double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @ult_y(double %x) {
-; STRICT-LABEL: ult_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: cmpnlesd %xmm1, %xmm0
-; STRICT-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
-; STRICT-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT: movapd %xmm2, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ult_y:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ult_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT: movapd %xmm2, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ult double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @ugt_inverse_y(double %x) {
-; STRICT-LABEL: ugt_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: cmpnlesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ugt_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ugt_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ugt_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: cmpnlesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ugt double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
define double @ult_inverse_y(double %x) {
-; STRICT-LABEL: ult_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm1
-; STRICT-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: cmpnlesd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ult_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ult_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ult_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ult double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
define double @uge_y(double %x) {
-; STRICT-LABEL: uge_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: uge_y:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: uge_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp uge double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @ule_y(double %x) {
-; STRICT-LABEL: ule_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: ule_y:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: ule_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%c = fcmp ule double %x, -0.000000e+00
%d = select i1 %c, double %x, double -0.000000e+00
ret double %d
}
define double @uge_inverse_y(double %x) {
-; STRICT-LABEL: uge_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: uge_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: uge_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: uge_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%c = fcmp uge double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
}
define double @ule_inverse_y(double %x) {
-; STRICT-LABEL: ule_inverse_y:
-; STRICT: # %bb.0:
-; STRICT-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: ule_inverse_y:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: ule_inverse_y:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: ule_inverse_y:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%c = fcmp ule double %x, -0.000000e+00
%d = select i1 %c, double -0.000000e+00, double %x
ret double %d
@@ -988,332 +654,196 @@ define double @ule_inverse_y(double %x) {
; Test a few more misc. cases.
define double @clampTo3k_a(double %x) {
-; STRICT-LABEL: clampTo3k_a:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_a:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_a:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_a:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp ogt double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define double @clampTo3k_b(double %x) {
-; STRICT-LABEL: clampTo3k_b:
-; STRICT: # %bb.0:
-; STRICT-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_b:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_b:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp uge double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define double @clampTo3k_c(double %x) {
-; STRICT-LABEL: clampTo3k_c:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_c:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_c:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_c:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp olt double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define double @clampTo3k_d(double %x) {
-; STRICT-LABEL: clampTo3k_d:
-; STRICT: # %bb.0:
-; STRICT-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_d:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_d:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp ule double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define double @clampTo3k_e(double %x) {
-; STRICT-LABEL: clampTo3k_e:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; STRICT-NEXT: maxsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_e:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_e:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_e:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp olt double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define double @clampTo3k_f(double %x) {
-; STRICT-LABEL: clampTo3k_f:
-; STRICT: # %bb.0:
-; STRICT-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_f:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_f:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_f:
+; CHECK: # %bb.0:
+; CHECK-NEXT: maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp ule double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define double @clampTo3k_g(double %x) {
-; STRICT-LABEL: clampTo3k_g:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; STRICT-NEXT: minsd %xmm0, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_g:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_g:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_g:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp ogt double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define double @clampTo3k_h(double %x) {
-; STRICT-LABEL: clampTo3k_h:
-; STRICT: # %bb.0:
-; STRICT-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT: retq
-;
-; UNSAFE-LABEL: clampTo3k_h:
-; UNSAFE: # %bb.0:
-; UNSAFE-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT: retq
-;
-; FINITE-LABEL: clampTo3k_h:
-; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: retq
+; CHECK-LABEL: clampTo3k_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT: retq
%t0 = fcmp uge double %x, 3.000000e+03
%y = select i1 %t0, double 3.000000e+03, double %x
ret double %y
}
define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y) {
-; STRICT-LABEL: test_maxpd:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: cmplepd %xmm2, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_maxpd:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxpd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_maxpd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: cmplepd %xmm2, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%max_is_x = fcmp oge <2 x double> %x, %y
%max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
ret <2 x double> %max
}
define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y) {
-; STRICT-LABEL: test_minpd:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movapd %xmm0, %xmm2
-; STRICT-NEXT: cmplepd %xmm1, %xmm0
-; STRICT-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movapd %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_minpd:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minpd %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_minpd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movapd %xmm0, %xmm2
+; CHECK-NEXT: cmplepd %xmm1, %xmm0
+; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: retq
%min_is_x = fcmp ole <2 x double> %x, %y
%min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
ret <2 x double> %min
}
define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y) {
-; STRICT-LABEL: test_maxps:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: cmpleps %xmm2, %xmm0
-; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_maxps:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxps %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_maxps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movaps %xmm0, %xmm2
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: cmpleps %xmm2, %xmm0
+; CHECK-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%max_is_x = fcmp oge <4 x float> %x, %y
%max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
ret <4 x float> %max
}
define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y) {
-; STRICT-LABEL: test_minps:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
-; STRICT-NEXT: cmpleps %xmm1, %xmm0
-; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_minps:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minps %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_minps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movaps %xmm0, %xmm2
+; CHECK-NEXT: cmpleps %xmm1, %xmm0
+; CHECK-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%min_is_x = fcmp ole <4 x float> %x, %y
%min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
ret <4 x float> %min
}
define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
-; STRICT-LABEL: test_maxps_illegal_v2f32:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: cmpleps %xmm2, %xmm0
-; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_maxps_illegal_v2f32:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxps %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_maxps_illegal_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movaps %xmm0, %xmm2
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: cmpleps %xmm2, %xmm0
+; CHECK-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%max_is_x = fcmp oge <2 x float> %x, %y
%max = select <2 x i1> %max_is_x, <2 x float> %x, <2 x float> %y
ret <2 x float> %max
}
define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
-; STRICT-LABEL: test_minps_illegal_v2f32:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
-; STRICT-NEXT: cmpleps %xmm1, %xmm0
-; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_minps_illegal_v2f32:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minps %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_minps_illegal_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movaps %xmm0, %xmm2
+; CHECK-NEXT: cmpleps %xmm1, %xmm0
+; CHECK-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%min_is_x = fcmp ole <2 x float> %x, %y
%min = select <2 x i1> %min_is_x, <2 x float> %x, <2 x float> %y
ret <2 x float> %min
}
define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
-; STRICT-LABEL: test_maxps_illegal_v3f32:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: cmpleps %xmm2, %xmm0
-; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_maxps_illegal_v3f32:
-; RELAX: # %bb.0:
-; RELAX-NEXT: maxps %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_maxps_illegal_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movaps %xmm0, %xmm2
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: cmpleps %xmm2, %xmm0
+; CHECK-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%max_is_x = fcmp oge <3 x float> %x, %y
%max = select <3 x i1> %max_is_x, <3 x float> %x, <3 x float> %y
ret <3 x float> %max
}
define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
-; STRICT-LABEL: test_minps_illegal_v3f32:
-; STRICT: # %bb.0:
-; STRICT-NEXT: movaps %xmm0, %xmm2
-; STRICT-NEXT: cmpleps %xmm1, %xmm0
-; STRICT-NEXT: blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT: movaps %xmm1, %xmm0
-; STRICT-NEXT: retq
-;
-; RELAX-LABEL: test_minps_illegal_v3f32:
-; RELAX: # %bb.0:
-; RELAX-NEXT: minps %xmm1, %xmm0
-; RELAX-NEXT: retq
+; CHECK-LABEL: test_minps_illegal_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movaps %xmm0, %xmm2
+; CHECK-NEXT: cmpleps %xmm1, %xmm0
+; CHECK-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%min_is_x = fcmp ole <3 x float> %x, %y
%min = select <3 x i1> %min_is_x, <3 x float> %x, <3 x float> %y
ret <3 x float> %min
@@ -1322,10 +852,10 @@ define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
; OSS-Fuzz #13838
; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13838
define float @ossfuzz13838(float %x) {
-; ALL-LABEL: ossfuzz13838:
-; ALL: # %bb.0: # %bb
-; ALL-NEXT: movss {{.*#+}} xmm0 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
-; ALL-NEXT: retq
+; CHECK-LABEL: ossfuzz13838:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: retq
bb:
%cmp2 = fcmp fast olt float %x, 2.550000e+02
%B1 = urem i1 %cmp2, %cmp2
>From a0735beaf6791b3c872b205595a5267fa1dbf9ad Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Fri, 28 Nov 2025 21:52:33 +0800
Subject: [PATCH 04/12] Fix PowerPC tests
---
llvm/test/CodeGen/PowerPC/change-no-infs.ll | 67 -
llvm/test/CodeGen/PowerPC/fsel.ll | 20 +-
llvm/test/CodeGen/PowerPC/scalar-equal.ll | 110 +-
llvm/test/CodeGen/PowerPC/scalar_cmp.ll | 1494 +++++++------------
4 files changed, 621 insertions(+), 1070 deletions(-)
delete mode 100644 llvm/test/CodeGen/PowerPC/change-no-infs.ll
diff --git a/llvm/test/CodeGen/PowerPC/change-no-infs.ll b/llvm/test/CodeGen/PowerPC/change-no-infs.ll
deleted file mode 100644
index 0cd5eb5408e3e..0000000000000
--- a/llvm/test/CodeGen/PowerPC/change-no-infs.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; Check that we can enable/disable NoInfsFPMath and NoNaNsInFPMath via function
-; attributes. An attribute on one function should not magically apply to the
-; next one.
-
-; RUN: llc < %s -mtriple=powerpc64-unknown-unknown -mcpu=pwr7 -mattr=-vsx \
-; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=SAFE
-
-; RUN: llc < %s -mtriple=powerpc64-unknown-unknown -mcpu=pwr7 -mattr=-vsx \
-; RUN: -enable-no-infs-fp-math -enable-no-nans-fp-math \
-; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=UNSAFE
-
-; The fcmp+select in these functions should be converted to a fsel instruction
-; when both NoInfsFPMath and NoNaNsInFPMath are enabled.
-
-; CHECK-LABEL: default0:
-define double @default0(double %a, double %y, double %z) {
-entry:
-; SAFE-NOT: fsel
-; UNSAFE: fsel
- %cmp = fcmp ult double %a, 0.000000e+00
- %z.y = select i1 %cmp, double %z, double %y
- ret double %z.y
-}
-
-; CHECK-LABEL: unsafe_math_off:
-define double @unsafe_math_off(double %a, double %y, double %z) #0 #2 {
-entry:
-; SAFE-NOT: fsel
-; UNSAFE-NOT: fsel
- %cmp = fcmp ult double %a, 0.000000e+00
- %z.y = select i1 %cmp, double %z, double %y
- ret double %z.y
-}
-
-; CHECK-LABEL: default1:
-define double @default1(double %a, double %y, double %z) {
-; SAFE-NOT: fsel
-; UNSAFE: fsel
- %cmp = fcmp ult double %a, 0.000000e+00
- %z.y = select i1 %cmp, double %z, double %y
- ret double %z.y
-}
-
-; CHECK-LABEL: unsafe_math_on:
-define double @unsafe_math_on(double %a, double %y, double %z) #1 #3 {
-entry:
-; SAFE-NOT: fsel
-; UNSAFE-NOT: fsel
- %cmp = fcmp ult double %a, 0.000000e+00
- %z.y = select i1 %cmp, double %z, double %y
- ret double %z.y
-}
-
-; CHECK-LABEL: default2:
-define double @default2(double %a, double %y, double %z) {
-; SAFE-NOT: fsel
-; UNSAFE: fsel
- %cmp = fcmp ult double %a, 0.000000e+00
- %z.y = select i1 %cmp, double %z, double %y
- ret double %z.y
-}
-
-attributes #0 = { "no-infs-fp-math"="false" }
-attributes #1 = { "no-nans-fp-math"="false" }
-
-attributes #2 = { "no-infs-fp-math"="false" }
-attributes #3 = { "no-infs-fp-math"="true" }
diff --git a/llvm/test/CodeGen/PowerPC/fsel.ll b/llvm/test/CodeGen/PowerPC/fsel.ll
index dea442d8404e1..13d94d1c28822 100644
--- a/llvm/test/CodeGen/PowerPC/fsel.ll
+++ b/llvm/test/CodeGen/PowerPC/fsel.ll
@@ -1,12 +1,12 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -enable-no-infs-fp-math -enable-no-nans-fp-math -mattr=-vsx | FileCheck -check-prefix=CHECK-FM %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -enable-no-infs-fp-math -enable-no-nans-fp-math -mattr=+vsx | FileCheck -check-prefix=CHECK-FM-VSX %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -enable-no-infs-fp-math -mattr=-vsx | FileCheck -check-prefix=CHECK-FM %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -enable-no-infs-fp-math -mattr=+vsx | FileCheck -check-prefix=CHECK-FM-VSX %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
define double @zerocmp1(double %a, double %y, double %z) #0 {
entry:
- %cmp = fcmp ult double %a, 0.000000e+00
+ %cmp = fcmp nnan ult double %a, 0.000000e+00
%z.y = select i1 %cmp, double %z, double %y
ret double %z.y
@@ -25,7 +25,7 @@ entry:
define double @zerocmp2(double %a, double %y, double %z) #0 {
entry:
- %cmp = fcmp ogt double %a, 0.000000e+00
+ %cmp = fcmp nnan ogt double %a, 0.000000e+00
%y.z = select i1 %cmp, double %y, double %z
ret double %y.z
@@ -46,7 +46,7 @@ entry:
define double @zerocmp3(double %a, double %y, double %z) #0 {
entry:
- %cmp = fcmp oeq double %a, 0.000000e+00
+ %cmp = fcmp nnan oeq double %a, 0.000000e+00
%y.z = select i1 %cmp, double %y, double %z
ret double %y.z
@@ -69,7 +69,7 @@ entry:
define double @min1(double %a, double %b) #0 {
entry:
- %cmp = fcmp ole double %a, %b
+ %cmp = fcmp nnan ole double %a, %b
%cond = select i1 %cmp, double %a, double %b
ret double %cond
@@ -90,7 +90,7 @@ entry:
define double @max1(double %a, double %b) #0 {
entry:
- %cmp = fcmp oge double %a, %b
+ %cmp = fcmp nnan oge double %a, %b
%cond = select i1 %cmp, double %a, double %b
ret double %cond
@@ -111,7 +111,7 @@ entry:
define double @cmp1(double %a, double %b, double %y, double %z) #0 {
entry:
- %cmp = fcmp ult double %a, %b
+ %cmp = fcmp nnan ult double %a, %b
%z.y = select i1 %cmp, double %z, double %y
ret double %z.y
@@ -132,7 +132,7 @@ entry:
define double @cmp2(double %a, double %b, double %y, double %z) #0 {
entry:
- %cmp = fcmp ogt double %a, %b
+ %cmp = fcmp nnan ogt double %a, %b
%y.z = select i1 %cmp, double %y, double %z
ret double %y.z
@@ -153,7 +153,7 @@ entry:
define double @cmp3(double %a, double %b, double %y, double %z) #0 {
entry:
- %cmp = fcmp oeq double %a, %b
+ %cmp = fcmp nnan oeq double %a, %b
%y.z = select i1 %cmp, double %y, double %z
ret double %y.z
diff --git a/llvm/test/CodeGen/PowerPC/scalar-equal.ll b/llvm/test/CodeGen/PowerPC/scalar-equal.ll
index c0b11b47236a9..de829b5d54dee 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-equal.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-equal.ll
@@ -1,57 +1,31 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names \
-; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \
-; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=FAST-P8
-; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names \
-; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \
-; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=FAST-P9
; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -verify-machineinstrs \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=NO-FAST-P9
+; RUN: --check-prefix=P9
; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -verify-machineinstrs \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=NO-FAST-P8
+; RUN: --check-prefix=P8
define double @testoeq(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: testoeq:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: xsnegdp f0, f0
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: testoeq:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: xsnegdp f0, f0
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
-; FAST-P9-NEXT: blr
+; P9-LABEL: testoeq:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xscmpudp cr0, f1, f2
+; P9-NEXT: beq cr0, .LBB0_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB0_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
;
-; NO-FAST-P9-LABEL: testoeq:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: beq cr0, .LBB0_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB0_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testoeq:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: beq cr0, .LBB0_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB0_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
+; P8-LABEL: testoeq:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: beq cr0, .LBB0_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB0_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
entry:
%cmp = fcmp oeq double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -59,37 +33,21 @@ entry:
}
define double @testoeq_fast(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: testoeq_fast:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: xsnegdp f0, f0
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: testoeq_fast:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: xsnegdp f0, f0
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testoeq_fast:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT: xsnegdp f0, f0
-; NO-FAST-P9-NEXT: fsel f1, f0, f1, f4
-; NO-FAST-P9-NEXT: blr
+; P9-LABEL: testoeq_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubdp f0, f1, f2
+; P9-NEXT: fsel f1, f0, f3, f4
+; P9-NEXT: xsnegdp f0, f0
+; P9-NEXT: fsel f1, f0, f1, f4
+; P9-NEXT: blr
;
-; NO-FAST-P8-LABEL: testoeq_fast:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT: xsnegdp f0, f0
-; NO-FAST-P8-NEXT: fsel f1, f0, f1, f4
-; NO-FAST-P8-NEXT: blr
+; P8-LABEL: testoeq_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubdp f0, f1, f2
+; P8-NEXT: fsel f1, f0, f3, f4
+; P8-NEXT: xsnegdp f0, f0
+; P8-NEXT: fsel f1, f0, f1, f4
+; P8-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz oeq double %a, %b
%cond = select nnan ninf nsz i1 %cmp, double %c, double %d
diff --git a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
index 881d1f4c4093b..878b7f2449141 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
@@ -1,58 +1,36 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names \
; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \
+; RUN: --enable-no-nans-fp-math \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=FAST-P8
+; RUN: --check-prefix=P8
; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names \
; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \
+; RUN: --enable-no-nans-fp-math \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=FAST-P9
-; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -verify-machineinstrs \
-; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=NO-FAST-P8
-; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -verify-machineinstrs \
-; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=NO-FAST-P9
+; RUN: --check-prefix=P9
; Test oeq
define float @select_oeq_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_oeq_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f1, f2
-; FAST-P8-NEXT: xsnegdp f1, f0
-; FAST-P8-NEXT: fsel f0, f0, f3, f4
-; FAST-P8-NEXT: fsel f1, f1, f0, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_oeq_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f1, f2
-; FAST-P9-NEXT: xsnegdp f1, f0
-; FAST-P9-NEXT: fsel f0, f0, f3, f4
-; FAST-P9-NEXT: fsel f1, f1, f0, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_oeq_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: beq cr0, .LBB0_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB0_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_oeq_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: beq cr0, .LBB0_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB0_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_oeq_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: beq cr0, .LBB0_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB0_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_oeq_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: beq cr0, .LBB0_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB0_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp oeq float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -60,41 +38,25 @@ entry:
}
define float @select_oeq_float_nsz(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_oeq_float_nsz:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: xssubsp f1, f1, f2
-; FAST-P8-NEXT: fsel f1, f1, f3, f4
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_oeq_float_nsz:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: xssubsp f1, f1, f2
-; FAST-P9-NEXT: fsel f1, f1, f3, f4
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_oeq_float_nsz:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: beq cr0, .LBB1_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB1_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_oeq_float_nsz:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: beq cr0, .LBB1_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB1_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_oeq_float_nsz:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: beq cr0, .LBB1_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB1_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_oeq_float_nsz:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: beq cr0, .LBB1_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB1_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp nsz oeq float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -102,41 +64,25 @@ entry:
}
define double @select_oeq_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_oeq_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: xsnegdp f0, f0
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_oeq_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: xsnegdp f0, f0
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_oeq_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: beq cr0, .LBB2_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB2_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_oeq_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: beq cr0, .LBB2_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB2_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_oeq_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: beq cr0, .LBB2_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB2_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_oeq_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xscmpudp cr0, f1, f2
+; P9-NEXT: beq cr0, .LBB2_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB2_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp oeq double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -144,37 +90,21 @@ entry:
}
define float @select_fast_oeq_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_oeq_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: xssubsp f1, f1, f2
-; FAST-P8-NEXT: fsel f1, f1, f3, f4
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_oeq_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: xssubsp f1, f1, f2
-; FAST-P9-NEXT: fsel f1, f1, f3, f4
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_oeq_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P8-NEXT: xssubsp f1, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f1, f3, f4
-; NO-FAST-P8-NEXT: fsel f1, f0, f1, f4
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_oeq_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P9-NEXT: xssubsp f1, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f1, f3, f4
-; NO-FAST-P9-NEXT: fsel f1, f0, f1, f4
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_oeq_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubsp f0, f2, f1
+; P8-NEXT: xssubsp f1, f1, f2
+; P8-NEXT: fsel f1, f1, f3, f4
+; P8-NEXT: fsel f1, f0, f1, f4
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_oeq_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubsp f0, f2, f1
+; P9-NEXT: xssubsp f1, f1, f2
+; P9-NEXT: fsel f1, f1, f3, f4
+; P9-NEXT: fsel f1, f0, f1, f4
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz oeq float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -182,37 +112,21 @@ entry:
}
define double @select_fast_oeq_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_oeq_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: xsnegdp f0, f0
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_oeq_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: xsnegdp f0, f0
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_oeq_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT: xsnegdp f0, f0
-; NO-FAST-P8-NEXT: fsel f1, f0, f1, f4
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_oeq_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT: xsnegdp f0, f0
-; NO-FAST-P9-NEXT: fsel f1, f0, f1, f4
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_oeq_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubdp f0, f1, f2
+; P8-NEXT: fsel f1, f0, f3, f4
+; P8-NEXT: xsnegdp f0, f0
+; P8-NEXT: fsel f1, f0, f1, f4
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_oeq_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubdp f0, f1, f2
+; P9-NEXT: fsel f1, f0, f3, f4
+; P9-NEXT: xsnegdp f0, f0
+; P9-NEXT: fsel f1, f0, f1, f4
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz oeq double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -222,43 +136,27 @@ entry:
; Test one
define float @select_one_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_one_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f1, f2
-; FAST-P8-NEXT: xsnegdp f1, f0
-; FAST-P8-NEXT: fsel f0, f0, f4, f3
-; FAST-P8-NEXT: fsel f1, f1, f0, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_one_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f1, f2
-; FAST-P9-NEXT: xsnegdp f1, f0
-; FAST-P9-NEXT: fsel f0, f0, f4, f3
-; FAST-P9-NEXT: fsel f1, f1, f0, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_one_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB5_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_one_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB5_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_one_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: crnor 4*cr5+lt, un, eq
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB5_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_one_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: crnor 4*cr5+lt, un, eq
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB5_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp one float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -266,43 +164,27 @@ entry:
}
define float @select_one_float_nsz(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_one_float_nsz:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: xssubsp f1, f1, f2
-; FAST-P8-NEXT: fsel f1, f1, f4, f3
-; FAST-P8-NEXT: fsel f1, f0, f1, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_one_float_nsz:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: xssubsp f1, f1, f2
-; FAST-P9-NEXT: fsel f1, f1, f4, f3
-; FAST-P9-NEXT: fsel f1, f0, f1, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_one_float_nsz:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB6_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB6_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_one_float_nsz:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB6_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB6_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_one_float_nsz:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: crnor 4*cr5+lt, un, eq
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB6_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB6_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_one_float_nsz:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: crnor 4*cr5+lt, un, eq
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB6_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB6_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp nsz one float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -310,43 +192,27 @@ entry:
}
define double @select_one_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_one_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: xsnegdp f0, f0
-; FAST-P8-NEXT: fsel f1, f0, f1, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_one_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: xsnegdp f0, f0
-; FAST-P9-NEXT: fsel f1, f0, f1, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_one_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB7_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB7_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_one_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB7_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB7_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_one_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: crnor 4*cr5+lt, un, eq
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB7_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB7_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_one_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: crnor 4*cr5+lt, un, eq
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB7_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB7_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp one double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -354,37 +220,21 @@ entry:
}
define float @select_fast_one_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_one_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: xssubsp f1, f1, f2
-; FAST-P8-NEXT: fsel f1, f1, f4, f3
-; FAST-P8-NEXT: fsel f1, f0, f1, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_one_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: xssubsp f1, f1, f2
-; FAST-P9-NEXT: fsel f1, f1, f4, f3
-; FAST-P9-NEXT: fsel f1, f0, f1, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_one_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P8-NEXT: xssubsp f1, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f1, f4, f3
-; NO-FAST-P8-NEXT: fsel f1, f0, f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_one_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P9-NEXT: xssubsp f1, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f1, f4, f3
-; NO-FAST-P9-NEXT: fsel f1, f0, f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_one_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubsp f0, f2, f1
+; P8-NEXT: xssubsp f1, f1, f2
+; P8-NEXT: fsel f1, f1, f4, f3
+; P8-NEXT: fsel f1, f0, f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_one_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubsp f0, f2, f1
+; P9-NEXT: xssubsp f1, f1, f2
+; P9-NEXT: fsel f1, f1, f4, f3
+; P9-NEXT: fsel f1, f0, f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz one float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -392,37 +242,21 @@ entry:
}
define double @select_fast_one_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_one_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: xsnegdp f0, f0
-; FAST-P8-NEXT: fsel f1, f0, f1, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_one_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: xsnegdp f0, f0
-; FAST-P9-NEXT: fsel f1, f0, f1, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_one_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT: xsnegdp f0, f0
-; NO-FAST-P8-NEXT: fsel f1, f0, f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_one_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT: xsnegdp f0, f0
-; NO-FAST-P9-NEXT: fsel f1, f0, f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_one_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubdp f0, f1, f2
+; P8-NEXT: fsel f1, f0, f4, f3
+; P8-NEXT: xsnegdp f0, f0
+; P8-NEXT: fsel f1, f0, f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_one_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubdp f0, f1, f2
+; P9-NEXT: fsel f1, f0, f4, f3
+; P9-NEXT: xsnegdp f0, f0
+; P9-NEXT: fsel f1, f0, f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz one double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -432,39 +266,27 @@ entry:
; Test oge
define float @select_oge_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_oge_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_oge_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_oge_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB10_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB10_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_oge_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB10_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB10_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_oge_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: crnor 4*cr5+lt, un, lt
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB10_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB10_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_oge_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: crnor 4*cr5+lt, un, lt
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB10_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB10_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp oge float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -472,39 +294,27 @@ entry:
}
define double @select_oge_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_oge_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_oge_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_oge_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB11_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB11_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_oge_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB11_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB11_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_oge_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: crnor 4*cr5+lt, un, lt
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB11_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB11_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_oge_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: crnor 4*cr5+lt, un, lt
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB11_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB11_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp oge double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -512,29 +322,17 @@ entry:
}
define float @select_fast_oge_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_oge_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_oge_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_oge_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_oge_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubsp f0, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_oge_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubsp f0, f1, f2
+; P8-NEXT: fsel f1, f0, f3, f4
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_oge_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubsp f0, f1, f2
+; P9-NEXT: fsel f1, f0, f3, f4
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz oge float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -542,29 +340,17 @@ entry:
}
define double @select_fast_oge_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_oge_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_oge_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_oge_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_oge_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_oge_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubdp f0, f1, f2
+; P8-NEXT: fsel f1, f0, f3, f4
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_oge_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubdp f0, f1, f2
+; P9-NEXT: fsel f1, f0, f3, f4
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz oge double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -574,37 +360,25 @@ entry:
; Test olt
define float @select_olt_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_olt_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_olt_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_olt_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: blt cr0, .LBB14_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB14_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_olt_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: blt cr0, .LBB14_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB14_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_olt_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: blt cr0, .LBB14_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB14_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_olt_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: blt cr0, .LBB14_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB14_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp olt float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -612,37 +386,25 @@ entry:
}
define double @select_olt_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_olt_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_olt_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_olt_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: blt cr0, .LBB15_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB15_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_olt_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: blt cr0, .LBB15_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB15_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_olt_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: blt cr0, .LBB15_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB15_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_olt_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xscmpudp cr0, f1, f2
+; P9-NEXT: blt cr0, .LBB15_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB15_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp olt double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -650,29 +412,17 @@ entry:
}
define float @select_fast_olt_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_olt_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_olt_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_olt_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_olt_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubsp f0, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_olt_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubsp f0, f1, f2
+; P8-NEXT: fsel f1, f0, f4, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_olt_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubsp f0, f1, f2
+; P9-NEXT: fsel f1, f0, f4, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp ninf nnan nsz olt float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -680,29 +430,17 @@ entry:
}
define double @select_fast_olt_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_olt_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f1, f2
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_olt_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f1, f2
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_olt_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_olt_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_olt_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubdp f0, f1, f2
+; P8-NEXT: fsel f1, f0, f4, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_olt_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubdp f0, f1, f2
+; P9-NEXT: fsel f1, f0, f4, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz olt double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -712,37 +450,25 @@ entry:
; Test ogt
define float @select_ogt_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_ogt_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_ogt_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_ogt_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: bgt cr0, .LBB18_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB18_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_ogt_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: bgt cr0, .LBB18_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB18_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_ogt_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: bgt cr0, .LBB18_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB18_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_ogt_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: bgt cr0, .LBB18_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB18_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp ogt float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -750,37 +476,25 @@ entry:
}
define double @select_ogt_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_ogt_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_ogt_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_ogt_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: bgt cr0, .LBB19_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB19_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_ogt_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: bgt cr0, .LBB19_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB19_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_ogt_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: bgt cr0, .LBB19_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB19_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_ogt_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xscmpudp cr0, f1, f2
+; P9-NEXT: bgt cr0, .LBB19_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB19_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp ogt double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -788,29 +502,17 @@ entry:
}
define float @select_fast_ogt_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_ogt_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_ogt_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_ogt_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P8-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_ogt_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P9-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_ogt_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubsp f0, f2, f1
+; P8-NEXT: fsel f1, f0, f4, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_ogt_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubsp f0, f2, f1
+; P9-NEXT: fsel f1, f0, f4, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz ogt float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -818,29 +520,17 @@ entry:
}
define double @select_fast_ogt_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_ogt_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f4, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_ogt_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f4, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_ogt_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f2, f1
-; NO-FAST-P8-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_ogt_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubdp f0, f2, f1
-; NO-FAST-P9-NEXT: fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_ogt_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubdp f0, f2, f1
+; P8-NEXT: fsel f1, f0, f4, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_ogt_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubdp f0, f2, f1
+; P9-NEXT: fsel f1, f0, f4, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz ogt double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -850,39 +540,27 @@ entry:
; Test ole
define float @select_ole_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_ole_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_ole_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_ole_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB22_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB22_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_ole_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB22_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB22_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_ole_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: crnor 4*cr5+lt, un, gt
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB22_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB22_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_ole_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: crnor 4*cr5+lt, un, gt
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB22_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB22_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp ole float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -890,39 +568,27 @@ entry:
}
define double @select_ole_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_ole_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_ole_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_ole_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB23_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB23_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_ole_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB23_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB23_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_ole_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: crnor 4*cr5+lt, un, gt
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB23_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f3, f4
+; P8-NEXT: .LBB23_2: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: select_ole_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: fcmpu cr0, f1, f2
+; P9-NEXT: crnor 4*cr5+lt, un, gt
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB23_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f3, f4
+; P9-NEXT: .LBB23_2: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp ole double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -930,29 +596,17 @@ entry:
}
define float @select_fast_ole_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_ole_float:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_ole_float:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_ole_float:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P8-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_ole_float:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P9-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_ole_float:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubsp f0, f2, f1
+; P8-NEXT: fsel f1, f0, f3, f4
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_ole_float:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubsp f0, f2, f1
+; P9-NEXT: fsel f1, f0, f3, f4
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz ole float %a, %b
%cond = select i1 %cmp, float %c, float %d
@@ -960,29 +614,17 @@ entry:
}
define double @select_fast_ole_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_ole_double:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubdp f0, f2, f1
-; FAST-P8-NEXT: fsel f1, f0, f3, f4
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: select_fast_ole_double:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubdp f0, f2, f1
-; FAST-P9-NEXT: fsel f1, f0, f3, f4
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: select_fast_ole_double:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f2, f1
-; NO-FAST-P8-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: select_fast_ole_double:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xssubdp f0, f2, f1
-; NO-FAST-P9-NEXT: fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: select_fast_ole_double:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xssubdp f0, f2, f1
+; P8-NEXT: fsel f1, f0, f3, f4
+; P8-NEXT: blr
+;
+; P9-LABEL: select_fast_ole_double:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xssubdp f0, f2, f1
+; P9-NEXT: fsel f1, f0, f3, f4
+; P9-NEXT: blr
entry:
%cmp = fcmp nnan ninf nsz ole double %a, %b
%cond = select i1 %cmp, double %c, double %d
@@ -991,149 +633,167 @@ entry:
; Test RHS is 1.000000e+00
define double @onecmp1(double %a, double %y, double %z) {
-; FAST-P8-LABEL: onecmp1:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: vspltisw v2, -1
-; FAST-P8-NEXT: xvcvsxwdp vs0, vs34
-; FAST-P8-NEXT: xsadddp f0, f1, f0
-; FAST-P8-NEXT: fsel f1, f0, f2, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: onecmp1:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: vspltisw v2, -1
-; FAST-P9-NEXT: xvcvsxwdp vs0, vs34
-; FAST-P9-NEXT: xsadddp f0, f1, f0
-; FAST-P9-NEXT: fsel f1, f0, f2, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: onecmp1:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: vspltisw v2, 1
-; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f0
-; NO-FAST-P8-NEXT: bc 12, lt, .LBB26_3
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f1
-; NO-FAST-P8-NEXT: bc 12, un, .LBB26_3
-; NO-FAST-P8-NEXT: # %bb.2: # %entry
-; NO-FAST-P8-NEXT: fmr f3, f2
-; NO-FAST-P8-NEXT: .LBB26_3: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f3
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: onecmp1:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: vspltisw v2, 1
-; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f0
-; NO-FAST-P9-NEXT: bc 12, lt, .LBB26_3
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fcmpu cr0, f1, f1
-; NO-FAST-P9-NEXT: bc 12, un, .LBB26_3
-; NO-FAST-P9-NEXT: # %bb.2: # %entry
-; NO-FAST-P9-NEXT: fmr f3, f2
-; NO-FAST-P9-NEXT: .LBB26_3: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f3
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: onecmp1:
+; P8: # %bb.0: # %entry
+; P8-NEXT: vspltisw v2, 1
+; P8-NEXT: xvcvsxwdp vs0, vs34
+; P8-NEXT: fcmpu cr0, f1, f0
+; P8-NEXT: bc 12, lt, .LBB26_3
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fcmpu cr0, f1, f1
+; P8-NEXT: bc 12, un, .LBB26_3
+; P8-NEXT: # %bb.2: # %entry
+; P8-NEXT: fmr f3, f2
+; P8-NEXT: .LBB26_3: # %entry
+; P8-NEXT: fmr f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: onecmp1:
+; P9: # %bb.0: # %entry
+; P9-NEXT: vspltisw v2, 1
+; P9-NEXT: xvcvsxwdp vs0, vs34
+; P9-NEXT: fcmpu cr0, f1, f0
+; P9-NEXT: bc 12, lt, .LBB26_3
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fcmpu cr0, f1, f1
+; P9-NEXT: bc 12, un, .LBB26_3
+; P9-NEXT: # %bb.2: # %entry
+; P9-NEXT: fmr f3, f2
+; P9-NEXT: .LBB26_3: # %entry
+; P9-NEXT: fmr f1, f3
+; P9-NEXT: blr
entry:
%cmp = fcmp ult double %a, 1.000000e+00
%z.y = select i1 %cmp, double %z, double %y
ret double %z.y
}
+define double @onecmp1_fast(double %a, double %y, double %z) {
+; P8-LABEL: onecmp1_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: vspltisw v2, -1
+; P8-NEXT: xvcvsxwdp vs0, vs34
+; P8-NEXT: xsadddp f0, f1, f0
+; P8-NEXT: fsel f1, f0, f2, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: onecmp1_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: vspltisw v2, -1
+; P9-NEXT: xvcvsxwdp vs0, vs34
+; P9-NEXT: xsadddp f0, f1, f0
+; P9-NEXT: fsel f1, f0, f2, f3
+; P9-NEXT: blr
+entry:
+ %cmp = fcmp nnan ninf nsz ult double %a, 1.000000e+00
+ %z.y = select i1 %cmp, double %z, double %y
+ ret double %z.y
+}
+
define double @onecmp2(double %a, double %y, double %z) {
-; FAST-P8-LABEL: onecmp2:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: vspltisw v2, 1
-; FAST-P8-NEXT: xvcvsxwdp vs0, vs34
-; FAST-P8-NEXT: xssubdp f0, f0, f1
-; FAST-P8-NEXT: fsel f1, f0, f3, f2
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: onecmp2:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: vspltisw v2, 1
-; FAST-P9-NEXT: xvcvsxwdp vs0, vs34
-; FAST-P9-NEXT: xssubdp f0, f0, f1
-; FAST-P9-NEXT: fsel f1, f0, f3, f2
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: onecmp2:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: vspltisw v2, 1
-; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P8-NEXT: bgt cr0, .LBB27_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f2, f3
-; NO-FAST-P8-NEXT: .LBB27_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f2
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: onecmp2:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: vspltisw v2, 1
-; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34
-; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P9-NEXT: bgt cr0, .LBB27_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f2, f3
-; NO-FAST-P9-NEXT: .LBB27_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f2
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: onecmp2:
+; P8: # %bb.0: # %entry
+; P8-NEXT: vspltisw v2, 1
+; P8-NEXT: xvcvsxwdp vs0, vs34
+; P8-NEXT: xscmpudp cr0, f1, f0
+; P8-NEXT: bgt cr0, .LBB28_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f2, f3
+; P8-NEXT: .LBB28_2: # %entry
+; P8-NEXT: fmr f1, f2
+; P8-NEXT: blr
+;
+; P9-LABEL: onecmp2:
+; P9: # %bb.0: # %entry
+; P9-NEXT: vspltisw v2, 1
+; P9-NEXT: xvcvsxwdp vs0, vs34
+; P9-NEXT: xscmpudp cr0, f1, f0
+; P9-NEXT: bgt cr0, .LBB28_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f2, f3
+; P9-NEXT: .LBB28_2: # %entry
+; P9-NEXT: fmr f1, f2
+; P9-NEXT: blr
entry:
%cmp = fcmp ogt double %a, 1.000000e+00
%y.z = select i1 %cmp, double %y, double %z
ret double %y.z
}
+define double @onecmp2_fast(double %a, double %y, double %z) {
+; P8-LABEL: onecmp2_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: vspltisw v2, 1
+; P8-NEXT: xvcvsxwdp vs0, vs34
+; P8-NEXT: xssubdp f0, f0, f1
+; P8-NEXT: fsel f1, f0, f3, f2
+; P8-NEXT: blr
+;
+; P9-LABEL: onecmp2_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: vspltisw v2, 1
+; P9-NEXT: xvcvsxwdp vs0, vs34
+; P9-NEXT: xssubdp f0, f0, f1
+; P9-NEXT: fsel f1, f0, f3, f2
+; P9-NEXT: blr
+entry:
+ %cmp = fcmp nnan ninf nsz ogt double %a, 1.000000e+00
+ %y.z = select i1 %cmp, double %y, double %z
+ ret double %y.z
+}
+
define double @onecmp3(double %a, double %y, double %z) {
-; FAST-P8-LABEL: onecmp3:
-; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: vspltisw v2, -1
-; FAST-P8-NEXT: xvcvsxwdp vs0, vs34
-; FAST-P8-NEXT: xsadddp f0, f1, f0
-; FAST-P8-NEXT: fsel f1, f0, f2, f3
-; FAST-P8-NEXT: xsnegdp f0, f0
-; FAST-P8-NEXT: fsel f1, f0, f1, f3
-; FAST-P8-NEXT: blr
-;
-; FAST-P9-LABEL: onecmp3:
-; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: vspltisw v2, -1
-; FAST-P9-NEXT: xvcvsxwdp vs0, vs34
-; FAST-P9-NEXT: xsadddp f0, f1, f0
-; FAST-P9-NEXT: fsel f1, f0, f2, f3
-; FAST-P9-NEXT: xsnegdp f0, f0
-; FAST-P9-NEXT: fsel f1, f0, f1, f3
-; FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: onecmp3:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: vspltisw v2, 1
-; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P8-NEXT: beq cr0, .LBB28_2
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f2, f3
-; NO-FAST-P8-NEXT: .LBB28_2: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f2
-; NO-FAST-P8-NEXT: blr
-;
-; NO-FAST-P9-LABEL: onecmp3:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: vspltisw v2, 1
-; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34
-; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P9-NEXT: beq cr0, .LBB28_2
-; NO-FAST-P9-NEXT: # %bb.1: # %entry
-; NO-FAST-P9-NEXT: fmr f2, f3
-; NO-FAST-P9-NEXT: .LBB28_2: # %entry
-; NO-FAST-P9-NEXT: fmr f1, f2
-; NO-FAST-P9-NEXT: blr
+; P8-LABEL: onecmp3:
+; P8: # %bb.0: # %entry
+; P8-NEXT: vspltisw v2, 1
+; P8-NEXT: xvcvsxwdp vs0, vs34
+; P8-NEXT: xscmpudp cr0, f1, f0
+; P8-NEXT: beq cr0, .LBB30_2
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f2, f3
+; P8-NEXT: .LBB30_2: # %entry
+; P8-NEXT: fmr f1, f2
+; P8-NEXT: blr
+;
+; P9-LABEL: onecmp3:
+; P9: # %bb.0: # %entry
+; P9-NEXT: vspltisw v2, 1
+; P9-NEXT: xvcvsxwdp vs0, vs34
+; P9-NEXT: xscmpudp cr0, f1, f0
+; P9-NEXT: beq cr0, .LBB30_2
+; P9-NEXT: # %bb.1: # %entry
+; P9-NEXT: fmr f2, f3
+; P9-NEXT: .LBB30_2: # %entry
+; P9-NEXT: fmr f1, f2
+; P9-NEXT: blr
entry:
%cmp = fcmp oeq double %a, 1.000000e+00
%y.z = select i1 %cmp, double %y, double %z
ret double %y.z
}
+
+define double @onecmp3_fast(double %a, double %y, double %z) {
+; P8-LABEL: onecmp3_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: vspltisw v2, -1
+; P8-NEXT: xvcvsxwdp vs0, vs34
+; P8-NEXT: xsadddp f0, f1, f0
+; P8-NEXT: fsel f1, f0, f2, f3
+; P8-NEXT: xsnegdp f0, f0
+; P8-NEXT: fsel f1, f0, f1, f3
+; P8-NEXT: blr
+;
+; P9-LABEL: onecmp3_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: vspltisw v2, -1
+; P9-NEXT: xvcvsxwdp vs0, vs34
+; P9-NEXT: xsadddp f0, f1, f0
+; P9-NEXT: fsel f1, f0, f2, f3
+; P9-NEXT: xsnegdp f0, f0
+; P9-NEXT: fsel f1, f0, f1, f3
+; P9-NEXT: blr
+entry:
+ %cmp = fcmp nnan ninf nsz oeq double %a, 1.000000e+00
+ %y.z = select i1 %cmp, double %y, double %z
+ ret double %y.z
+}
>From ea7082ac60945ed64201a2d1f9884ff9825f038c Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Sat, 29 Nov 2025 14:30:48 +0800
Subject: [PATCH 05/12] Fix AArch64 tests
---
.../arm64-constrained-fcmp-no-nans-opt.ll | 26 +-
.../AArch64/build-vector-dup-simd-nnan.ll | 294 ++++++++++++++++++
.../CodeGen/AArch64/build-vector-dup-simd.ll | 147 ++-------
3 files changed, 341 insertions(+), 126 deletions(-)
create mode 100644 llvm/test/CodeGen/AArch64/build-vector-dup-simd-nnan.ll
diff --git a/llvm/test/CodeGen/AArch64/arm64-constrained-fcmp-no-nans-opt.ll b/llvm/test/CodeGen/AArch64/arm64-constrained-fcmp-no-nans-opt.ll
index 968acb2565b4e..2ddaf0ecf7619 100644
--- a/llvm/test/CodeGen/AArch64/arm64-constrained-fcmp-no-nans-opt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-constrained-fcmp-no-nans-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=arm64-eabi -mattr=+fullfp16 -enable-no-nans-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -mattr=+fullfp16 | FileCheck %s
declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
@@ -7,7 +7,7 @@ declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, met
; CHECK: fcmp s0, s1
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_ueq(float %a, float %b) nounwind ssp strictfp {
+define i1 @f32_constrained_fcmp_ueq(float nofpclass(nan) %a, float nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -16,7 +16,7 @@ define i1 @f32_constrained_fcmp_ueq(float %a, float %b) nounwind ssp strictfp {
; CHECK: fcmp s0, s1
; CHECK-NEXT: cset w0, ne
; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_une(float %a, float %b) nounwind ssp strictfp {
+define i1 @f32_constrained_fcmp_une(float nofpclass(nan) %a, float nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -25,7 +25,7 @@ define i1 @f32_constrained_fcmp_une(float %a, float %b) nounwind ssp strictfp {
; CHECK: fcmp s0, s1
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_ugt(float %a, float %b) nounwind ssp strictfp {
+define i1 @f32_constrained_fcmp_ugt(float nofpclass(nan) %a, float nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -34,7 +34,7 @@ define i1 @f32_constrained_fcmp_ugt(float %a, float %b) nounwind ssp strictfp {
; CHECK: fcmp s0, s1
; CHECK-NEXT: cset w0, ge
; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_uge(float %a, float %b) nounwind ssp strictfp {
+define i1 @f32_constrained_fcmp_uge(float nofpclass(nan) %a, float nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -43,7 +43,7 @@ define i1 @f32_constrained_fcmp_uge(float %a, float %b) nounwind ssp strictfp {
; CHECK: fcmp s0, s1
; CHECK-NEXT: cset w0, lt
; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_ult(float %a, float %b) nounwind ssp strictfp {
+define i1 @f32_constrained_fcmp_ult(float nofpclass(nan) %a, float nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -52,7 +52,7 @@ define i1 @f32_constrained_fcmp_ult(float %a, float %b) nounwind ssp strictfp {
; CHECK: fcmp s0, s1
; CHECK-NEXT: cset w0, le
; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_ule(float %a, float %b) nounwind ssp strictfp {
+define i1 @f32_constrained_fcmp_ule(float nofpclass(nan) %a, float nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -61,7 +61,7 @@ define i1 @f32_constrained_fcmp_ule(float %a, float %b) nounwind ssp strictfp {
; CHECK: fcmp d0, d1
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_ueq(double %a, double %b) nounwind ssp strictfp {
+define i1 @f64_constrained_fcmp_ueq(double nofpclass(nan) %a, double nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -70,7 +70,7 @@ define i1 @f64_constrained_fcmp_ueq(double %a, double %b) nounwind ssp strictfp
; CHECK: fcmp d0, d1
; CHECK-NEXT: cset w0, ne
; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_une(double %a, double %b) nounwind ssp strictfp {
+define i1 @f64_constrained_fcmp_une(double nofpclass(nan) %a, double nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -79,7 +79,7 @@ define i1 @f64_constrained_fcmp_une(double %a, double %b) nounwind ssp strictfp
; CHECK: fcmp d0, d1
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_ugt(double %a, double %b) nounwind ssp strictfp {
+define i1 @f64_constrained_fcmp_ugt(double nofpclass(nan) %a, double nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -88,7 +88,7 @@ define i1 @f64_constrained_fcmp_ugt(double %a, double %b) nounwind ssp strictfp
; CHECK: fcmp d0, d1
; CHECK-NEXT: cset w0, ge
; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_uge(double %a, double %b) nounwind ssp strictfp {
+define i1 @f64_constrained_fcmp_uge(double nofpclass(nan) %a, double nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -97,7 +97,7 @@ define i1 @f64_constrained_fcmp_uge(double %a, double %b) nounwind ssp strictfp
; CHECK: fcmp d0, d1
; CHECK-NEXT: cset w0, lt
; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_ult(double %a, double %b) nounwind ssp strictfp {
+define i1 @f64_constrained_fcmp_ult(double nofpclass(nan) %a, double nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict")
ret i1 %cmp
}
@@ -106,7 +106,7 @@ define i1 @f64_constrained_fcmp_ult(double %a, double %b) nounwind ssp strictfp
; CHECK: fcmp d0, d1
; CHECK-NEXT: cset w0, le
; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_ule(double %a, double %b) nounwind ssp strictfp {
+define i1 @f64_constrained_fcmp_ule(double nofpclass(nan) %a, double nofpclass(nan) %b) nounwind ssp strictfp {
%cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict")
ret i1 %cmp
}
diff --git a/llvm/test/CodeGen/AArch64/build-vector-dup-simd-nnan.ll b/llvm/test/CodeGen/AArch64/build-vector-dup-simd-nnan.ll
new file mode 100644
index 0000000000000..440fd2ba7f8f7
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/build-vector-dup-simd-nnan.ll
@@ -0,0 +1,294 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK
+
+define <1 x float> @dup_v1i32_oeq(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_oeq:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmeq s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan oeq float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ogt(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_ogt:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ogt float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_oge(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_oge:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan oge float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_olt(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_olt:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan olt float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ole(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_ole:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s1, s0
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ole float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_one(float %a, float %b) {
+;
+;
+; CHECK-LABEL: dup_v1i32_one:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmeq s0, s0, s1
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan one float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ord(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_ord:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s2, s0, s1
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ord float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ueq(float %a, float %b) {
+;
+;
+; CHECK-LABEL: dup_v1i32_ueq:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmeq s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ueq float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ugt(float %a, float %b) {
+;
+;
+; CHECK-LABEL: dup_v1i32_ugt:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ugt float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_uge(float %a, float %b) {
+;
+;
+; CHECK-LABEL: dup_v1i32_uge:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan uge float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ult(float %a, float %b) {
+;
+;
+; CHECK-LABEL: dup_v1i32_ult:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ult float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_ule(float %a, float %b) {
+;
+;
+; CHECK-LABEL: dup_v1i32_ule:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s1, s0
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ule float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_une(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_une:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmeq s0, s0, s1
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan une float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <1 x float> @dup_v1i32_uno(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_uno:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s2, s0, s1
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan uno float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+ ret <1 x float> %1
+}
+
+define <4 x float> @dup_v4i32(float %a, float %b) {
+; CHECK-LABEL: dup_v4i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s0, s1
+; CHECK-NEXT: dup v0.4s, v0.s[0]
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan oge float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <4 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <4 x i32> %vecinit.i to <4 x float>
+ %2 = shufflevector <4 x float> %1, <4 x float> poison, <4 x i32> zeroinitializer
+ ret <4 x float> %2
+}
+
+define <4 x float> @dup_v4i32_reversed(float %a, float %b) {
+; CHECK-LABEL: dup_v4i32_reversed:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: dup v0.4s, v0.s[0]
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ogt float %b, %a
+ %vcmpd.i = sext i1 %0 to i32
+ %vecinit.i = insertelement <4 x i32> poison, i32 %vcmpd.i, i64 0
+ %1 = bitcast <4 x i32> %vecinit.i to <4 x float>
+ %2 = shufflevector <4 x float> %1, <4 x float> poison, <4 x i32> zeroinitializer
+ ret <4 x float> %2
+}
+
+define <2 x double> @dup_v2i64(double %a, double %b) {
+; CHECK-LABEL: dup_v2i64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt d0, d0, d1
+; CHECK-NEXT: dup v0.2d, v0.d[0]
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan ogt double %a, %b
+ %vcmpd.i = sext i1 %0 to i64
+ %vecinit.i = insertelement <2 x i64> poison, i64 %vcmpd.i, i64 0
+ %1 = bitcast <2 x i64> %vecinit.i to <2 x double>
+ %2 = shufflevector <2 x double> %1, <2 x double> poison, <2 x i32> zeroinitializer
+ ret <2 x double> %2
+}
+
+define <8 x half> @dup_v8i16(half %a, half %b) {
+;
+;
+; FIXME: Could be replaced with fcmeq + dup but the type of the former is
+; promoted to i32 during selection and then the optimization does not apply.
+; CHECK-LABEL: dup_v8i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvt s1, h1
+; CHECK-NEXT: fcvt s0, h0
+; CHECK-NEXT: fcmeq s0, s0, s1
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan oeq half %a, %b
+ %vcmpd.i = sext i1 %0 to i16
+ %vecinit.i = insertelement <8 x i16> poison, i16 %vcmpd.i, i64 0
+ %1 = bitcast <8 x i16> %vecinit.i to <8 x half>
+ ret <8 x half> %1
+}
+
+define i32 @mask_i32(float %a, float %b) {
+; CHECK-LABEL: mask_i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmp s0, s1
+; CHECK-NEXT: csetm w0, eq
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan oeq float %a, %b
+ %vcmpd.i = sext i1 %0 to i32
+ ret i32 %vcmpd.i
+}
+
+; Verify that a mask is not emitted when (allOnes, allZeros) are not the
+; operands for the SELECT_CC.
+define i32 @bool_i32(float %a, float %b) {
+; CHECK-LABEL: bool_i32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmp s0, s1
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+entry:
+ %0 = fcmp nnan oeq float %a, %b
+ %vcmpd.i = zext i1 %0 to i32
+ ret i32 %vcmpd.i
+}
diff --git a/llvm/test/CodeGen/AArch64/build-vector-dup-simd.ll b/llvm/test/CodeGen/AArch64/build-vector-dup-simd.ll
index ac0b8e89519dd..2649215d97203 100644
--- a/llvm/test/CodeGen/AArch64/build-vector-dup-simd.ll
+++ b/llvm/test/CodeGen/AArch64/build-vector-dup-simd.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-NOFULLFP16
-; RUN: llc < %s -mtriple=aarch64 --enable-no-nans-fp-math | FileCheck %s --check-prefixes=CHECK,CHECK-NONANS
; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FULLFP16
define <1 x float> @dup_v1i32_oeq(float %a, float %b) {
@@ -69,27 +68,13 @@ entry:
}
define <1 x float> @dup_v1i32_one(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_one:
-; CHECK-NOFULLFP16: // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT: fcmgt s2, s0, s1
-; CHECK-NOFULLFP16-NEXT: fcmgt s0, s1, s0
-; CHECK-NOFULLFP16-NEXT: orr v0.16b, v0.16b, v2.16b
-; CHECK-NOFULLFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NOFULLFP16-NEXT: ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_one:
-; CHECK-NONANS: // %bb.0: // %entry
-; CHECK-NONANS-NEXT: fcmeq s0, s0, s1
-; CHECK-NONANS-NEXT: mvn v0.8b, v0.8b
-; CHECK-NONANS-NEXT: ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_one:
-; CHECK-FULLFP16: // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT: fcmgt s2, s0, s1
-; CHECK-FULLFP16-NEXT: fcmgt s0, s1, s0
-; CHECK-FULLFP16-NEXT: orr v0.16b, v0.16b, v2.16b
-; CHECK-FULLFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-FULLFP16-NEXT: ret
+; CHECK-LABEL: dup_v1i32_one:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s2, s0, s1
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
entry:
%0 = fcmp one float %a, %b
%vcmpd.i = sext i1 %0 to i32
@@ -115,26 +100,13 @@ entry:
}
define <1 x float> @dup_v1i32_ueq(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_ueq:
-; CHECK-NOFULLFP16: // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT: fcmgt s2, s0, s1
-; CHECK-NOFULLFP16-NEXT: fcmgt s0, s1, s0
-; CHECK-NOFULLFP16-NEXT: orr v0.16b, v0.16b, v2.16b
-; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT: ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_ueq:
-; CHECK-NONANS: // %bb.0: // %entry
-; CHECK-NONANS-NEXT: fcmeq s0, s0, s1
-; CHECK-NONANS-NEXT: ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_ueq:
-; CHECK-FULLFP16: // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT: fcmgt s2, s0, s1
-; CHECK-FULLFP16-NEXT: fcmgt s0, s1, s0
-; CHECK-FULLFP16-NEXT: orr v0.16b, v0.16b, v2.16b
-; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT: ret
+; CHECK-LABEL: dup_v1i32_ueq:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s2, s0, s1
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
entry:
%0 = fcmp ueq float %a, %b
%vcmpd.i = sext i1 %0 to i32
@@ -144,22 +116,11 @@ entry:
}
define <1 x float> @dup_v1i32_ugt(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_ugt:
-; CHECK-NOFULLFP16: // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT: fcmge s0, s1, s0
-; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT: ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_ugt:
-; CHECK-NONANS: // %bb.0: // %entry
-; CHECK-NONANS-NEXT: fcmgt s0, s0, s1
-; CHECK-NONANS-NEXT: ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_ugt:
-; CHECK-FULLFP16: // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT: fcmge s0, s1, s0
-; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT: ret
+; CHECK-LABEL: dup_v1i32_ugt:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s1, s0
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
entry:
%0 = fcmp ugt float %a, %b
%vcmpd.i = sext i1 %0 to i32
@@ -169,22 +130,11 @@ entry:
}
define <1 x float> @dup_v1i32_uge(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_uge:
-; CHECK-NOFULLFP16: // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT: fcmgt s0, s1, s0
-; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT: ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_uge:
-; CHECK-NONANS: // %bb.0: // %entry
-; CHECK-NONANS-NEXT: fcmge s0, s0, s1
-; CHECK-NONANS-NEXT: ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_uge:
-; CHECK-FULLFP16: // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT: fcmgt s0, s1, s0
-; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT: ret
+; CHECK-LABEL: dup_v1i32_uge:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s1, s0
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
entry:
%0 = fcmp uge float %a, %b
%vcmpd.i = sext i1 %0 to i32
@@ -194,22 +144,11 @@ entry:
}
define <1 x float> @dup_v1i32_ult(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_ult:
-; CHECK-NOFULLFP16: // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT: fcmge s0, s0, s1
-; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT: ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_ult:
-; CHECK-NONANS: // %bb.0: // %entry
-; CHECK-NONANS-NEXT: fcmgt s0, s1, s0
-; CHECK-NONANS-NEXT: ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_ult:
-; CHECK-FULLFP16: // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT: fcmge s0, s0, s1
-; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT: ret
+; CHECK-LABEL: dup_v1i32_ult:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmge s0, s0, s1
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
entry:
%0 = fcmp ult float %a, %b
%vcmpd.i = sext i1 %0 to i32
@@ -219,22 +158,11 @@ entry:
}
define <1 x float> @dup_v1i32_ule(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_ule:
-; CHECK-NOFULLFP16: // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT: fcmgt s0, s0, s1
-; CHECK-NOFULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT: ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_ule:
-; CHECK-NONANS: // %bb.0: // %entry
-; CHECK-NONANS-NEXT: fcmge s0, s1, s0
-; CHECK-NONANS-NEXT: ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_ule:
-; CHECK-FULLFP16: // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT: fcmgt s0, s0, s1
-; CHECK-FULLFP16-NEXT: mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT: ret
+; CHECK-LABEL: dup_v1i32_ule:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcmgt s0, s0, s1
+; CHECK-NEXT: mvn v0.8b, v0.8b
+; CHECK-NEXT: ret
entry:
%0 = fcmp ule float %a, %b
%vcmpd.i = sext i1 %0 to i32
@@ -326,13 +254,6 @@ define <8 x half> @dup_v8i16(half %a, half %b) {
; CHECK-NOFULLFP16-NEXT: fcmeq s0, s0, s1
; CHECK-NOFULLFP16-NEXT: ret
;
-; CHECK-NONANS-LABEL: dup_v8i16:
-; CHECK-NONANS: // %bb.0: // %entry
-; CHECK-NONANS-NEXT: fcvt s1, h1
-; CHECK-NONANS-NEXT: fcvt s0, h0
-; CHECK-NONANS-NEXT: fcmeq s0, s0, s1
-; CHECK-NONANS-NEXT: ret
-;
; CHECK-FULLFP16-LABEL: dup_v8i16:
; CHECK-FULLFP16: // %bb.0: // %entry
; CHECK-FULLFP16-NEXT: fcmp h0, h1
>From 652f1f56854c7cb76cc027dc739e2c6d97f339cc Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Sun, 30 Nov 2025 21:38:26 +0800
Subject: [PATCH 06/12] [DAGCombiner] Consider fast-math flags from SetCC in
getMinMaxOpcodeForFP
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 27 ++++++++++++-------
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 0f3a207cc6414..58757ffc88976 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6567,21 +6567,23 @@ static bool arebothOperandsNotNan(SDValue Operand1, SDValue Operand2,
}
// FIXME: use FMINIMUMNUM if possible, such as for RISC-V.
-static unsigned getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2,
- ISD::CondCode CC, unsigned OrAndOpcode,
- SelectionDAG &DAG,
- bool isFMAXNUMFMINNUM_IEEE,
- bool isFMAXNUMFMINNUM) {
+static unsigned
+getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2,
+ SDNodeFlags LHSSetCCFlags, SDNodeFlags RHSSetCCFlags,
+ ISD::CondCode CC, unsigned OrAndOpcode, SelectionDAG &DAG,
+ bool isFMAXNUMFMINNUM_IEEE, bool isFMAXNUMFMINNUM) {
// The optimization cannot be applied for all the predicates because
// of the way FMINNUM/FMAXNUM and FMINNUM_IEEE/FMAXNUM_IEEE handle
// NaNs. For FMINNUM_IEEE/FMAXNUM_IEEE, the optimization cannot be
// applied at all if one of the operands is a signaling NaN.
+ bool SetCCNoNaNs = LHSSetCCFlags.hasNoNaNs() && RHSSetCCFlags.hasNoNaNs();
+
// It is safe to use FMINNUM_IEEE/FMAXNUM_IEEE if all the operands
// are non NaN values.
if (((CC == ISD::SETLT || CC == ISD::SETLE) && (OrAndOpcode == ISD::OR)) ||
((CC == ISD::SETGT || CC == ISD::SETGE) && (OrAndOpcode == ISD::AND))) {
- return arebothOperandsNotNan(Operand1, Operand2, DAG) &&
+ return (SetCCNoNaNs || arebothOperandsNotNan(Operand1, Operand2, DAG)) &&
isFMAXNUMFMINNUM_IEEE
? ISD::FMINNUM_IEEE
: ISD::DELETED_NODE;
@@ -6589,7 +6591,7 @@ static unsigned getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2,
if (((CC == ISD::SETGT || CC == ISD::SETGE) && (OrAndOpcode == ISD::OR)) ||
((CC == ISD::SETLT || CC == ISD::SETLE) && (OrAndOpcode == ISD::AND))) {
- return arebothOperandsNotNan(Operand1, Operand2, DAG) &&
+ return (SetCCNoNaNs || arebothOperandsNotNan(Operand1, Operand2, DAG)) &&
isFMAXNUMFMINNUM_IEEE
? ISD::FMAXNUM_IEEE
: ISD::DELETED_NODE;
@@ -6638,6 +6640,8 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
!LHS->hasOneUse() || !RHS->hasOneUse())
return SDValue();
+ SDNodeFlags LHSSetCCFlags = LHS->getFlags();
+ SDNodeFlags RHSSetCCFlags = RHS->getFlags();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
AndOrSETCCFoldKind TargetPreference = TLI.isDesirableToCombineLogicOpOfSETCC(
LogicOp, LHS.getNode(), RHS.getNode());
@@ -6729,11 +6733,14 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
else
NewOpcode = IsSigned ? ISD::SMAX : ISD::UMAX;
} else if (OpVT.isFloatingPoint())
- NewOpcode =
- getMinMaxOpcodeForFP(Operand1, Operand2, CC, LogicOp->getOpcode(),
- DAG, isFMAXNUMFMINNUM_IEEE, isFMAXNUMFMINNUM);
+ NewOpcode = getMinMaxOpcodeForFP(
+ Operand1, Operand2, LHSSetCCFlags, RHSSetCCFlags, CC,
+ LogicOp->getOpcode(), DAG, isFMAXNUMFMINNUM_IEEE, isFMAXNUMFMINNUM);
if (NewOpcode != ISD::DELETED_NODE) {
+ // Propagate fast-math flags from setcc.
+ SelectionDAG::FlagInserter FlagInserter(DAG, LHS->getFlags() &
+ RHS->getFlags());
SDValue MinMaxValue =
DAG.getNode(NewOpcode, DL, OpVT, Operand1, Operand2);
return DAG.getSetCC(DL, VT, MinMaxValue, CommonValue, CC);
>From c37e814b3c284b66a0c6ee802f5add02bc687d21 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Sun, 30 Nov 2025 21:39:30 +0800
Subject: [PATCH 07/12] [AMDGPU] Consider fast-math flags from source in
isCanonicalized
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 3 ++-
llvm/lib/Target/AMDGPU/SIISelLowering.h | 2 +-
llvm/lib/Target/AMDGPU/SIInstrInfo.td | 6 ++++--
llvm/lib/Target/AMDGPU/SIInstructions.td | 5 +----
4 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 3aef0bd31debe..47ccdc5bf3e50 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -14411,6 +14411,7 @@ SDValue SITargetLowering::performRcpCombine(SDNode *N,
}
bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
+ SDNodeFlags UserFlags,
unsigned MaxDepth) const {
unsigned Opcode = Op.getOpcode();
if (Opcode == ISD::FCANONICALIZE)
@@ -14610,7 +14611,7 @@ bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
// FIXME: denormalsEnabledForType is broken for dynamic
return denormalsEnabledForType(DAG, Op.getValueType()) &&
- DAG.isKnownNeverSNaN(Op);
+ (UserFlags.hasNoNaNs() || DAG.isKnownNeverSNaN(Op));
}
bool SITargetLowering::isCanonicalized(Register Reg, const MachineFunction &MF,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 74e58f4272e10..13b4facc12b18 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -555,7 +555,7 @@ class SITargetLowering final : public AMDGPUTargetLowering {
Register N1) const override;
bool isCanonicalized(SelectionDAG &DAG, SDValue Op,
- unsigned MaxDepth = 5) const;
+ SDNodeFlags UserFlags = {}, unsigned MaxDepth = 5) const;
bool isCanonicalized(Register Reg, const MachineFunction &MF,
unsigned MaxDepth = 5) const;
bool denormalsEnabledForType(const SelectionDAG &DAG, EVT VT) const;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 526250a04e001..800d9f2f12262 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -992,11 +992,13 @@ def MFMALdScaleXForm : SDNodeXForm<timm, [{
return CurDAG->getTargetConstant(New, SDLoc(N), MVT::i32);
}]>;
-def is_canonicalized : PatLeaf<(fAny srcvalue:$src), [{
+def fcanonicalize_canonicalized
+ : PatFrag<(ops node:$op), (fcanonicalize node:$op), [{
const SITargetLowering &Lowering =
*static_cast<const SITargetLowering *>(getTargetLowering());
- return Lowering.isCanonicalized(*CurDAG, Op);
+ return Lowering.isCanonicalized(*CurDAG, Op->getOperand(0), N->getFlags());
}]> {
+ // FIXME: This predicate for GlobalISel is dead code.
let GISelPredicateCode = [{
const SITargetLowering *TLI = static_cast<const SITargetLowering *>(
MF.getSubtarget().getTargetLowering());
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index c5f5b7d53cfb1..83259ee0e47d8 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -3485,10 +3485,7 @@ def : GCNPat<
// If fcanonicalize's operand is implicitly canonicalized, we only need a copy.
let AddedComplexity = 8 in {
foreach vt = [f16, v2f16, f32, v2f32, f64] in {
- def : GCNPat<
- (fcanonicalize (vt is_canonicalized:$src)),
- (COPY vt:$src)
- >;
+ def : GCNPat<(fcanonicalize_canonicalized vt:$src), (COPY vt:$src)>;
}
}
>From 36a9f4a3430fdc9ee79d7fae92430e1432e15f3f Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Sun, 30 Nov 2025 21:40:19 +0800
Subject: [PATCH 08/12] Fix AMDGPU tests
---
.../CodeGen/AMDGPU/combine_andor_with_cmps.ll | 1197 +++++---------
.../AMDGPU/combine_andor_with_cmps_nnan.ll | 1449 +++++++++++++++++
llvm/test/CodeGen/AMDGPU/fmax_legacy.ll | 4 +-
llvm/test/CodeGen/AMDGPU/fmin_legacy.ll | 4 +-
llvm/test/CodeGen/AMDGPU/fold-fabs.ll | 10 +-
.../transform-block-with-return-to-epilog.ll | 2 +-
6 files changed, 1862 insertions(+), 804 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps_nnan.ll
diff --git a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
index ec92edbe2bf65..114392c688f94 100644
--- a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck %s -check-prefixes=GCN,GFX11,GFX11-TRUE16
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck %s -check-prefixes=GCN,GFX11,GFX11-FAKE16
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 -enable-no-nans-fp-math < %s | FileCheck %s -check-prefixes=GCN,GFX11NONANS,GCN-TRUE16,GFX11NONANS-TRUE16
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 -enable-no-nans-fp-math < %s | FileCheck %s -check-prefixes=GCN,GFX11NONANS,GCN-FAKE16,GFX11NONANS-FAKE16
; The tests check the following optimization of DAGCombiner:
; CMP(A,C)||CMP(B,C) => CMP(MIN/MAX(A,B), C)
@@ -855,21 +853,13 @@ define i1 @test57(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test58(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test58:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test58:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test58:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ugt double %arg1, %arg3
%cmp2 = fcmp ugt double %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -877,21 +867,13 @@ define i1 @test58(double %arg1, double %arg2, double %arg3) #0 {
}
define i1 @test59(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test59:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test59:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test59:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp uge float %arg1, %arg3
%cmp2 = fcmp uge float %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -899,21 +881,13 @@ define i1 @test59(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test60(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test60:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test60:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test60:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ule float %arg1, %arg3
%cmp2 = fcmp ule float %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -921,21 +895,13 @@ define i1 @test60(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test61(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test61:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test61:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test61:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult double %arg1, %arg3
%cmp2 = fcmp ult double %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -1083,22 +1049,14 @@ define i1 @test69(double %arg1, double %arg2, double %arg3) {
}
define i1 @test70(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test70:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test70:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test70:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp olt float %var1, %arg3
@@ -1144,22 +1102,14 @@ define i1 @test72(double %arg1, double %arg2, double %arg3) {
}
define i1 @test73(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test73:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test73:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test73:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp oge float %var1, %arg3
@@ -1169,25 +1119,15 @@ define i1 @test73(float %arg1, float %arg2, float %arg3) {
}
define i1 @test74(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test74:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test74:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test74:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
%cmp1 = fcmp ugt double %var1, %arg3
@@ -1197,22 +1137,14 @@ define i1 @test74(double %arg1, double %arg2, double %arg3) {
}
define i1 @test75(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test75:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test75:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test75:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp uge float %var1, %arg3
@@ -1222,22 +1154,14 @@ define i1 @test75(float %arg1, float %arg2, float %arg3) {
}
define i1 @test76(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test76:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test76:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test76:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp ule float %var1, %arg3
@@ -1247,25 +1171,15 @@ define i1 @test76(float %arg1, float %arg2, float %arg3) {
}
define i1 @test77(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test77:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test77:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test77:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
%cmp1 = fcmp ult double %var1, %arg3
@@ -1289,21 +1203,13 @@ define i1 @test78(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test79(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test79:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_nge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test79:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test79:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_nge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult float %arg1, %arg3
%cmp2 = fcmp ugt float %arg3, %arg2
%and1 = and i1 %cmp1, %cmp2
@@ -1364,22 +1270,14 @@ define i1 @test82(double %arg1, double %arg2, double %arg3) {
}
define i1 @test83(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test83:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test83:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test83:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp ule float %var1, %arg3
@@ -1408,7 +1306,6 @@ define i1 @test84(half %arg1, half %arg2, half %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v0, v2
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test84:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1416,7 +1313,6 @@ define i1 @test84(half %arg1, half %arg2, half %arg3) {
; GCN-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v0.l, v2.l
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test84:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1458,7 +1354,6 @@ define <2 x i1> @test85(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v3, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test85:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1468,7 +1363,6 @@ define <2 x i1> @test85(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
; GCN-TRUE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v1.h, v2.h
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test85:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1514,7 +1408,6 @@ define <2 x i1> @test86(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v3, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test86:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1524,7 +1417,6 @@ define <2 x i1> @test86(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
; GCN-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v1.h, v2.h
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test86:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1564,7 +1456,6 @@ define i1 @test87(half %arg1, half %arg2, half %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0, v2
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test87:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1572,7 +1463,6 @@ define i1 @test87(half %arg1, half %arg2, half %arg3) {
; GCN-TRUE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test87:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1614,26 +1504,24 @@ define <2 x i1> @test88(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test88:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-TRUE16-NEXT: v_pk_min_f16 v1, v0, v1
-; GCN-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v1.l, v2.l
+; GCN-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1.l, v2.l
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v1.h, v2.h
+; GCN-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1.h, v2.h
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test88:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-FAKE16-NEXT: v_pk_min_f16 v0, v0, v1
; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GCN-FAKE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2
; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v3, v1
+; GCN-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v1
; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
%var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
@@ -1664,20 +1552,18 @@ define i1 @test89(half %arg1, half %arg2, half %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_nlt_f16_e32 vcc_lo, v0, v2
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test89:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l
-; GCN-TRUE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
+; GCN-TRUE16-NEXT: v_cmp_nlt_f16_e32 vcc_lo, v0.l, v2.l
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test89:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1
-; GCN-FAKE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cmp_nlt_f16_e32 vcc_lo, v0, v2
; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
%var1 = call half @llvm.canonicalize.f16(half %arg1)
@@ -1708,20 +1594,18 @@ define i1 @test90(half %arg1, half %arg2, half %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test90:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l
-; GCN-TRUE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v0.l, v2.l
+; GCN-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.l, v2.l
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test90:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1
-; GCN-FAKE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2
; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
%var1 = call half @llvm.canonicalize.f16(half %arg1)
@@ -1758,26 +1642,24 @@ define <2 x i1> @test91(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
; GFX11-FAKE16-NEXT: v_cmp_nge_f16_e32 vcc_lo, v3, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test91:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-TRUE16-NEXT: v_pk_max_f16 v1, v0, v1
-; GCN-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v1.l, v2.l
+; GCN-TRUE16-NEXT: v_cmp_nge_f16_e32 vcc_lo, v1.l, v2.l
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v1.h, v2.h
+; GCN-TRUE16-NEXT: v_cmp_nge_f16_e32 vcc_lo, v1.h, v2.h
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test91:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-FAKE16-NEXT: v_pk_max_f16 v0, v0, v1
; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GCN-FAKE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cmp_nge_f16_e32 vcc_lo, v0, v2
; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v3, v1
+; GCN-FAKE16-NEXT: v_cmp_nge_f16_e32 vcc_lo, v3, v1
; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
%var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
@@ -2175,21 +2057,13 @@ define i1 @test107(float %arg1, float %arg2, float %arg3, float %C) {
}
define i1 @test108(float %arg1, float %arg2, float %arg3, float %C) {
-; GFX11-LABEL: test108:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max3_f32 v0, v0, v1, v2
-; GFX11-NEXT: v_cmp_nge_f32_e32 vcc_lo, v0, v3
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test108:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max3_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v3
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test108:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_nge_f32_e32 vcc_lo, v0, v3
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult float %arg1, %C
%cmp2 = fcmp ult float %arg2, %C
%cmp3 = fcmp ult float %arg3, %C
@@ -2199,27 +2073,17 @@ define i1 @test108(float %arg1, float %arg2, float %arg3, float %C) {
}
define i1 @test109(float %arg1, float %arg2, float %arg3, float %arg4, float %C) {
-; GFX11-LABEL: test109:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GFX11-NEXT: v_cmp_gt_f32_e64 s0, v1, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test109:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GFX11NONANS-NEXT: v_cmp_gt_f32_e64 s0, v1, v4
-; GFX11NONANS-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test109:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
+; GCN-NEXT: v_cmp_gt_f32_e64 s0, v1, v4
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %C
%cmp2 = fcmp olt float %arg2, %C
%cmp3 = fcmp ogt float %arg3, %C
@@ -2257,28 +2121,17 @@ define i1 @test110(float %arg1, float %arg2, float %arg3, float %arg4, float %C1
}
define i1 @test111(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %C) {
-; GFX11-LABEL: test111:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT: v_dual_min_f32 v2, v2, v3 :: v_dual_max_f32 v3, v4, v4
-; GFX11-NEXT: v_min3_f32 v0, v0, v1, v2
-; GFX11-NEXT: v_min_f32_e32 v0, v0, v3
-; GFX11-NEXT: v_min3_f32 v0, v5, v6, v0
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test111:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v2, v2, v3
-; GFX11NONANS-NEXT: v_min3_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v4
-; GFX11NONANS-NEXT: v_min3_f32 v0, v5, v6, v0
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test111:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT: v_dual_min_f32 v2, v2, v3 :: v_dual_max_f32 v3, v4, v4
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_min_f32_e32 v0, v0, v3
+; GCN-NEXT: v_min3_f32 v0, v5, v6, v0
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %C
%cmp2 = fcmp olt float %arg2, %C
%or1 = or i1 %cmp1, %cmp2
@@ -2298,30 +2151,19 @@ define i1 @test111(float %arg1, float %arg2, float %arg3, float %arg4, float %ar
}
define i1 @test112(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %C) {
-; GFX11-LABEL: test112:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT: v_cmp_nge_f32_e32 vcc_lo, v4, v8
-; GFX11-NEXT: v_dual_max_f32 v5, v5, v5 :: v_dual_min_f32 v2, v2, v3
-; GFX11-NEXT: v_max_f32_e32 v3, v6, v6
-; GFX11-NEXT: v_min3_f32 v0, v0, v1, v2
-; GFX11-NEXT: v_min3_f32 v0, v0, v5, v3
-; GFX11-NEXT: v_cmp_lt_f32_e64 s0, v0, v8
-; GFX11-NEXT: s_or_b32 s0, s0, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test112:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v2, v2, v3
-; GFX11NONANS-NEXT: v_min3_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v4
-; GFX11NONANS-NEXT: v_min3_f32 v0, v5, v6, v0
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test112:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT: v_cmp_nge_f32_e32 vcc_lo, v4, v8
+; GCN-NEXT: v_dual_max_f32 v5, v5, v5 :: v_dual_min_f32 v2, v2, v3
+; GCN-NEXT: v_max_f32_e32 v3, v6, v6
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_min3_f32 v0, v0, v5, v3
+; GCN-NEXT: v_cmp_lt_f32_e64 s0, v0, v8
+; GCN-NEXT: s_or_b32 s0, s0, vcc_lo
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %C
%cmp2 = fcmp olt float %arg2, %C
%or1 = or i1 %cmp1, %cmp2
@@ -2341,24 +2183,16 @@ define i1 @test112(float %arg1, float %arg2, float %arg3, float %arg4, float %ar
}
define i1 @test113(float %arg1, float %arg2, float %arg3, float %C) {
-; GFX11-LABEL: test113:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_nge_f32_e64 s0, v0, v3
-; GFX11-NEXT: s_or_b32 s0, s0, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test113:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_maxmin_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v3
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test113:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v2, v3
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_nge_f32_e64 s0, v0, v3
+; GCN-NEXT: s_or_b32 s0, s0, vcc_lo
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult float %arg1, %C
%cmp2 = fcmp ult float %arg2, %C
%cmp3 = fcmp olt float %arg3, %C
@@ -2368,26 +2202,16 @@ define i1 @test113(float %arg1, float %arg2, float %arg3, float %C) {
}
define i1 @test114(float %arg1, float %arg2, float %arg3, float %C) {
-; GFX11-LABEL: test114:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT: v_cmp_nge_f32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT: v_cmp_gt_f32_e64 s0, v0, v3
-; GFX11-NEXT: s_and_b32 s0, s0, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test114:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v2, v3
-; GFX11NONANS-NEXT: v_cmp_gt_f32_e64 s0, v0, v3
-; GFX11NONANS-NEXT: s_and_b32 s0, s0, vcc_lo
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test114:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT: v_cmp_nge_f32_e32 vcc_lo, v2, v3
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_gt_f32_e64 s0, v0, v3
+; GCN-NEXT: s_and_b32 s0, s0, vcc_lo
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ogt float %arg1, %C
%cmp2 = fcmp ogt float %arg2, %C
%cmp3 = fcmp ult float %arg3, %C
@@ -2397,26 +2221,17 @@ define i1 @test114(float %arg1, float %arg2, float %arg3, float %C) {
}
define i1 @test115(float %arg1, float %arg2, float %arg3, float %arg4, float %C) {
-; GFX11-LABEL: test115:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v3, v3, v3
-; GFX11-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GFX11-NEXT: v_cmp_nge_f32_e64 s0, v1, v4
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test115:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v2, v2, v3
-; GFX11NONANS-NEXT: v_min3_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test115:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v3, v3, v3
+; GCN-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
+; GCN-NEXT: v_cmp_nge_f32_e64 s0, v1, v4
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %C
%cmp2 = fcmp olt float %arg2, %C
%var3 = call float @llvm.canonicalize.f32(float %arg3)
@@ -2430,44 +2245,27 @@ define i1 @test115(float %arg1, float %arg2, float %arg3, float %arg4, float %C)
}
define i1 @test116(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %arg9, float %arg10, float %C) {
-; GFX11-LABEL: test116:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v9, v9, v9 :: v_dual_max_f32 v8, v8, v8
-; GFX11-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT: v_dual_max_f32 v5, v5, v5 :: v_dual_max_f32 v4, v4, v4
-; GFX11-NEXT: v_dual_max_f32 v7, v7, v7 :: v_dual_max_f32 v6, v6, v6
-; GFX11-NEXT: v_min_f32_e32 v8, v8, v9
-; GFX11-NEXT: v_dual_max_f32 v2, v2, v3 :: v_dual_min_f32 v3, v4, v5
-; GFX11-NEXT: v_max_f32_e32 v4, v6, v7
-; GFX11-NEXT: v_min3_f32 v0, v0, v1, v8
-; GFX11-NEXT: v_cmp_gt_f32_e32 vcc_lo, v2, v10
-; GFX11-NEXT: v_cmp_lt_f32_e64 s0, v3, v10
-; GFX11-NEXT: v_cmp_gt_f32_e64 s1, v4, v10
-; GFX11-NEXT: v_cmp_lt_f32_e64 s2, v0, v10
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_or_b32 s1, s2, vcc_lo
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test116:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v8, v8, v9
-; GFX11NONANS-NEXT: v_dual_max_f32 v2, v2, v3 :: v_dual_min_f32 v3, v4, v5
-; GFX11NONANS-NEXT: v_max_f32_e32 v4, v6, v7
-; GFX11NONANS-NEXT: v_min3_f32 v0, v0, v1, v8
-; GFX11NONANS-NEXT: v_cmp_gt_f32_e32 vcc_lo, v2, v10
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e64 s0, v3, v10
-; GFX11NONANS-NEXT: v_cmp_gt_f32_e64 s1, v4, v10
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e64 s2, v0, v10
-; GFX11NONANS-NEXT: s_or_b32 s0, s0, s1
-; GFX11NONANS-NEXT: s_or_b32 s1, s2, vcc_lo
-; GFX11NONANS-NEXT: s_or_b32 s0, s0, s1
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test116:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v9, v9, v9 :: v_dual_max_f32 v8, v8, v8
+; GCN-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT: v_dual_max_f32 v5, v5, v5 :: v_dual_max_f32 v4, v4, v4
+; GCN-NEXT: v_dual_max_f32 v7, v7, v7 :: v_dual_max_f32 v6, v6, v6
+; GCN-NEXT: v_min_f32_e32 v8, v8, v9
+; GCN-NEXT: v_dual_max_f32 v2, v2, v3 :: v_dual_min_f32 v3, v4, v5
+; GCN-NEXT: v_max_f32_e32 v4, v6, v7
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v8
+; GCN-NEXT: v_cmp_gt_f32_e32 vcc_lo, v2, v10
+; GCN-NEXT: v_cmp_lt_f32_e64 s0, v3, v10
+; GCN-NEXT: v_cmp_gt_f32_e64 s1, v4, v10
+; GCN-NEXT: v_cmp_lt_f32_e64 s2, v0, v10
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: s_or_b32 s1, s2, vcc_lo
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %C
%cmp2 = fcmp olt float %arg2, %C
%cmp3 = fcmp ogt float %arg3, %C
@@ -2491,45 +2289,27 @@ define i1 @test116(float %arg1, float %arg2, float %arg3, float %arg4, float %ar
}
define i1 @test117(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %arg9, float %arg10, float %arg11, float %arg12, float %C1, float %C2) {
-; GFX11-LABEL: test117:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v6, v6, v6
-; GFX11-NEXT: v_dual_max_f32 v7, v7, v7 :: v_dual_max_f32 v10, v10, v10
-; GFX11-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT: v_dual_max_f32 v11, v11, v11 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT: v_min_f32_e32 v6, v6, v7
-; GFX11-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v10, v11
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v3
-; GFX11-NEXT: v_min3_f32 v3, v4, v5, v6
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v12
-; GFX11-NEXT: v_min3_f32 v0, v8, v9, v1
-; GFX11-NEXT: v_cmp_lt_f32_e64 s0, v2, v13
-; GFX11-NEXT: v_cmp_lt_f32_e64 s1, v3, v13
-; GFX11-NEXT: v_cmp_lt_f32_e64 s2, v0, v12
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_or_b32 s0, s2, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test117:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v6, v6, v7
-; GFX11NONANS-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v10, v11
-; GFX11NONANS-NEXT: v_min_f32_e32 v2, v2, v3
-; GFX11NONANS-NEXT: v_min3_f32 v3, v4, v5, v6
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v12
-; GFX11NONANS-NEXT: v_min3_f32 v0, v8, v9, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e64 s0, v2, v13
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e64 s1, v3, v13
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e64 s2, v0, v12
-; GFX11NONANS-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11NONANS-NEXT: s_or_b32 s0, s0, s1
-; GFX11NONANS-NEXT: s_or_b32 s0, s2, s0
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test117:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v6, v6, v6
+; GCN-NEXT: v_dual_max_f32 v7, v7, v7 :: v_dual_max_f32 v10, v10, v10
+; GCN-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT: v_dual_max_f32 v11, v11, v11 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT: v_min_f32_e32 v6, v6, v7
+; GCN-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v10, v11
+; GCN-NEXT: v_min_f32_e32 v2, v2, v3
+; GCN-NEXT: v_min3_f32 v3, v4, v5, v6
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v12
+; GCN-NEXT: v_min3_f32 v0, v8, v9, v1
+; GCN-NEXT: v_cmp_lt_f32_e64 s0, v2, v13
+; GCN-NEXT: v_cmp_lt_f32_e64 s1, v3, v13
+; GCN-NEXT: v_cmp_lt_f32_e64 s2, v0, v12
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: s_or_b32 s0, s2, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %C1
%cmp2 = fcmp olt float %arg2, %C1
%cmp3 = fcmp olt float %arg3, %C2
@@ -2661,9 +2441,10 @@ define i1 @test122(double %arg1, double %arg2, double %arg3) #1 {
; GCN-LABEL: test122:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult double %arg1, %arg3
%cmp2 = fcmp ult double %arg2, %arg3
@@ -2677,9 +2458,10 @@ define i1 @test123(double %arg1, double %arg2, double %arg3) #1 {
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
@@ -2814,7 +2596,6 @@ define i1 @test131(i16 %arg1, i32 %arg2) {
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-TRUE16-LABEL: test131:
; GCN-TRUE16: ; %bb.0:
; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -2823,7 +2604,6 @@ define i1 @test131(i16 %arg1, i32 %arg2) {
; GCN-TRUE16-NEXT: s_or_b32 s0, s0, vcc_lo
; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
; GCN-FAKE16-LABEL: test131:
; GCN-FAKE16: ; %bb.0:
; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -2875,22 +2655,14 @@ define i1 @test133(i32 %arg1, i32 %arg2) {
}
define i1 @test134(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test134:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_gt_f32_e64 s0, v2, v1
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test134:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test134:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_gt_f32_e64 s0, v2, v1
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %arg3
%cmp2 = fcmp ogt float %arg3, %arg2
%and1 = and i1 %cmp1, %cmp2
@@ -2898,22 +2670,14 @@ define i1 @test134(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test135(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test135:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_nge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_nle_f32_e64 s0, v2, v1
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test135:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test135:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_nge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_nle_f32_e64 s0, v2, v1
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult float %arg1, %arg3
%cmp2 = fcmp ugt float %arg3, %arg2
%or1 = or i1 %cmp1, %cmp2
@@ -2921,26 +2685,16 @@ define i1 @test135(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test136(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test136:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_ge_f64_e64 s0, v[4:5], v[2:3]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test136:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test136:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_ge_f64_e64 s0, v[4:5], v[2:3]
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
%cmp1 = fcmp ole double %var1, %arg3
@@ -2950,23 +2704,15 @@ define i1 @test136(double %arg1, double %arg2, double %arg3) {
}
define i1 @test137(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test137:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_nlt_f32_e64 s0, v2, v1
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test137:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test137:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_nlt_f32_e64 s0, v2, v1
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp ule float %var1, %arg3
@@ -2976,22 +2722,14 @@ define i1 @test137(float %arg1, float %arg2, float %arg3) {
}
define i1 @test138(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test138:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_lt_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test138:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test138:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_lt_f32_e64 s0, v1, v2
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp olt float %arg1, %arg3
%cmp2 = fcmp olt float %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -2999,22 +2737,14 @@ define i1 @test138(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test139(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test139:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test139:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test139:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ole double %arg1, %arg3
%cmp2 = fcmp ole double %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -3022,22 +2752,14 @@ define i1 @test139(double %arg1, double %arg2, double %arg3) #0 {
}
define i1 @test140(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test140:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test140:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test140:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ogt double %arg1, %arg3
%cmp2 = fcmp ogt double %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -3045,22 +2767,14 @@ define i1 @test140(double %arg1, double %arg2, double %arg3) #0 {
}
define i1 @test141(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test141:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_ge_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test141:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test141:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_ge_f32_e64 s0, v1, v2
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp oge float %arg1, %arg3
%cmp2 = fcmp oge float %arg2, %arg3
%and1 = and i1 %cmp1, %cmp2
@@ -3068,22 +2782,14 @@ define i1 @test141(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test142(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test142:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test142:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test142:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ugt double %arg1, %arg3
%cmp2 = fcmp ugt double %arg2, %arg3
%or1 = or i1 %cmp1, %cmp2
@@ -3091,22 +2797,14 @@ define i1 @test142(double %arg1, double %arg2, double %arg3) #0 {
}
define i1 @test143(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test143:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_nlt_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test143:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test143:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_nlt_f32_e64 s0, v1, v2
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp uge float %arg1, %arg3
%cmp2 = fcmp uge float %arg2, %arg3
%or1 = or i1 %cmp1, %cmp2
@@ -3114,22 +2812,14 @@ define i1 @test143(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test144(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test144:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_ngt_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test144:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test144:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_ngt_f32_e64 s0, v1, v2
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ule float %arg1, %arg3
%cmp2 = fcmp ule float %arg2, %arg3
%or1 = or i1 %cmp1, %cmp2
@@ -3137,22 +2827,14 @@ define i1 @test144(float %arg1, float %arg2, float %arg3) #0 {
}
define i1 @test145(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test145:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test145:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test145:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp1 = fcmp ult double %arg1, %arg3
%cmp2 = fcmp ult double %arg2, %arg3
%or1 = or i1 %cmp1, %cmp2
@@ -3160,23 +2842,15 @@ define i1 @test145(double %arg1, double %arg2, double %arg3) #0 {
}
define i1 @test146(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test146:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_lt_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test146:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test146:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_lt_f32_e64 s0, v1, v2
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp olt float %var1, %arg3
@@ -3186,26 +2860,16 @@ define i1 @test146(float %arg1, float %arg2, float %arg3) {
}
define i1 @test147(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test147:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test147:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test147:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
%cmp1 = fcmp ole double %var1, %arg3
@@ -3215,26 +2879,16 @@ define i1 @test147(double %arg1, double %arg2, double %arg3) {
}
define i1 @test148(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test148:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test148:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test148:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
%cmp1 = fcmp ogt double %var1, %arg3
@@ -3244,23 +2898,15 @@ define i1 @test148(double %arg1, double %arg2, double %arg3) {
}
define i1 @test149(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test149:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_ge_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test149:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test149:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_ge_f32_e64 s0, v1, v2
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp oge float %var1, %arg3
@@ -3270,26 +2916,16 @@ define i1 @test149(float %arg1, float %arg2, float %arg3) {
}
define i1 @test150(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test150:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test150:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test150:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
%cmp1 = fcmp ugt double %var1, %arg3
@@ -3299,23 +2935,15 @@ define i1 @test150(double %arg1, double %arg2, double %arg3) {
}
define i1 @test151(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test151:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_nlt_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test151:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test151:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_nlt_f32_e64 s0, v1, v2
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp uge float %var1, %arg3
@@ -3325,23 +2953,15 @@ define i1 @test151(float %arg1, float %arg2, float %arg3) {
}
define i1 @test152(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test152:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT: v_cmp_ngt_f32_e64 s0, v1, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test152:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test152:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cmp_ngt_f32_e64 s0, v1, v2
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call float @llvm.canonicalize.f32(float %arg1)
%var2 = call float @llvm.canonicalize.f32(float %arg2)
%cmp1 = fcmp ule float %var1, %arg3
@@ -3351,26 +2971,16 @@ define i1 @test152(float %arg1, float %arg2, float %arg3) {
}
define i1 @test153(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test153:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test153:
-; GFX11NONANS: ; %bb.0:
-; GFX11NONANS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: test153:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%var1 = call double @llvm.canonicalize.f64(double %arg1)
%var2 = call double @llvm.canonicalize.f64(double %arg2)
%cmp1 = fcmp ult double %var1, %arg3
@@ -3387,5 +2997,4 @@ declare <2 x half> @llvm.canonicalize.v2f16(<2 x half>)
attributes #0 = { nounwind "amdgpu-ieee"="false" }
attributes #1 = { nounwind "no-nans-fp-math"="true" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX11NONANS-FAKE16: {{.*}}
-; GFX11NONANS-TRUE16: {{.*}}
+; GFX11: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps_nnan.ll b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps_nnan.ll
new file mode 100644
index 0000000000000..37ef7949fe5c9
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps_nnan.ll
@@ -0,0 +1,1449 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck %s -check-prefixes=GCN,GCN-TRUE16
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck %s -check-prefixes=GCN,GCN-FAKE16
+
+; The tests check the following optimization of DAGCombiner:
+; CMP(A,C)||CMP(B,C) => CMP(MIN/MAX(A,B), C)
+; CMP(A,C)&&CMP(B,C) => CMP(MIN/MAX(A,B), C)
+
+define i1 @test54(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test54:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %arg3
+ %cmp2 = fcmp nnan olt float %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test55(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test55:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ole double %arg1, %arg3
+ %cmp2 = fcmp nnan ole double %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test56(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test56:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ogt double %arg1, %arg3
+ %cmp2 = fcmp nnan ogt double %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test57(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test57:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan oge float %arg1, %arg3
+ %cmp2 = fcmp nnan oge float %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test58(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test58:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ugt double %arg1, %arg3
+ %cmp2 = fcmp nnan ugt double %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test59(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test59:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan uge float %arg1, %arg3
+ %cmp2 = fcmp nnan uge float %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test60(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test60:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ule float %arg1, %arg3
+ %cmp2 = fcmp nnan ule float %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test61(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test61:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ult double %arg1, %arg3
+ %cmp2 = fcmp nnan ult double %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test62(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test62:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, 1.0
+ %add2 = fadd nnan float %arg2, 2.0
+ %cmp1 = fcmp nnan olt float %add1, %arg3
+ %cmp2 = fcmp nnan olt float %add2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test63(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test63:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GCN-NEXT: v_add_f64 v[2:3], v[2:3], 2.0
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan double %arg1, 1.0
+ %add2 = fadd nnan double %arg2, 2.0
+ %cmp1 = fcmp nnan ole double %add1, %arg3
+ %cmp2 = fcmp nnan ole double %add2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test64(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GCN-NEXT: v_add_f64 v[2:3], v[2:3], 2.0
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan double %arg1, 1.0
+ %add2 = fadd nnan double %arg2, 2.0
+ %cmp1 = fcmp nnan ogt double %add1, %arg3
+ %cmp2 = fcmp nnan ogt double %add2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test65(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test65:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, 1.0
+ %add2 = fadd nnan float %arg2, 2.0
+ %cmp1 = fcmp nnan oge float %add1, %arg3
+ %cmp2 = fcmp nnan oge float %add2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test66(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test66:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GCN-NEXT: v_add_f64 v[2:3], v[2:3], 2.0
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan double %arg1, 1.0
+ %add2 = fadd nnan double %arg2, 2.0
+ %cmp1 = fcmp nnan ugt double %add1, %arg3
+ %cmp2 = fcmp nnan ugt double %add2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test67(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test67:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, 1.0
+ %add2 = fadd nnan float %arg2, 2.0
+ %cmp1 = fcmp nnan uge float %add1, %arg3
+ %cmp2 = fcmp nnan uge float %add2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test68(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test68:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, 1.0
+ %add2 = fadd nnan float %arg2, 2.0
+ %cmp1 = fcmp nnan ule float %add1, %arg3
+ %cmp2 = fcmp nnan ule float %add2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test69(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test69:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GCN-NEXT: v_add_f64 v[2:3], v[2:3], 2.0
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan double %arg1, 1.0
+ %add2 = fadd nnan double %arg2, 2.0
+ %cmp1 = fcmp nnan ult double %add1, %arg3
+ %cmp2 = fcmp nnan ult double %add2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test70(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test70:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan olt float %var1, %arg3
+ %cmp2 = fcmp nnan olt float %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test71(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test71:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ole double %var1, %arg3
+ %cmp2 = fcmp nnan ole double %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test72(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test72:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ogt double %var1, %arg3
+ %cmp2 = fcmp nnan ogt double %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test73(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test73:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan oge float %var1, %arg3
+ %cmp2 = fcmp nnan oge float %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test74(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test74:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ugt double %var1, %arg3
+ %cmp2 = fcmp nnan ugt double %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test75(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test75:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan uge float %var1, %arg3
+ %cmp2 = fcmp nnan uge float %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test76(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test76:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan ule float %var1, %arg3
+ %cmp2 = fcmp nnan ule float %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test77(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test77:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ult double %var1, %arg3
+ %cmp2 = fcmp nnan ult double %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test78(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test78:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %arg3
+ %cmp2 = fcmp nnan ogt float %arg3, %arg2
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test79(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test79:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ult float %arg1, %arg3
+ %cmp2 = fcmp nnan ugt float %arg3, %arg2
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test80(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test80:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, 1.0
+ %add2 = fadd nnan float %arg2, 2.0
+ %cmp1 = fcmp nnan oge float %add1, %arg3
+ %cmp2 = fcmp nnan ole float %arg3, %add2
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test81(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test81:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GCN-NEXT: v_add_f64 v[2:3], v[2:3], 2.0
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan double %arg1, 1.0
+ %add2 = fadd nnan double %arg2, 2.0
+ %cmp1 = fcmp nnan ugt double %add1, %arg3
+ %cmp2 = fcmp nnan ult double %arg3, %add2
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test82(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test82:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ole double %var1, %arg3
+ %cmp2 = fcmp nnan oge double %arg3, %var2
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test83(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test83:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan ule float %var1, %arg3
+ %cmp2 = fcmp nnan uge float %arg3, %var2
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test84(half %arg1, half %arg2, half %arg3) {
+; GCN-TRUE16-LABEL: test84:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l
+; GCN-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v0.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test84:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1
+; GCN-FAKE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan half @llvm.canonicalize.f16(half %arg1)
+ %var2 = call nnan half @llvm.canonicalize.f16(half %arg2)
+ %cmp1 = fcmp nnan olt half %var1, %arg3
+ %cmp2 = fcmp nnan olt half %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define <2 x i1> @test85(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; GCN-TRUE16-LABEL: test85:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-TRUE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-TRUE16-NEXT: v_pk_min_f16 v1, v0, v1
+; GCN-TRUE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v1.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v1.h, v2.h
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test85:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-FAKE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-FAKE16-NEXT: v_pk_min_f16 v0, v0, v1
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GCN-FAKE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v3, v1
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+ %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+ %cmp1 = fcmp nnan ole <2 x half> %var1, %arg3
+ %cmp2 = fcmp nnan ole <2 x half> %var2, %arg3
+ %or1 = or <2 x i1> %cmp1, %cmp2
+ ret <2 x i1> %or1
+}
+
+define <2 x i1> @test86(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; GCN-TRUE16-LABEL: test86:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-TRUE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-TRUE16-NEXT: v_pk_max_f16 v1, v0, v1
+; GCN-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v1.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v1.h, v2.h
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test86:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-FAKE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-FAKE16-NEXT: v_pk_max_f16 v0, v0, v1
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GCN-FAKE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v3, v1
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+ %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+ %cmp1 = fcmp nnan ogt <2 x half> %var1, %arg3
+ %cmp2 = fcmp nnan ogt <2 x half> %var2, %arg3
+ %or1 = or <2 x i1> %cmp1, %cmp2
+ ret <2 x i1> %or1
+}
+
+define i1 @test87(half %arg1, half %arg2, half %arg3) {
+; GCN-TRUE16-LABEL: test87:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l
+; GCN-TRUE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test87:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1
+; GCN-FAKE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan half @llvm.canonicalize.f16(half %arg1)
+ %var2 = call nnan half @llvm.canonicalize.f16(half %arg2)
+ %cmp1 = fcmp nnan oge half %var1, %arg3
+ %cmp2 = fcmp nnan oge half %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define <2 x i1> @test88(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; GCN-TRUE16-LABEL: test88:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-TRUE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-TRUE16-NEXT: v_pk_min_f16 v1, v0, v1
+; GCN-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v1.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v1.h, v2.h
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test88:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-FAKE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-FAKE16-NEXT: v_pk_min_f16 v0, v0, v1
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GCN-FAKE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: v_cmp_gt_f16_e32 vcc_lo, v3, v1
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+ %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+ %cmp1 = fcmp nnan ugt <2 x half> %var1, %arg3
+ %cmp2 = fcmp nnan ugt <2 x half> %var2, %arg3
+ %and1 = and <2 x i1> %cmp1, %cmp2
+ ret <2 x i1> %and1
+}
+
+define i1 @test89(half %arg1, half %arg2, half %arg3) {
+; GCN-TRUE16-LABEL: test89:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l
+; GCN-TRUE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test89:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1
+; GCN-FAKE16-NEXT: v_cmp_ge_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan half @llvm.canonicalize.f16(half %arg1)
+ %var2 = call nnan half @llvm.canonicalize.f16(half %arg2)
+ %cmp1 = fcmp nnan uge half %var1, %arg3
+ %cmp2 = fcmp nnan uge half %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test90(half %arg1, half %arg2, half %arg3) {
+; GCN-TRUE16-LABEL: test90:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l
+; GCN-TRUE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v0.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test90:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1
+; GCN-FAKE16-NEXT: v_cmp_le_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan half @llvm.canonicalize.f16(half %arg1)
+ %var2 = call nnan half @llvm.canonicalize.f16(half %arg2)
+ %cmp1 = fcmp nnan ule half %var1, %arg3
+ %cmp2 = fcmp nnan ule half %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define <2 x i1> @test91(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; GCN-TRUE16-LABEL: test91:
+; GCN-TRUE16: ; %bb.0:
+; GCN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-TRUE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-TRUE16-NEXT: v_pk_max_f16 v1, v0, v1
+; GCN-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v1.l, v2.l
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v1.h, v2.h
+; GCN-TRUE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test91:
+; GCN-FAKE16: ; %bb.0:
+; GCN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT: v_pk_max_f16 v0, v0, v0
+; GCN-FAKE16-NEXT: v_pk_max_f16 v1, v1, v1
+; GCN-FAKE16-NEXT: v_pk_max_f16 v0, v0, v1
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GCN-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GCN-FAKE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: v_cmp_lt_f16_e32 vcc_lo, v3, v1
+; GCN-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+ %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+ %cmp1 = fcmp nnan ult <2 x half> %var1, %arg3
+ %cmp2 = fcmp nnan ult <2 x half> %var2, %arg3
+ %and1 = and <2 x i1> %cmp1, %cmp2
+ ret <2 x i1> %and1
+}
+
+define i1 @test107(float %arg1, float %arg2, float %arg3, float %C) {
+; GCN-LABEL: test107:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v3
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %C
+ %cmp2 = fcmp nnan olt float %arg2, %C
+ %cmp3 = fcmp nnan olt float %arg3, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %or2 = or i1 %or1, %cmp3
+ ret i1 %or2
+}
+
+define i1 @test108(float %arg1, float %arg2, float %arg3, float %C) {
+; GCN-LABEL: test108:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v3
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ult float %arg1, %C
+ %cmp2 = fcmp nnan ult float %arg2, %C
+ %cmp3 = fcmp nnan ult float %arg3, %C
+ %and1 = and i1 %cmp1, %cmp2
+ %and2 = and i1 %and1, %cmp3
+ ret i1 %and2
+}
+
+define i1 @test109(float %arg1, float %arg2, float %arg3, float %arg4, float %C) {
+; GCN-LABEL: test109:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
+; GCN-NEXT: v_cmp_gt_f32_e64 s0, v1, v4
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %C
+ %cmp2 = fcmp nnan olt float %arg2, %C
+ %cmp3 = fcmp nnan ogt float %arg3, %C
+ %cmp4 = fcmp nnan ogt float %arg4, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %or2 = or i1 %cmp3, %cmp4
+ %or3 = or i1 %or1, %or2
+ ret i1 %or3
+}
+
+define i1 @test110(float %arg1, float %arg2, float %arg3, float %arg4, float %C1, float %C2, float %C3, float %C4, float %C) #0 {
+; GCN-LABEL: test110:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v5
+; GCN-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v3, v3, v7
+; GCN-NEXT: v_dual_max_f32 v0, v0, v1 :: v_dual_min_f32 v1, v2, v3
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cmp_gt_f32_e64 s0, v1, v8
+; GCN-NEXT: s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, %C1
+ %add2 = fadd nnan float %arg2, %C2
+ %add3 = fadd nnan float %arg3, %C3
+ %add4 = fadd nnan float %arg4, %C4
+ %cmp1 = fcmp nnan ult float %add1, %C
+ %cmp2 = fcmp nnan ult float %add2, %C
+ %cmp3 = fcmp nnan ugt float %add3, %C
+ %cmp4 = fcmp nnan ugt float %add4, %C
+ %or1 = and i1 %cmp1, %cmp2
+ %or2 = and i1 %cmp3, %cmp4
+ %or3 = and i1 %or1, %or2
+ ret i1 %or3
+}
+
+define i1 @test111(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %C) {
+; GCN-LABEL: test111:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v2, v2, v3
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_min_f32_e32 v0, v0, v4
+; GCN-NEXT: v_min3_f32 v0, v5, v6, v0
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %C
+ %cmp2 = fcmp nnan olt float %arg2, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %cmp3 = fcmp nnan olt float %arg3, %C
+ %cmp4 = fcmp nnan olt float %arg4, %C
+ %or2 = or i1 %cmp3, %cmp4
+ %cmp5 = fcmp nnan olt float %arg5, %C
+ %or3 = or i1 %or1, %or2
+ %or4 = or i1 %or3, %cmp5
+ %cmp6 = fcmp nnan olt float %arg6, %C
+ %cmp7 = fcmp nnan olt float %arg7, %C
+ %or5 = or i1 %cmp6, %cmp7
+ %cmp8 = fcmp nnan olt float %arg8, %C
+ %or6 = or i1 %or5, %or4
+ %or7 = or i1 %or6, %cmp8
+ ret i1 %or6
+}
+
+define i1 @test112(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %C) {
+; GCN-LABEL: test112:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v2, v2, v3
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_min_f32_e32 v0, v0, v4
+; GCN-NEXT: v_min3_f32 v0, v5, v6, v0
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %C
+ %cmp2 = fcmp nnan olt float %arg2, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %cmp3 = fcmp nnan olt float %arg3, %C
+ %cmp4 = fcmp nnan olt float %arg4, %C
+ %or2 = or i1 %cmp3, %cmp4
+ %cmp5 = fcmp nnan ult float %arg5, %C
+ %or3 = or i1 %or1, %or2
+ %or4 = or i1 %or3, %cmp5
+ %cmp6 = fcmp nnan olt float %arg6, %C
+ %cmp7 = fcmp nnan olt float %arg7, %C
+ %or5 = or i1 %cmp6, %cmp7
+ %cmp8 = fcmp nnan ult float %arg8, %C
+ %or6 = or i1 %or5, %or4
+ %or7 = or i1 %or6, %cmp8
+ ret i1 %or6
+}
+
+define i1 @test113(float %arg1, float %arg2, float %arg3, float %C) {
+; GCN-LABEL: test113:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_maxmin_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v3
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ult float %arg1, %C
+ %cmp2 = fcmp nnan ult float %arg2, %C
+ %cmp3 = fcmp nnan olt float %arg3, %C
+ %and1 = and i1 %cmp1, %cmp2
+ %or1 = or i1 %and1, %cmp3
+ ret i1 %or1
+}
+
+define i1 @test114(float %arg1, float %arg2, float %arg3, float %C) {
+; GCN-LABEL: test114:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v2, v3
+; GCN-NEXT: v_cmp_gt_f32_e64 s0, v0, v3
+; GCN-NEXT: s_and_b32 s0, s0, vcc_lo
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ogt float %arg1, %C
+ %cmp2 = fcmp nnan ogt float %arg2, %C
+ %cmp3 = fcmp nnan ult float %arg3, %C
+ %and1 = or i1 %cmp1, %cmp2
+ %or1 = and i1 %and1, %cmp3
+ ret i1 %or1
+}
+
+define i1 @test115(float %arg1, float %arg2, float %arg3, float %arg4, float %C) {
+; GCN-LABEL: test115:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v2, v2, v3
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v4
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %C
+ %cmp2 = fcmp nnan olt float %arg2, %C
+ %var3 = call nnan float @llvm.canonicalize.f32(float %arg3)
+ %var4 = call nnan float @llvm.canonicalize.f32(float %arg4)
+ %cmp3 = fcmp nnan ult float %var3, %C
+ %cmp4 = fcmp nnan ult float %var4, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %and1 = and i1 %cmp3, %cmp4
+ %or2 = or i1 %or1, %and1
+ ret i1 %or2
+}
+
+define i1 @test116(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %arg9, float %arg10, float %C) {
+; GCN-LABEL: test116:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v8, v8, v9
+; GCN-NEXT: v_dual_max_f32 v2, v2, v3 :: v_dual_min_f32 v3, v4, v5
+; GCN-NEXT: v_max_f32_e32 v4, v6, v7
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v8
+; GCN-NEXT: v_cmp_gt_f32_e32 vcc_lo, v2, v10
+; GCN-NEXT: v_cmp_lt_f32_e64 s0, v3, v10
+; GCN-NEXT: v_cmp_gt_f32_e64 s1, v4, v10
+; GCN-NEXT: v_cmp_lt_f32_e64 s2, v0, v10
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: s_or_b32 s1, s2, vcc_lo
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %C
+ %cmp2 = fcmp nnan olt float %arg2, %C
+ %cmp3 = fcmp nnan ogt float %arg3, %C
+ %cmp4 = fcmp nnan ogt float %arg4, %C
+ %cmp5 = fcmp nnan olt float %arg5, %C
+ %cmp6 = fcmp nnan olt float %arg6, %C
+ %cmp7 = fcmp nnan ogt float %arg7, %C
+ %cmp8 = fcmp nnan ogt float %arg8, %C
+ %cmp9 = fcmp nnan olt float %arg9, %C
+ %cmp10 = fcmp nnan olt float %arg10, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %or2 = or i1 %cmp3, %cmp4
+ %or3 = or i1 %cmp5, %cmp6
+ %or4 = or i1 %cmp7, %cmp8
+ %or5 = or i1 %cmp9, %cmp10
+ %or6 = or i1 %or1, %or2
+ %or7 = or i1 %or3, %or4
+ %or8 = or i1 %or5, %or6
+ %or9 = or i1 %or7, %or8
+ ret i1 %or9
+}
+
+define i1 @test117(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %arg9, float %arg10, float %arg11, float %arg12, float %C1, float %C2) {
+; GCN-LABEL: test117:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v6, v6, v7
+; GCN-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v10, v11
+; GCN-NEXT: v_min_f32_e32 v2, v2, v3
+; GCN-NEXT: v_min3_f32 v3, v4, v5, v6
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v12
+; GCN-NEXT: v_min3_f32 v0, v8, v9, v1
+; GCN-NEXT: v_cmp_lt_f32_e64 s0, v2, v13
+; GCN-NEXT: v_cmp_lt_f32_e64 s1, v3, v13
+; GCN-NEXT: v_cmp_lt_f32_e64 s2, v0, v12
+; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: s_or_b32 s0, s2, s0
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %C1
+ %cmp2 = fcmp nnan olt float %arg2, %C1
+ %cmp3 = fcmp nnan olt float %arg3, %C2
+ %cmp4 = fcmp nnan olt float %arg4, %C2
+ %cmp5 = fcmp nnan olt float %arg5, %C2
+ %cmp6 = fcmp nnan olt float %arg6, %C2
+ %cmp7 = fcmp nnan olt float %arg7, %C2
+ %cmp8 = fcmp nnan olt float %arg8, %C2
+ %cmp9 = fcmp nnan olt float %arg9, %C1
+ %cmp10 = fcmp nnan olt float %arg10, %C1
+ %cmp11 = fcmp nnan olt float %arg11, %C1
+ %cmp12 = fcmp nnan olt float %arg12, %C1
+ %or1 = or i1 %cmp1, %cmp2
+ %or2 = or i1 %cmp3, %cmp4
+ %or3 = or i1 %cmp5, %cmp6
+ %or4 = or i1 %cmp7, %cmp8
+ %or5 = or i1 %cmp9, %cmp10
+ %or6 = or i1 %cmp11, %cmp12
+ %or7 = or i1 %or1, %or2
+ %or8 = or i1 %or3, %or4
+ %or9 = or i1 %or5, %or6
+ %or10 = or i1 %or7, %or8
+ %or11 = or i1 %or9, %or10
+ ret i1 %or11
+}
+
+
+define i1 @test118(float %arg1, float %arg2, float %arg3, float %arg4, float %C1, float %C2, float %C3, float %C4, float %C) #0 {
+; GCN-LABEL: test118:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v5
+; GCN-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v3, v3, v7
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_max3_f32 v0, v0, v2, v3
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, %C1
+ %add2 = fadd nnan float %arg2, %C2
+ %add3 = fadd nnan float %arg3, %C3
+ %add4 = fadd nnan float %arg4, %C4
+ %cmp1 = fcmp nnan ult float %add1, %C
+ %cmp2 = fcmp nnan ult float %add2, %C
+ %cmp3 = fcmp nnan ult float %add3, %C
+ %cmp4 = fcmp nnan ult float %add4, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %and1 = and i1 %cmp3, %cmp4
+ %and2 = and i1 %or1, %and1
+ ret i1 %and2
+}
+
+define i1 @test119(float %arg1, float %arg2, float %arg3, float %arg4, float %C1, float %C2, float %C3, float %C4, float %C) #0 {
+; GCN-LABEL: test119:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v3, v3, v7
+; GCN-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v5
+; GCN-NEXT: v_min_f32_e32 v2, v2, v3
+; GCN-NEXT: v_minmax_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, %C1
+ %add2 = fadd nnan float %arg2, %C2
+ %add3 = fadd nnan float %arg3, %C3
+ %add4 = fadd nnan float %arg4, %C4
+ %cmp1 = fcmp nnan ult float %add1, %C
+ %cmp2 = fcmp nnan ult float %add2, %C
+ %cmp3 = fcmp nnan ult float %add3, %C
+ %cmp4 = fcmp nnan ult float %add4, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %and1 = or i1 %cmp3, %cmp4
+ %and2 = and i1 %or1, %and1
+ ret i1 %and2
+}
+
+define i1 @test120(float %arg1, float %arg2, float %arg3, float %arg4, float %C1, float %C2, float %C3, float %C4, float %C) #0 {
+; GCN-LABEL: test120:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v3, v3, v7
+; GCN-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v5
+; GCN-NEXT: v_max_f32_e32 v2, v2, v3
+; GCN-NEXT: v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, %C1
+ %add2 = fadd nnan float %arg2, %C2
+ %add3 = fadd nnan float %arg3, %C3
+ %add4 = fadd nnan float %arg4, %C4
+ %cmp1 = fcmp nnan ult float %add1, %C
+ %cmp2 = fcmp nnan ult float %add2, %C
+ %cmp3 = fcmp nnan ult float %add3, %C
+ %cmp4 = fcmp nnan ult float %add4, %C
+ %or1 = or i1 %cmp1, %cmp2
+ %and1 = and i1 %cmp3, %cmp4
+ %and2 = or i1 %or1, %and1
+ ret i1 %and2
+}
+
+define i1 @test121(float %arg1, float %arg2, float %arg3, float %arg4, float %C1, float %C2, float %C3, float %C4, float %C) #0 {
+; GCN-LABEL: test121:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v3, v3, v7
+; GCN-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v5
+; GCN-NEXT: v_max_f32_e32 v2, v2, v3
+; GCN-NEXT: v_maxmin_f32 v0, v0, v1, v2
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %add1 = fadd nnan float %arg1, %C1
+ %add2 = fadd nnan float %arg2, %C2
+ %add3 = fadd nnan float %arg3, %C3
+ %add4 = fadd nnan float %arg4, %C4
+ %cmp1 = fcmp nnan ult float %add1, %C
+ %cmp2 = fcmp nnan ult float %add2, %C
+ %cmp3 = fcmp nnan ult float %add3, %C
+ %cmp4 = fcmp nnan ult float %add4, %C
+ %or1 = and i1 %cmp1, %cmp2
+ %and1 = and i1 %cmp3, %cmp4
+ %and2 = or i1 %or1, %and1
+ ret i1 %and2
+}
+
+define i1 @test122(double %arg1, double %arg2, double %arg3) #1 {
+; GCN-LABEL: test122:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ult double %arg1, %arg3
+ %cmp2 = fcmp nnan ult double %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test123(double %arg1, double %arg2, double %arg3) #1 {
+; GCN-LABEL: test123:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ogt double %var1, %arg3
+ %cmp2 = fcmp nnan ogt double %var2, %arg3
+ %or1 = and i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test134(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test134:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %arg3
+ %cmp2 = fcmp nnan ogt float %arg3, %arg2
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test135(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test135:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ult float %arg1, %arg3
+ %cmp2 = fcmp nnan ugt float %arg3, %arg2
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test136(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test136:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ole double %var1, %arg3
+ %cmp2 = fcmp nnan oge double %arg3, %var2
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test137(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test137:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan ule float %var1, %arg3
+ %cmp2 = fcmp nnan uge float %arg3, %var2
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test138(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test138:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan olt float %arg1, %arg3
+ %cmp2 = fcmp nnan olt float %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test139(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test139:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ole double %arg1, %arg3
+ %cmp2 = fcmp nnan ole double %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test140(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test140:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ogt double %arg1, %arg3
+ %cmp2 = fcmp nnan ogt double %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test141(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test141:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan oge float %arg1, %arg3
+ %cmp2 = fcmp nnan oge float %arg2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test142(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test142:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ugt double %arg1, %arg3
+ %cmp2 = fcmp nnan ugt double %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test143(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test143:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan uge float %arg1, %arg3
+ %cmp2 = fcmp nnan uge float %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test144(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test144:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ule float %arg1, %arg3
+ %cmp2 = fcmp nnan ule float %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test145(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test145:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %cmp1 = fcmp nnan ult double %arg1, %arg3
+ %cmp2 = fcmp nnan ult double %arg2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test146(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test146:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan olt float %var1, %arg3
+ %cmp2 = fcmp nnan olt float %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test147(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test147:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ole double %var1, %arg3
+ %cmp2 = fcmp nnan ole double %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test148(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test148:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ogt double %var1, %arg3
+ %cmp2 = fcmp nnan ogt double %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test149(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test149:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan oge float %var1, %arg3
+ %cmp2 = fcmp nnan oge float %var2, %arg3
+ %and1 = and i1 %cmp1, %cmp2
+ ret i1 %and1
+}
+
+define i1 @test150(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test150:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ugt double %var1, %arg3
+ %cmp2 = fcmp nnan ugt double %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test151(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test151:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan uge float %var1, %arg3
+ %cmp2 = fcmp nnan uge float %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test152(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test152:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_min_f32_e32 v0, v0, v1
+; GCN-NEXT: v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+ %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+ %cmp1 = fcmp nnan ule float %var1, %arg3
+ %cmp2 = fcmp nnan ule float %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+define i1 @test153(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test153:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+ %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+ %cmp1 = fcmp nnan ult double %var1, %arg3
+ %cmp2 = fcmp nnan ult double %var2, %arg3
+ %or1 = or i1 %cmp1, %cmp2
+ ret i1 %or1
+}
+
+declare double @llvm.canonicalize.f64(double)
+declare float @llvm.canonicalize.f32(float)
+declare half @llvm.canonicalize.f16(half)
+declare <2 x half> @llvm.canonicalize.v2f16(<2 x half>)
+
+attributes #0 = { nounwind "amdgpu-ieee"="false" }
+attributes #1 = { nounwind "no-nans-fp-math"="true" }
diff --git a/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll
index f3a84e6e45260..0537e8c2ed59b 100644
--- a/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll
@@ -57,9 +57,9 @@ define amdgpu_kernel void @test_fmax_legacy_uge_f32_fast(ptr addrspace(1) %out,
; GCN-DAG: v_add_f32_e32 [[ADD_A:v[0-9]+]], 1.0, [[A]]
; GCN-DAG: v_add_f32_e32 [[ADD_B:v[0-9]+]], 2.0, [[B]]
-; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]]
+; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]]
-; VI: v_cmp_nlt_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
+; VI: v_cmp_ge_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[ADD_B]], [[ADD_A]]
diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
index 39eefa1879870..b478e9a0830eb 100644
--- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
@@ -81,9 +81,9 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out
; VI-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0
; VI-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0
-; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]]
+; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]]
-; VI: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
+; VI: v_cmp_le_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
; VI: v_cndmask_b32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]], vcc
define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1) %out, float %a, float %b) #0 {
%a.nnan = fadd nnan float %a, 1.0
diff --git a/llvm/test/CodeGen/AMDGPU/fold-fabs.ll b/llvm/test/CodeGen/AMDGPU/fold-fabs.ll
index 6ef89a4ccd485..525923a84dea2 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-fabs.ll
+++ b/llvm/test/CodeGen/AMDGPU/fold-fabs.ll
@@ -9,7 +9,7 @@ define float @fold_abs_in_branch(float %arg1, float %arg2) {
; GFX10-NEXT: s_mov_b32 s4, exec_lo
; GFX10-NEXT: v_add_f32_e32 v1, v0, v1
; GFX10-NEXT: v_add_f32_e64 v0, |v1|, |v1|
-; GFX10-NEXT: v_cmpx_nlt_f32_e32 1.0, v0
+; GFX10-NEXT: v_cmpx_ge_f32_e32 1.0, v0
; GFX10-NEXT: ; %bb.1: ; %if
; GFX10-NEXT: v_mul_f32_e64 v0, 0x3e4ccccd, |v1|
; GFX10-NEXT: ; %bb.2: ; %exit
@@ -40,7 +40,7 @@ define float @fold_abs_in_branch_multiple_users(float %arg1, float %arg2) {
; GFX10-NEXT: s_mov_b32 s4, exec_lo
; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
; GFX10-NEXT: v_add_f32_e64 v1, |v0|, |v0|
-; GFX10-NEXT: v_cmpx_nlt_f32_e32 1.0, v1
+; GFX10-NEXT: v_cmpx_ge_f32_e32 1.0, v1
; GFX10-NEXT: ; %bb.1: ; %if
; GFX10-NEXT: v_mul_f32_e64 v1, 0x3e4ccccd, |v0|
; GFX10-NEXT: ; %bb.2: ; %exit
@@ -126,7 +126,7 @@ define float @fold_abs_in_branch_fabs(float %arg1, float %arg2) {
; GFX10-NEXT: s_mov_b32 s4, exec_lo
; GFX10-NEXT: v_add_f32_e32 v1, v0, v1
; GFX10-NEXT: v_add_f32_e64 v0, |v1|, |v1|
-; GFX10-NEXT: v_cmpx_nlt_f32_e32 1.0, v0
+; GFX10-NEXT: v_cmpx_ge_f32_e32 1.0, v0
; GFX10-NEXT: ; %bb.1: ; %if
; GFX10-NEXT: v_mul_f32_e64 v0, 0x3e4ccccd, |v1|
; GFX10-NEXT: ; %bb.2: ; %exit
@@ -158,7 +158,7 @@ define float @fold_abs_in_branch_phi(float %arg1, float %arg2) {
; GFX10-NEXT: s_mov_b32 s4, exec_lo
; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
; GFX10-NEXT: v_add_f32_e64 v0, |v0|, |v0|
-; GFX10-NEXT: v_cmpx_nlt_f32_e32 1.0, v0
+; GFX10-NEXT: v_cmpx_ge_f32_e32 1.0, v0
; GFX10-NEXT: s_cbranch_execz .LBB5_3
; GFX10-NEXT: ; %bb.1: ; %header.preheader
; GFX10-NEXT: ; implicit-def: $vgpr0
@@ -203,7 +203,7 @@ define float @fold_neg_in_branch(float %arg1, float %arg2) {
; GFX10-NEXT: s_mov_b32 s4, exec_lo
; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
; GFX10-NEXT: v_mov_b32_e32 v1, v0
-; GFX10-NEXT: v_cmpx_nlt_f32_e32 1.0, v0
+; GFX10-NEXT: v_cmpx_ge_f32_e32 1.0, v0
; GFX10-NEXT: ; %bb.1: ; %if
; GFX10-NEXT: v_rcp_f32_e64 v1, -v0
; GFX10-NEXT: v_mul_f32_e64 v1, |v0|, v1
diff --git a/llvm/test/CodeGen/AMDGPU/transform-block-with-return-to-epilog.ll b/llvm/test/CodeGen/AMDGPU/transform-block-with-return-to-epilog.ll
index 0cf26be3ac24f..77d62a3a9a8cd 100644
--- a/llvm/test/CodeGen/AMDGPU/transform-block-with-return-to-epilog.ll
+++ b/llvm/test/CodeGen/AMDGPU/transform-block-with-return-to-epilog.ll
@@ -105,7 +105,7 @@ define amdgpu_ps { <4 x float> } @test_return_to_epilog_with_optimized_kill(floa
; GCN-NEXT: {{ $}}
; GCN-NEXT: renamable $vgpr1 = nofpexcept V_RCP_F32_e32 $vgpr0, implicit $mode, implicit $exec
; GCN-NEXT: $sgpr0_sgpr1 = S_MOV_B64 $exec
- ; GCN-NEXT: nofpexcept V_CMP_NGT_F32_e32 0, killed $vgpr1, implicit-def $vcc, implicit $mode, implicit $exec
+ ; GCN-NEXT: nofpexcept V_CMP_LE_F32_e32 0, killed $vgpr1, implicit-def $vcc, implicit $mode, implicit $exec
; GCN-NEXT: $sgpr2_sgpr3 = S_AND_SAVEEXEC_B64 killed $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
; GCN-NEXT: renamable $sgpr2_sgpr3 = S_XOR_B64 $exec, killed renamable $sgpr2_sgpr3, implicit-def dead $scc
; GCN-NEXT: S_CBRANCH_EXECNZ %bb.3, implicit $exec
>From 1c94fbe1e2510e260140da0fc849165ff49e9195 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Mon, 1 Dec 2025 10:27:43 +0800
Subject: [PATCH 09/12] fix setcc-fp in rv
---
llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll | 224 ++++++++++++------------
1 file changed, 112 insertions(+), 112 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
index 5cf8ae5cb18f7..e6fa54c11d71d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
@@ -58,7 +58,7 @@ define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_oeq_vv_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_oeq_vf_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
@@ -129,7 +129,7 @@ define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ogt_vv_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -142,7 +142,7 @@ define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ogt_vf_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
@@ -200,7 +200,7 @@ define <vscale x 8 x i1> @fcmp_oge_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_oge_vv_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -213,7 +213,7 @@ define <vscale x 8 x i1> @fcmp_oge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_oge_vf_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
@@ -271,7 +271,7 @@ define <vscale x 8 x i1> @fcmp_olt_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_olt_vv_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -284,7 +284,7 @@ define <vscale x 8 x i1> @fcmp_olt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_olt_vf_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
@@ -342,7 +342,7 @@ define <vscale x 8 x i1> @fcmp_ole_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ole_vv_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -355,7 +355,7 @@ define <vscale x 8 x i1> @fcmp_ole_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ole_vf_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
@@ -419,7 +419,7 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_one_vv_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -432,7 +432,7 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_one_vf_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
@@ -502,7 +502,7 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ord_vv_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -519,7 +519,7 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ord_vf_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
@@ -587,7 +587,7 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ueq_vv_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -600,7 +600,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ueq_vf_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
@@ -661,7 +661,7 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ugt_vv_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -674,7 +674,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ugt_vf_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
@@ -735,7 +735,7 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_uge_vv_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -748,7 +748,7 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_uge_vf_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
@@ -809,7 +809,7 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ult_vv_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -822,7 +822,7 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ult_vf_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
@@ -883,7 +883,7 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ule_vv_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -896,7 +896,7 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ule_vf_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
@@ -954,7 +954,7 @@ define <vscale x 8 x i1> @fcmp_une_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_une_vv_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -967,7 +967,7 @@ define <vscale x 8 x i1> @fcmp_une_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_une_vf_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
@@ -1037,7 +1037,7 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, <vscale x 8 x bfloat> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_uno_vv_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1054,7 +1054,7 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> nofpclass(nan) %va, bfloat nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_uno_vf_nxv8bf16_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
@@ -1134,7 +1134,7 @@ define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_oeq_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1153,7 +1153,7 @@ define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_oeq_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1235,7 +1235,7 @@ define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_ogt_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1254,7 +1254,7 @@ define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_ogt_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1336,7 +1336,7 @@ define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_oge_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1355,7 +1355,7 @@ define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_oge_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1437,7 +1437,7 @@ define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_olt_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1456,7 +1456,7 @@ define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_olt_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1538,7 +1538,7 @@ define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_ole_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1557,7 +1557,7 @@ define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_ole_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1651,7 +1651,7 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_one_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1670,7 +1670,7 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_one_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1772,7 +1772,7 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_ord_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1797,7 +1797,7 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_ord_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1898,7 +1898,7 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_ueq_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -1917,7 +1917,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_ueq_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2005,7 +2005,7 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_ugt_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2024,7 +2024,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_ugt_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2112,7 +2112,7 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_uge_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2131,7 +2131,7 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_uge_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2219,7 +2219,7 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_ult_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2238,7 +2238,7 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_ult_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2326,7 +2326,7 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_ule_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2345,7 +2345,7 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_ule_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2427,7 +2427,7 @@ define <vscale x 8 x i1> @fcmp_une_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_une_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2446,7 +2446,7 @@ define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_une_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2548,7 +2548,7 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, <vscale x 8 x half> nofpclass(nan) %vb) {
; ZVFH-LABEL: fcmp_uno_vv_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2573,7 +2573,7 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16_nonans(<vscale x 8 x half> nofpclass(nan) %va, half nofpclass(nan) %b) {
; ZVFH-LABEL: fcmp_uno_vf_nxv8f16_nonans:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
@@ -2635,7 +2635,7 @@ define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_oeq_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -2645,7 +2645,7 @@ define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_oeq_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -2691,7 +2691,7 @@ define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ogt_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -2701,7 +2701,7 @@ define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ogt_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -2747,7 +2747,7 @@ define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_oge_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -2757,7 +2757,7 @@ define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_oge_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -2803,7 +2803,7 @@ define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_olt_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -2813,7 +2813,7 @@ define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_olt_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -2859,7 +2859,7 @@ define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ole_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -2869,7 +2869,7 @@ define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ole_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -2921,7 +2921,7 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_one_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -2931,7 +2931,7 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_one_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -2985,7 +2985,7 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ord_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -2997,7 +2997,7 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ord_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3052,7 +3052,7 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ueq_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3062,7 +3062,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ueq_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3111,7 +3111,7 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ugt_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3121,7 +3121,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ugt_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3170,7 +3170,7 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_uge_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3180,7 +3180,7 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_uge_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3229,7 +3229,7 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ult_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3239,7 +3239,7 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ult_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3288,7 +3288,7 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ule_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3298,7 +3298,7 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ule_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3344,7 +3344,7 @@ define <vscale x 8 x i1> @fcmp_une_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_une_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3354,7 +3354,7 @@ define <vscale x 8 x i1> @fcmp_une_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_une_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3408,7 +3408,7 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f32(<vscale x 8 x float> %va, float %b
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, <vscale x 8 x float> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_uno_vv_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3420,7 +3420,7 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32_nonans(<vscale x 8 x float> nofpclass(nan) %va, float nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_uno_vf_nxv8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
@@ -3469,7 +3469,7 @@ define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_oeq_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3479,7 +3479,7 @@ define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_oeq_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3525,7 +3525,7 @@ define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ogt_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3535,7 +3535,7 @@ define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ogt_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3581,7 +3581,7 @@ define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_oge_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3591,7 +3591,7 @@ define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_oge_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3637,7 +3637,7 @@ define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_olt_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3647,7 +3647,7 @@ define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_olt_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3693,7 +3693,7 @@ define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ole_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3703,7 +3703,7 @@ define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ole_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3755,7 +3755,7 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_one_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3765,7 +3765,7 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_one_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3819,7 +3819,7 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ord_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3831,7 +3831,7 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ord_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3886,7 +3886,7 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ueq_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3896,7 +3896,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ueq_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3945,7 +3945,7 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ugt_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -3955,7 +3955,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ugt_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4004,7 +4004,7 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_uge_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4014,7 +4014,7 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_uge_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4063,7 +4063,7 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ult_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4073,7 +4073,7 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ult_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4122,7 +4122,7 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_ule_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4132,7 +4132,7 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_ule_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4178,7 +4178,7 @@ define <vscale x 8 x i1> @fcmp_une_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_une_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4188,7 +4188,7 @@ define <vscale x 8 x i1> @fcmp_une_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_une_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_une_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4242,7 +4242,7 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f64(<vscale x 8 x double> %va, double
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, <vscale x 8 x double> nofpclass(nan) %vb) {
; CHECK-LABEL: fcmp_uno_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
@@ -4254,7 +4254,7 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
ret <vscale x 8 x i1> %vc
}
-define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64_nonans(<vscale x 8 x double> nofpclass(nan) %va, double nofpclass(nan) %b) {
; CHECK-LABEL: fcmp_uno_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
>From 2270a5c5c1c2ff8ea5e1881b03cd985a5127d0f7 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Mon, 1 Dec 2025 10:47:47 +0800
Subject: [PATCH 10/12] Regenerate test/CodeGen/X86/avx10_2-cmp.ll
---
llvm/test/CodeGen/X86/avx10_2-cmp.ll | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx10_2-cmp.ll b/llvm/test/CodeGen/X86/avx10_2-cmp.ll
index 566ce533683f7..8117345d9de04 100644
--- a/llvm/test/CodeGen/X86/avx10_2-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx10_2-cmp.ll
@@ -281,14 +281,14 @@ define i1 @constrained_fcmp() {
; X64-LABEL: constrained_fcmp:
; X64: # %bb.0: # %entry
; X64-NEXT: vxorpd %xmm0, %xmm0, %xmm0
-; X64-NEXT: vucomxsd %xmm0, %xmm0
+; X64-NEXT: vcomisd %xmm0, %xmm0
; X64-NEXT: setne %al
; X64-NEXT: retq
;
; X86-LABEL: constrained_fcmp:
; X86: # %bb.0: # %entry
; X86-NEXT: vxorpd %xmm0, %xmm0, %xmm0
-; X86-NEXT: vucomxsd %xmm0, %xmm0
+; X86-NEXT: vcomisd %xmm0, %xmm0
; X86-NEXT: setne %al
; X86-NEXT: retl
entry:
>From 6000daecb6fa6c77010276d465d3acf086e82e8c Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Mon, 1 Dec 2025 11:16:06 +0800
Subject: [PATCH 11/12] fix tests
---
llvm/test/CodeGen/AArch64/known-never-nan.ll | 2 +-
llvm/test/CodeGen/AMDGPU/dagcombine-select.ll | 4 ++--
llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll | 4 ++--
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/known-never-nan.ll b/llvm/test/CodeGen/AArch64/known-never-nan.ll
index d024f713a86ca..d522449484981 100644
--- a/llvm/test/CodeGen/AArch64/known-never-nan.ll
+++ b/llvm/test/CodeGen/AArch64/known-never-nan.ll
@@ -12,7 +12,7 @@ define float @fmaxnm(i32 %i1, i32 %i2) #0 {
; CHECK-NEXT: fadd s0, s0, s2
; CHECK-NEXT: fadd s1, s1, s3
; CHECK-NEXT: fcmp s0, s1
-; CHECK-NEXT: fcsel s0, s0, s1, pl
+; CHECK-NEXT: fcsel s0, s0, s1, ge
; CHECK-NEXT: ret
%f1 = uitofp i32 %i1 to float
%fadd1 = fadd float %f1, 11.0
diff --git a/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll b/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
index 39365fe7b6f15..97db09f963d93 100644
--- a/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
+++ b/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
@@ -591,7 +591,7 @@ define amdgpu_kernel void @frem_constant_sel_constants(ptr addrspace(1) %p, i1 %
; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT: v_cndmask_b32_e64 v1, v0, -4.0, s[0:1]
; GFX9-NEXT: s_mov_b32 s0, 0x40a00000
-; GFX9-NEXT: v_cmp_nlt_f32_e64 s[2:3], |v1|, s0
+; GFX9-NEXT: v_cmp_ge_f32_e64 s[2:3], |v1|, s0
; GFX9-NEXT: s_and_b64 vcc, exec, s[2:3]
; GFX9-NEXT: s_cbranch_vccz .LBB26_2
; GFX9-NEXT: ; %bb.1: ; %frem.else
@@ -665,7 +665,7 @@ define amdgpu_kernel void @frem_constant_sel_constants(ptr addrspace(1) %p, i1 %
; GFX942-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX942-NEXT: v_cndmask_b32_e64 v1, v0, -4.0, s[0:1]
; GFX942-NEXT: s_mov_b32 s0, 0x40a00000
-; GFX942-NEXT: v_cmp_nlt_f32_e64 s[2:3], |v1|, s0
+; GFX942-NEXT: v_cmp_ge_f32_e64 s[2:3], |v1|, s0
; GFX942-NEXT: s_and_b64 vcc, exec, s[2:3]
; GFX942-NEXT: s_cbranch_vccz .LBB26_2
; GFX942-NEXT: ; %bb.1: ; %frem.else
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
index db08cb132a3d7..84aae4ed8e557 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
@@ -1728,7 +1728,7 @@ define amdgpu_kernel void @fnge_select_f32_multi_use_regression(float %.i2369) {
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_cmp_nlt_f32_e64 s[0:1], s0, 0
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; GCN-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0
+; GCN-NEXT: v_cmp_lt_f32_e32 vcc, 0, v0
; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v0, vcc
; GCN-NEXT: v_mul_f32_e64 v0, -v0, v1
; GCN-NEXT: v_cmp_le_f32_e32 vcc, 0, v0
@@ -1742,7 +1742,7 @@ define amdgpu_kernel void @fnge_select_f32_multi_use_regression(float %.i2369) {
; GFX11-NEXT: v_cmp_nlt_f32_e64 s0, s0, 0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT: v_cmp_nge_f32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cmp_lt_f32_e32 vcc_lo, 0, v0
; GFX11-NEXT: v_cndmask_b32_e32 v1, 0, v0, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mul_f32_e64 v0, -v0, v1
>From a5d84db3a90a862d9ea6bf1ecde53928c1ebd2fd Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Mon, 1 Dec 2025 16:55:10 +0800
Subject: [PATCH 12/12] Fix CodeGen/ARM/fp16-vminmaxnm-safe.ll
---
llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll b/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll
index 52fe5ce1a8a5f..3dcc828167d16 100644
--- a/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll
+++ b/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll
@@ -483,9 +483,9 @@ define half @fp16_vminmaxnm_neg0(half %a) {
; CHECK-NEXT: vldr.16 s0, .LCPI23_0
; CHECK-NEXT: vmov.f16 s2, r0
; CHECK-NEXT: vminnm.f16 s2, s2, s0
-; CHECK-NEXT: vcmp.f16 s0, s2
+; CHECK-NEXT: vcmp.f16 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vselge.f16 s0, s0, s2
+; CHECK-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
More information about the llvm-commits
mailing list