[llvm] [ValueTracking] Do not use FMF from fcmp (PR #142266)
Yingwei Zheng via llvm-commits
llvm-commits at lists.llvm.org
Sat May 31 06:30:30 PDT 2025
https://github.com/dtcxzyw updated https://github.com/llvm/llvm-project/pull/142266
From 8947c32197e33abe332187650e50cc3985456e4b Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Sat, 31 May 2025 19:01:13 +0800
Subject: [PATCH 1/2] [ValueTracking] Do not use FMF from fcmp
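
matchDecomposedSelectPattern previously took all fast-math flags from
the compare instruction. Flags on an fcmp only make guarantees about
the comparison itself, so honoring e.g. nsz from the compare can claim
a min/max pattern that the select does not actually permit. Take the
flags from the select instead (when it is an FPMathOperator); only
nnan is still inferred from the fcmp, since nnan on the fcmp
guarantees that its operands, the same values being selected, are not
NaN.

A minimal sketch of the difference, adapted from the updated PowerPC
test: this select is no longer treated as an fmaxnum,

  %0 = fcmp nnan nsz oge <4 x float> %a, %b
  %1 = select <4 x i1> %0, <4 x float> %a, <4 x float> %b

while the variant that carries nsz on the select itself still is:

  %0 = fcmp nnan nsz oge <4 x float> %a, %b
  %1 = select nsz <4 x i1> %0, <4 x float> %a, <4 x float> %b
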
---
llvm/include/llvm/Analysis/ValueTracking.h | 3 +-
llvm/lib/Analysis/ValueTracking.cpp | 13 +-
.../AMDGPU/select-flags-to-fmin-fmax.ll | 138 ++++++++----------
llvm/test/CodeGen/PowerPC/vec-min-max.ll | 8 +-
.../AArch64/predicated-reduction.ll | 42 +++---
llvm/unittests/Analysis/ValueTrackingTest.cpp | 11 +-
6 files changed, 98 insertions(+), 117 deletions(-)
diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index d81c3f10bba90..feb7a1fa2cb35 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -889,7 +889,8 @@ inline SelectPatternResult matchSelectPattern(const Value *V, const Value *&LHS,
/// predicate and given values as its true/false operands would match.
LLVM_ABI SelectPatternResult matchDecomposedSelectPattern(
CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
- Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);
+ FastMathFlags FMF = FastMathFlags(), Instruction::CastOps *CastOp = nullptr,
+ unsigned Depth = 0);
/// Determine the pattern for predicate `X Pred Y ? X : Y`.
LLVM_ABI SelectPatternResult getSelectPattern(
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index fc19b2ccf7964..253b8bd8f7bc5 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -8905,19 +8905,20 @@ SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
Value *TrueVal = SI->getTrueValue();
Value *FalseVal = SI->getFalseValue();
- return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
- CastOp, Depth);
+ return llvm::matchDecomposedSelectPattern(
+ CmpI, TrueVal, FalseVal, LHS, RHS,
+ isa<FPMathOperator>(SI) ? SI->getFastMathFlags() : FastMathFlags(),
+ CastOp, Depth);
}
SelectPatternResult llvm::matchDecomposedSelectPattern(
CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
- Instruction::CastOps *CastOp, unsigned Depth) {
+ FastMathFlags FMF, Instruction::CastOps *CastOp, unsigned Depth) {
CmpInst::Predicate Pred = CmpI->getPredicate();
Value *CmpLHS = CmpI->getOperand(0);
Value *CmpRHS = CmpI->getOperand(1);
- FastMathFlags FMF;
- if (isa<FPMathOperator>(CmpI))
- FMF = CmpI->getFastMathFlags();
+ if (isa<FPMathOperator>(CmpI) && CmpI->hasNoNaNs())
+ FMF.setNoNaNs();
// Bail out early.
if (CmpI->isEquality())
diff --git a/llvm/test/CodeGen/AMDGPU/select-flags-to-fmin-fmax.ll b/llvm/test/CodeGen/AMDGPU/select-flags-to-fmin-fmax.ll
index f7bd5f8d5bfb4..39428dc448018 100644
--- a/llvm/test/CodeGen/AMDGPU/select-flags-to-fmin-fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-flags-to-fmin-fmax.ll
@@ -355,17 +355,15 @@ define <2 x float> @v_test_fmin_legacy_ule_v2f32_nnan_nsz_flag(<2 x float> %a, <
; GFX7-LABEL: v_test_fmin_legacy_ule_v2f32_nnan_nsz_flag:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_min_legacy_f32_e32 v0, v2, v0
-; GFX7-NEXT: v_min_legacy_f32_e32 v1, v3, v1
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v2
+; GFX7-NEXT: v_min_f32_e32 v1, v1, v3
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_test_fmin_legacy_ule_v2f32_nnan_nsz_flag:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_cmp_ngt_f32_e32 vcc, v0, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; GFX9-NEXT: v_cmp_ngt_f32_e32 vcc, v1, v3
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v2
+; GFX9-NEXT: v_min_f32_e32 v1, v1, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_test_fmin_legacy_ule_v2f32_nnan_nsz_flag:
@@ -375,12 +373,7 @@ define <2 x float> @v_test_fmin_legacy_ule_v2f32_nnan_nsz_flag(<2 x float> %a, <
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
-; GFX12-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v1, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX12-NEXT: v_dual_min_num_f32 v0, v0, v2 :: v_dual_min_num_f32 v1, v1, v3
; GFX12-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ule <2 x float> %a, %b
%val = select nnan nsz <2 x i1> %cmp, <2 x float> %a, <2 x float> %b
@@ -499,17 +492,15 @@ define <2 x float> @v_test_fmax_legacy_uge_v2f32_nnan_nsz_flag(<2 x float> %a, <
; GFX7-LABEL: v_test_fmax_legacy_uge_v2f32_nnan_nsz_flag:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_max_legacy_f32_e32 v0, v2, v0
-; GFX7-NEXT: v_max_legacy_f32_e32 v1, v3, v1
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v2
+; GFX7-NEXT: v_max_f32_e32 v1, v1, v3
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_test_fmax_legacy_uge_v2f32_nnan_nsz_flag:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_cmp_nlt_f32_e32 vcc, v0, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; GFX9-NEXT: v_cmp_nlt_f32_e32 vcc, v1, v3
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v2
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_test_fmax_legacy_uge_v2f32_nnan_nsz_flag:
@@ -519,12 +510,7 @@ define <2 x float> @v_test_fmax_legacy_uge_v2f32_nnan_nsz_flag(<2 x float> %a, <
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v0, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
-; GFX12-NEXT: v_cmp_nlt_f32_e32 vcc_lo, v1, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v2 :: v_dual_max_num_f32 v1, v1, v3
; GFX12-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp uge <2 x float> %a, %b
%val = select nnan nsz <2 x i1> %cmp, <2 x float> %a, <2 x float> %b
@@ -673,10 +659,10 @@ define half @v_test_fmin_legacy_ule_f16_nnan_nsz_flag(half %a, half %b) {
; GFX7-LABEL: v_test_fmin_legacy_ule_f16_nnan_nsz_flag:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
-; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -852,10 +838,10 @@ define half @v_test_fmax_legacy_uge_f16_nnan_nsz_flag(half %a, half %b) {
; GFX7-LABEL: v_test_fmax_legacy_uge_f16_nnan_nsz_flag:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
-; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -1097,16 +1083,16 @@ define <2 x half> @v_test_fmin_legacy_ule_v2f16_nnan_nsz_flag(<2 x half> %a, <2
; GFX7-LABEL: v_test_fmin_legacy_ule_v2f16_nnan_nsz_flag:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
-; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
-; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
-; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
-; GFX7-NEXT: v_min_legacy_f32_e32 v0, v2, v0
-; GFX7-NEXT: v_min_legacy_f32_e32 v1, v3, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v2
+; GFX7-NEXT: v_min_f32_e32 v1, v1, v3
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_test_fmin_legacy_ule_v2f16_nnan_nsz_flag:
@@ -1337,16 +1323,16 @@ define <2 x half> @v_test_fmax_legacy_uge_v2f16_nnan_nsz_flag(<2 x half> %a, <2
; GFX7-LABEL: v_test_fmax_legacy_uge_v2f16_nnan_nsz_flag:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
-; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
-; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
-; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
-; GFX7-NEXT: v_max_legacy_f32_e32 v0, v2, v0
-; GFX7-NEXT: v_max_legacy_f32_e32 v1, v3, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v2
+; GFX7-NEXT: v_max_f32_e32 v1, v1, v3
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_test_fmax_legacy_uge_v2f16_nnan_nsz_flag:
@@ -1667,26 +1653,26 @@ define <4 x half> @v_test_fmin_legacy_ule_v4f16_nnan_nsz_flag(<4 x half> %a, <4
; GFX7-LABEL: v_test_fmin_legacy_ule_v4f16_nnan_nsz_flag:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
-; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7
; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6
-; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
-; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5
-; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
-; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
-; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
-; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v6
+; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v7
-; GFX7-NEXT: v_min_legacy_f32_e32 v0, v4, v0
-; GFX7-NEXT: v_min_legacy_f32_e32 v1, v5, v1
-; GFX7-NEXT: v_min_legacy_f32_e32 v2, v6, v2
-; GFX7-NEXT: v_min_legacy_f32_e32 v3, v7, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v4
+; GFX7-NEXT: v_min_f32_e32 v1, v1, v5
+; GFX7-NEXT: v_min_f32_e32 v2, v2, v6
+; GFX7-NEXT: v_min_f32_e32 v3, v3, v7
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_test_fmin_legacy_ule_v4f16_nnan_nsz_flag:
@@ -2009,26 +1995,26 @@ define <4 x half> @v_test_fmax_legacy_uge_v4f16_nnan_nsz_flag(<4 x half> %a, <4
; GFX7-LABEL: v_test_fmax_legacy_uge_v4f16_nnan_nsz_flag:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
-; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7
; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6
-; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
-; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5
-; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
-; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
-; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
-; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v6
+; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v7
-; GFX7-NEXT: v_max_legacy_f32_e32 v0, v4, v0
-; GFX7-NEXT: v_max_legacy_f32_e32 v1, v5, v1
-; GFX7-NEXT: v_max_legacy_f32_e32 v2, v6, v2
-; GFX7-NEXT: v_max_legacy_f32_e32 v3, v7, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v4
+; GFX7-NEXT: v_max_f32_e32 v1, v1, v5
+; GFX7-NEXT: v_max_f32_e32 v2, v2, v6
+; GFX7-NEXT: v_max_f32_e32 v3, v3, v7
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_test_fmax_legacy_uge_v4f16_nnan_nsz_flag:
diff --git a/llvm/test/CodeGen/PowerPC/vec-min-max.ll b/llvm/test/CodeGen/PowerPC/vec-min-max.ll
index f204827005e29..8124fde2667dd 100644
--- a/llvm/test/CodeGen/PowerPC/vec-min-max.ll
+++ b/llvm/test/CodeGen/PowerPC/vec-min-max.ll
@@ -79,7 +79,7 @@ define <4 x float> @getsmaxf32(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: blr
entry:
%0 = fcmp nnan nsz oge <4 x float> %a, %b
- %1 = select <4 x i1> %0, <4 x float> %a, <4 x float> %b
+ %1 = select nsz <4 x i1> %0, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
}
@@ -90,7 +90,7 @@ define <2 x double> @getsmaxf64(<2 x double> %a, <2 x double> %b) {
; CHECK-NEXT: blr
entry:
%0 = fcmp nnan nsz oge <2 x double> %a, %b
- %1 = select <2 x i1> %0, <2 x double> %a, <2 x double> %b
+ %1 = select nsz <2 x i1> %0, <2 x double> %a, <2 x double> %b
ret <2 x double> %1
}
@@ -171,7 +171,7 @@ define <4 x float> @getsminf32(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: blr
entry:
%0 = fcmp nnan nsz ole <4 x float> %a, %b
- %1 = select <4 x i1> %0, <4 x float> %a, <4 x float> %b
+ %1 = select nsz <4 x i1> %0, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
}
@@ -182,7 +182,7 @@ define <2 x double> @getsminf64(<2 x double> %a, <2 x double> %b) {
; CHECK-NEXT: blr
entry:
%0 = fcmp nnan nsz ole <2 x double> %a, %b
- %1 = select <2 x i1> %0, <2 x double> %a, <2 x double> %b
+ %1 = select nsz <2 x i1> %0, <2 x double> %a, <2 x double> %b
ret <2 x double> %1
}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
index 57f68e4fc55f7..c7098d2ce96ce 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
@@ -13,7 +13,7 @@ define nofpclass(nan inf) double @monte_simple(i32 noundef %nblocks, i32 noundef
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[RAND_BLOCK_LENGTH]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[RAND_BLOCK_LENGTH]], 8
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[FOR_BODY_PREHEADER23:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[FOR_BODY_PREHEADER22:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 2147483640
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x double> poison, double [[Y]], i64 0
@@ -41,10 +41,8 @@ define nofpclass(nan inf) double @monte_simple(i32 noundef %nblocks, i32 noundef
; CHECK-NEXT: [[TMP9:%.*]] = fcmp fast ogt <4 x double> [[TMP7]], zeroinitializer
; CHECK-NEXT: [[TMP10:%.*]] = fmul fast <4 x double> [[TMP6]], [[TMP6]]
; CHECK-NEXT: [[TMP11:%.*]] = fmul fast <4 x double> [[TMP7]], [[TMP7]]
-; CHECK-NEXT: [[TMP24:%.*]] = fcmp fast ole <4 x double> [[TMP6]], splat (double -0.000000e+00)
-; CHECK-NEXT: [[TMP25:%.*]] = fcmp fast ole <4 x double> [[TMP7]], splat (double -0.000000e+00)
-; CHECK-NEXT: [[TMP12:%.*]] = select nnan ninf <4 x i1> [[TMP24]], <4 x double> splat (double -0.000000e+00), <4 x double> [[TMP6]]
-; CHECK-NEXT: [[TMP13:%.*]] = select nnan ninf <4 x i1> [[TMP25]], <4 x double> splat (double -0.000000e+00), <4 x double> [[TMP7]]
+; CHECK-NEXT: [[TMP12:%.*]] = select <4 x i1> [[TMP8]], <4 x double> [[TMP6]], <4 x double> splat (double -0.000000e+00)
+; CHECK-NEXT: [[TMP13:%.*]] = select <4 x i1> [[TMP9]], <4 x double> [[TMP7]], <4 x double> splat (double -0.000000e+00)
; CHECK-NEXT: [[TMP14]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI16]], [[TMP12]]
; CHECK-NEXT: [[TMP15]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI17]], [[TMP13]]
; CHECK-NEXT: [[TMP16:%.*]] = select <4 x i1> [[TMP8]], <4 x double> [[TMP10]], <4 x double> splat (double -0.000000e+00)
@@ -60,16 +58,16 @@ define nofpclass(nan inf) double @monte_simple(i32 noundef %nblocks, i32 noundef
; CHECK-NEXT: [[BIN_RDX21:%.*]] = fadd reassoc arcp contract afn <4 x double> [[TMP15]], [[TMP14]]
; CHECK-NEXT: [[TMP22:%.*]] = tail call reassoc arcp contract afn double @llvm.vector.reduce.fadd.v4f64(double -0.000000e+00, <4 x double> [[BIN_RDX21]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[WIDE_TRIP_COUNT]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END_LOOPEXIT:.*]], label %[[FOR_BODY_PREHEADER23]]
-; CHECK: [[FOR_BODY_PREHEADER23]]:
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END_LOOPEXIT:.*]], label %[[FOR_BODY_PREHEADER22]]
+; CHECK: [[FOR_BODY_PREHEADER22]]:
; CHECK-NEXT: [[INDVARS_IV_PH:%.*]] = phi i64 [ 0, %[[FOR_BODY_PREHEADER]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[V1_012_PH:%.*]] = phi double [ 0.000000e+00, %[[FOR_BODY_PREHEADER]] ], [ [[TMP21]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[V0_011_PH:%.*]] = phi double [ 0.000000e+00, %[[FOR_BODY_PREHEADER]] ], [ [[TMP22]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[V1_011_PH:%.*]] = phi double [ 0.000000e+00, %[[FOR_BODY_PREHEADER]] ], [ [[TMP21]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[V0_010_PH:%.*]] = phi double [ 0.000000e+00, %[[FOR_BODY_PREHEADER]] ], [ [[TMP22]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[INDVARS_IV_PH]], %[[FOR_BODY_PREHEADER23]] ]
-; CHECK-NEXT: [[V1_012:%.*]] = phi double [ [[V1_2:%.*]], %[[FOR_BODY]] ], [ [[V1_012_PH]], %[[FOR_BODY_PREHEADER23]] ]
-; CHECK-NEXT: [[V0_011:%.*]] = phi double [ [[V0_2:%.*]], %[[FOR_BODY]] ], [ [[V0_011_PH]], %[[FOR_BODY_PREHEADER23]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[INDVARS_IV_PH]], %[[FOR_BODY_PREHEADER22]] ]
+; CHECK-NEXT: [[V1_012:%.*]] = phi double [ [[V1_2:%.*]], %[[FOR_BODY]] ], [ [[V1_011_PH]], %[[FOR_BODY_PREHEADER22]] ]
+; CHECK-NEXT: [[V0_011:%.*]] = phi double [ [[V0_2:%.*]], %[[FOR_BODY]] ], [ [[V0_010_PH]], %[[FOR_BODY_PREHEADER22]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[SAMPLES]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to double
@@ -77,8 +75,7 @@ define nofpclass(nan inf) double @monte_simple(i32 noundef %nblocks, i32 noundef
; CHECK-NEXT: [[SUB:%.*]] = fsub fast double [[MUL]], [[Z]]
; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast ogt double [[SUB]], 0.000000e+00
; CHECK-NEXT: [[MUL3:%.*]] = fmul fast double [[SUB]], [[SUB]]
-; CHECK-NEXT: [[DOTINV:%.*]] = fcmp fast ole double [[SUB]], -0.000000e+00
-; CHECK-NEXT: [[ADD8:%.*]] = select nnan ninf i1 [[DOTINV]], double -0.000000e+00, double [[SUB]]
+; CHECK-NEXT: [[ADD8:%.*]] = select i1 [[CMP1]], double [[SUB]], double -0.000000e+00
; CHECK-NEXT: [[V0_2]] = fadd reassoc arcp contract afn double [[V0_011]], [[ADD8]]
; CHECK-NEXT: [[ADD4:%.*]] = select i1 [[CMP1]], double [[MUL3]], double -0.000000e+00
; CHECK-NEXT: [[V1_2]] = fadd reassoc arcp contract afn double [[V1_012]], [[ADD4]]
@@ -232,10 +229,8 @@ define nofpclass(nan inf) double @monte_exp(i32 noundef %nblocks, i32 noundef %R
; CHECK-NEXT: [[TMP13:%.*]] = fcmp fast ogt <4 x double> [[TMP11]], zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = fmul fast <4 x double> [[TMP10]], [[TMP10]]
; CHECK-NEXT: [[TMP15:%.*]] = fmul fast <4 x double> [[TMP11]], [[TMP11]]
-; CHECK-NEXT: [[TMP28:%.*]] = fcmp fast ole <4 x double> [[TMP10]], splat (double -0.000000e+00)
-; CHECK-NEXT: [[TMP29:%.*]] = fcmp fast ole <4 x double> [[TMP11]], splat (double -0.000000e+00)
-; CHECK-NEXT: [[TMP16:%.*]] = select nnan ninf <4 x i1> [[TMP28]], <4 x double> splat (double -0.000000e+00), <4 x double> [[TMP10]]
-; CHECK-NEXT: [[TMP17:%.*]] = select nnan ninf <4 x i1> [[TMP29]], <4 x double> splat (double -0.000000e+00), <4 x double> [[TMP11]]
+; CHECK-NEXT: [[TMP16:%.*]] = select <4 x i1> [[TMP12]], <4 x double> [[TMP10]], <4 x double> splat (double -0.000000e+00)
+; CHECK-NEXT: [[TMP17:%.*]] = select <4 x i1> [[TMP13]], <4 x double> [[TMP11]], <4 x double> splat (double -0.000000e+00)
; CHECK-NEXT: [[TMP18]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI32]], [[TMP16]]
; CHECK-NEXT: [[TMP19]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI33]], [[TMP17]]
; CHECK-NEXT: [[TMP20:%.*]] = select <4 x i1> [[TMP12]], <4 x double> [[TMP14]], <4 x double> splat (double -0.000000e+00)
@@ -253,13 +248,13 @@ define nofpclass(nan inf) double @monte_exp(i32 noundef %nblocks, i32 noundef %R
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND1_FOR_INC8_CRIT_EDGE_US]], label %[[FOR_BODY3_US_PREHEADER]]
; CHECK: [[FOR_BODY3_US_PREHEADER]]:
; CHECK-NEXT: [[INDVARS_IV_PH:%.*]] = phi i64 [ 0, %[[FOR_BODY_US]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[V1_116_US_PH:%.*]] = phi double [ [[V1_021_US]], %[[FOR_BODY_US]] ], [ [[TMP25]], %[[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[V0_115_US_PH:%.*]] = phi double [ [[V0_020_US]], %[[FOR_BODY_US]] ], [ [[TMP26]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[V1_114_US_PH:%.*]] = phi double [ [[V1_021_US]], %[[FOR_BODY_US]] ], [ [[TMP25]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[V0_113_US_PH:%.*]] = phi double [ [[V0_020_US]], %[[FOR_BODY_US]] ], [ [[TMP26]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label %[[FOR_BODY3_US:.*]]
; CHECK: [[FOR_BODY3_US]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY3_US]] ], [ [[INDVARS_IV_PH]], %[[FOR_BODY3_US_PREHEADER]] ]
-; CHECK-NEXT: [[V1_116_US:%.*]] = phi double [ [[V1_2_US:%.*]], %[[FOR_BODY3_US]] ], [ [[V1_116_US_PH]], %[[FOR_BODY3_US_PREHEADER]] ]
-; CHECK-NEXT: [[V0_115_US:%.*]] = phi double [ [[V0_2_US:%.*]], %[[FOR_BODY3_US]] ], [ [[V0_115_US_PH]], %[[FOR_BODY3_US_PREHEADER]] ]
+; CHECK-NEXT: [[V1_116_US:%.*]] = phi double [ [[V1_2_US:%.*]], %[[FOR_BODY3_US]] ], [ [[V1_114_US_PH]], %[[FOR_BODY3_US_PREHEADER]] ]
+; CHECK-NEXT: [[V0_115_US:%.*]] = phi double [ [[V0_2_US:%.*]], %[[FOR_BODY3_US]] ], [ [[V0_113_US_PH]], %[[FOR_BODY3_US_PREHEADER]] ]
; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds nuw float, ptr [[SAMPLES]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX_US]], align 4
; CHECK-NEXT: [[CONV_US:%.*]] = fpext float [[TMP0]] to double
@@ -268,8 +263,7 @@ define nofpclass(nan inf) double @monte_exp(i32 noundef %nblocks, i32 noundef %R
; CHECK-NEXT: [[SUB_US:%.*]] = fsub fast double [[MUL_US]], [[Z]]
; CHECK-NEXT: [[CMP4_US:%.*]] = fcmp fast ogt double [[SUB_US]], 0.000000e+00
; CHECK-NEXT: [[ADD7_US:%.*]] = fmul fast double [[SUB_US]], [[SUB_US]]
-; CHECK-NEXT: [[DOTINV_US:%.*]] = fcmp fast ole double [[SUB_US]], -0.000000e+00
-; CHECK-NEXT: [[ADD12_US:%.*]] = select nnan ninf i1 [[DOTINV_US]], double -0.000000e+00, double [[SUB_US]]
+; CHECK-NEXT: [[ADD12_US:%.*]] = select i1 [[CMP4_US]], double [[SUB_US]], double -0.000000e+00
; CHECK-NEXT: [[V0_2_US]] = fadd reassoc arcp contract afn double [[V0_115_US]], [[ADD12_US]]
; CHECK-NEXT: [[ADD7_US1:%.*]] = select i1 [[CMP4_US]], double [[ADD7_US]], double -0.000000e+00
; CHECK-NEXT: [[V1_2_US]] = fadd reassoc arcp contract afn double [[V1_116_US]], [[ADD7_US1]]
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index a5050542b8186..8343afd63bed5 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -202,12 +202,11 @@ TEST_F(MatchSelectPatternTest, FMinConstantZero) {
}
TEST_F(MatchSelectPatternTest, FMinConstantZeroNsz) {
- parseAssembly(
- "define float @test(float %a) {\n"
- " %1 = fcmp nsz ole float %a, 0.0\n"
- " %A = select i1 %1, float %a, float 0.0\n"
- " ret float %A\n"
- "}\n");
+ parseAssembly("define float @test(float %a) {\n"
+ " %1 = fcmp nsz ole float %a, 0.0\n"
+ " %A = select nsz i1 %1, float %a, float 0.0\n"
+ " ret float %A\n"
+ "}\n");
// But this should be, because we've ignored signed zeroes.
expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, true});
}
From f5805d2b2921ca7573437cf656267caf9aaaff9d Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Sat, 31 May 2025 21:29:59 +0800
Subject: [PATCH 2/2] [CodeGen] Fix ARM tests. NFC.
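
The previous commit takes the fast-math flags from the select instead
of the compare, so these ARM vminnm/vmaxnm tests need nsz on their
selects to keep matching the same min/max patterns. Test updates only,
no functional change.
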
---
llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll | 6 +-
.../test/CodeGen/ARM/fp16-vminmaxnm-vector.ll | 64 +++++-----
llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll | 88 +++++++-------
llvm/test/CodeGen/ARM/vminmaxnm-safe.ll | 92 +++++++-------
llvm/test/CodeGen/ARM/vminmaxnm.ll | 112 +++++++++---------
5 files changed, 181 insertions(+), 181 deletions(-)
diff --git a/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll b/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll
index 996b46c51ab36..52fe5ce1a8a5f 100644
--- a/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll
+++ b/llvm/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll
@@ -517,9 +517,9 @@ define half @fp16_vminmaxnm_e_0(half %a) {
; CHECK-NEXT: .short 0x0000 @ half 0
entry:
%cmp1 = fcmp nsz ole half 0., %a
- %cond1 = select i1 %cmp1, half 0., half %a
+ %cond1 = select nsz i1 %cmp1, half 0., half %a
%cmp2 = fcmp nsz uge half 0., %cond1
- %cond2 = select i1 %cmp2, half 0., half %cond1
+ %cond2 = select nsz i1 %cmp2, half 0., half %cond1
ret half %cond2
}
@@ -540,7 +540,7 @@ define half @fp16_vminmaxnm_e_neg0(half %a) {
; CHECK-NEXT: .short 0x8000 @ half -0
entry:
%cmp1 = fcmp nsz ule half -0., %a
- %cond1 = select i1 %cmp1, half -0., half %a
+ %cond1 = select nsz i1 %cmp1, half -0., half %a
%cmp2 = fcmp nsz oge half -0., %cond1
%cond2 = select i1 %cmp2, half -0., half %cond1
ret half %cond2
diff --git a/llvm/test/CodeGen/ARM/fp16-vminmaxnm-vector.ll b/llvm/test/CodeGen/ARM/fp16-vminmaxnm-vector.ll
index 6a61bb594b430..0b41c738080a6 100644
--- a/llvm/test/CodeGen/ARM/fp16-vminmaxnm-vector.ll
+++ b/llvm/test/CodeGen/ARM/fp16-vminmaxnm-vector.ll
@@ -10,7 +10,7 @@ define <4 x half> @test1(<4 x half> %A, <4 x half> %B) {
; CHECK: vmaxnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ogt <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
ret <4 x half> %tmp4
}
@@ -19,7 +19,7 @@ define <4 x half> @test2(<4 x half> %A, <4 x half> %B) {
; CHECK: vminnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ogt <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
ret <4 x half> %tmp4
}
@@ -28,7 +28,7 @@ define <4 x half> @test3(<4 x half> %A, <4 x half> %B) {
; CHECK: vminnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast oge <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
ret <4 x half> %tmp4
}
@@ -37,7 +37,7 @@ define <4 x half> @test4(<4 x half> %A, <4 x half> %B) {
; CHECK: vmaxnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast oge <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
ret <4 x half> %tmp4
}
@@ -46,7 +46,7 @@ define <4 x half> @test5(<4 x half> %A, <4 x half> %B) {
; CHECK: vminnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast olt <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
ret <4 x half> %tmp4
}
@@ -55,7 +55,7 @@ define <4 x half> @test6(<4 x half> %A, <4 x half> %B) {
; CHECK: vmaxnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast olt <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
ret <4 x half> %tmp4
}
@@ -64,7 +64,7 @@ define <4 x half> @test7(<4 x half> %A, <4 x half> %B) {
; CHECK: vminnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ole <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
ret <4 x half> %tmp4
}
@@ -73,7 +73,7 @@ define <4 x half> @test8(<4 x half> %A, <4 x half> %B) {
; CHECK: vmaxnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ole <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
ret <4 x half> %tmp4
}
@@ -84,7 +84,7 @@ define <4 x half> @test11(<4 x half> %A, <4 x half> %B) {
; CHECK: vmaxnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ugt <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
ret <4 x half> %tmp4
}
@@ -93,7 +93,7 @@ define <4 x half> @test12(<4 x half> %A, <4 x half> %B) {
; CHECK: vminnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ugt <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
ret <4 x half> %tmp4
}
@@ -102,7 +102,7 @@ define <4 x half> @test13(<4 x half> %A, <4 x half> %B) {
; CHECK: vminnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast uge <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
ret <4 x half> %tmp4
}
@@ -111,7 +111,7 @@ define <4 x half> @test14(<4 x half> %A, <4 x half> %B) {
; CHECK: vmaxnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast uge <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
ret <4 x half> %tmp4
}
@@ -120,7 +120,7 @@ define <4 x half> @test15(<4 x half> %A, <4 x half> %B) {
; CHECK: vminnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ult <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
ret <4 x half> %tmp4
}
@@ -129,7 +129,7 @@ define <4 x half> @test16(<4 x half> %A, <4 x half> %B) {
; CHECK: vmaxnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ult <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
ret <4 x half> %tmp4
}
@@ -138,7 +138,7 @@ define <4 x half> @test17(<4 x half> %A, <4 x half> %B) {
; CHECK: vminnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ule <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
ret <4 x half> %tmp4
}
@@ -147,7 +147,7 @@ define <4 x half> @test18(<4 x half> %A, <4 x half> %B) {
; CHECK: vmaxnm.f16 d0, d0, d1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ule <4 x half> %A, %B
- %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ %tmp4 = select nsz <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
ret <4 x half> %tmp4
}
@@ -160,7 +160,7 @@ define <8 x half> @test201(<8 x half> %A, <8 x half> %B) {
; CHECK: vmaxnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ogt <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
ret <8 x half> %tmp4
}
@@ -169,7 +169,7 @@ define <8 x half> @test202(<8 x half> %A, <8 x half> %B) {
; CHECK: vminnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ogt <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
ret <8 x half> %tmp4
}
@@ -178,7 +178,7 @@ define <8 x half> @test203(<8 x half> %A, <8 x half> %B) {
; CHECK: vmaxnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast oge <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
ret <8 x half> %tmp4
}
@@ -187,7 +187,7 @@ define <8 x half> @test204(<8 x half> %A, <8 x half> %B) {
; CHECK: vminnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast oge <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
ret <8 x half> %tmp4
}
@@ -196,7 +196,7 @@ define <8 x half> @test205(<8 x half> %A, <8 x half> %B) {
; CHECK: vminnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast olt <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
ret <8 x half> %tmp4
}
@@ -205,7 +205,7 @@ define <8 x half> @test206(<8 x half> %A, <8 x half> %B) {
; CHECK: vmaxnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast olt <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
ret <8 x half> %tmp4
}
@@ -214,7 +214,7 @@ define <8 x half> @test207(<8 x half> %A, <8 x half> %B) {
; CHECK: vminnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ole <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
ret <8 x half> %tmp4
}
@@ -223,7 +223,7 @@ define <8 x half> @test208(<8 x half> %A, <8 x half> %B) {
; CHECK: vmaxnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ole <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
ret <8 x half> %tmp4
}
@@ -234,7 +234,7 @@ define <8 x half> @test209(<8 x half> %A, <8 x half> %B) {
; CHECK: vmaxnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ugt <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
ret <8 x half> %tmp4
}
@@ -243,7 +243,7 @@ define <8 x half> @test210(<8 x half> %A, <8 x half> %B) {
; CHECK: vminnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ugt <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
ret <8 x half> %tmp4
}
@@ -252,7 +252,7 @@ define <8 x half> @test211(<8 x half> %A, <8 x half> %B) {
; CHECK: vmaxnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast uge <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
ret <8 x half> %tmp4
}
@@ -261,7 +261,7 @@ define <8 x half> @test214(<8 x half> %A, <8 x half> %B) {
; CHECK: vminnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast uge <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
ret <8 x half> %tmp4
}
@@ -270,7 +270,7 @@ define <8 x half> @test215(<8 x half> %A, <8 x half> %B) {
; CHECK: vminnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ult <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
ret <8 x half> %tmp4
}
@@ -279,7 +279,7 @@ define <8 x half> @test216(<8 x half> %A, <8 x half> %B) {
; CHECK: vmaxnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ult <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
ret <8 x half> %tmp4
}
@@ -288,7 +288,7 @@ define <8 x half> @test217(<8 x half> %A, <8 x half> %B) {
; CHECK: vminnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ule <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
ret <8 x half> %tmp4
}
@@ -297,6 +297,6 @@ define <8 x half> @test218(<8 x half> %A, <8 x half> %B) {
; CHECK: vmaxnm.f16 q0, q0, q1
; CHECK-NEXT: bx lr
%tmp3 = fcmp fast ule <8 x half> %A, %B
- %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ %tmp4 = select nsz <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
ret <8 x half> %tmp4
}
diff --git a/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll b/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll
index 525c27be4f977..33ff71e8c473e 100644
--- a/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll
+++ b/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll
@@ -16,7 +16,7 @@ entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
%cmp = fcmp fast olt half %0, %1
- %cond = select i1 %cmp, half %0, half %1
+ %cond = select nsz i1 %cmp, half %0, half %1
ret half %cond
}
@@ -29,7 +29,7 @@ entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
%cmp = fcmp fast ogt half %0, %1
- %cond = select i1 %cmp, half %1, half %0
+ %cond = select nsz i1 %cmp, half %1, half %0
ret half %cond
}
@@ -42,7 +42,7 @@ entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
%cmp = fcmp fast ult half %0, %1
- %cond = select i1 %cmp, half %0, half %1
+ %cond = select nsz i1 %cmp, half %0, half %1
ret half %cond
}
@@ -55,7 +55,7 @@ entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
%cmp = fcmp fast ule half %0, %1
- %cond = select i1 %cmp, half %0, half %1
+ %cond = select nsz i1 %cmp, half %0, half %1
ret half %cond
}
@@ -68,7 +68,7 @@ entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
%cmp = fcmp fast ugt half %0, %1
- %cond = select i1 %cmp, half %1, half %0
+ %cond = select nsz i1 %cmp, half %1, half %0
ret half %cond
}
@@ -81,7 +81,7 @@ entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
%cmp = fcmp fast ogt half %0, %1
- %cond = select i1 %cmp, half %0, half %1
+ %cond = select nsz i1 %cmp, half %0, half %1
ret half %cond
}
@@ -94,7 +94,7 @@ entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
%cmp = fcmp fast oge half %0, %1
- %cond = select i1 %cmp, half %0, half %1
+ %cond = select nsz i1 %cmp, half %0, half %1
ret half %cond
}
@@ -107,7 +107,7 @@ entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
%cmp = fcmp fast olt half %0, %1
- %cond = select i1 %cmp, half %1, half %0
+ %cond = select nsz i1 %cmp, half %1, half %0
ret half %cond
}
@@ -120,7 +120,7 @@ entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
%cmp = fcmp fast ole half %0, %1
- %cond = select i1 %cmp, half %1, half %0
+ %cond = select nsz i1 %cmp, half %1, half %0
ret half %cond
}
@@ -133,7 +133,7 @@ entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
%cmp = fcmp fast ugt half %0, %1
- %cond = select i1 %cmp, half %0, half %1
+ %cond = select nsz i1 %cmp, half %0, half %1
ret half %cond
}
@@ -146,7 +146,7 @@ entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
%cmp = fcmp fast uge half %0, %1
- %cond = select i1 %cmp, half %0, half %1
+ %cond = select nsz i1 %cmp, half %0, half %1
ret half %cond
}
@@ -159,7 +159,7 @@ entry:
%0 = bitcast i16 %a to half
%1 = bitcast i16 %b to half
%cmp = fcmp fast ult half %0, %1
- %cond = select i1 %cmp, half %1, half %0
+ %cond = select nsz i1 %cmp, half %1, half %0
ret half %cond
}
@@ -175,9 +175,9 @@ define half @fp16_vminnm_NNNo(i16 signext %a) {
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp fast olt half %0, 12.
- %cond1 = select i1 %cmp1, half %0, half 12.
+ %cond1 = select nsz i1 %cmp1, half %0, half 12.
%cmp2 = fcmp fast olt half 34., %cond1
- %cond2 = select i1 %cmp2, half 34., half %cond1
+ %cond2 = select nsz i1 %cmp2, half 34., half %cond1
ret half %cond2
}
@@ -191,9 +191,9 @@ define half @fp16_vminnm_NNNo_rev(i16 signext %a) {
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp fast ogt half %0, 56.
- %cond1 = select i1 %cmp1, half 56., half %0
+ %cond1 = select nsz i1 %cmp1, half 56., half %0
%cmp2 = fcmp fast ogt half 78., %cond1
- %cond2 = select i1 %cmp2, half %cond1, half 78.
+ %cond2 = select nsz i1 %cmp2, half %cond1, half 78.
ret half %cond2
}
@@ -207,9 +207,9 @@ define half @fp16_vminnm_NNNu(i16 signext %b) {
entry:
%0 = bitcast i16 %b to half
%cmp1 = fcmp fast ult half 12., %0
- %cond1 = select i1 %cmp1, half 12., half %0
+ %cond1 = select nsz i1 %cmp1, half 12., half %0
%cmp2 = fcmp fast ult half %cond1, 34.
- %cond2 = select i1 %cmp2, half %cond1, half 34.
+ %cond2 = select nsz i1 %cmp2, half %cond1, half 34.
ret half %cond2
}
@@ -223,9 +223,9 @@ define half @fp16_vminnm_NNNule(i16 signext %b) {
entry:
%0 = bitcast i16 %b to half
%cmp1 = fcmp fast ule half 34., %0
- %cond1 = select i1 %cmp1, half 34., half %0
+ %cond1 = select nsz i1 %cmp1, half 34., half %0
%cmp2 = fcmp fast ule half %cond1, 56.
- %cond2 = select i1 %cmp2, half %cond1, half 56.
+ %cond2 = select nsz i1 %cmp2, half %cond1, half 56.
ret half %cond2
}
@@ -239,9 +239,9 @@ define half @fp16_vminnm_NNNu_rev(i16 signext %b) {
entry:
%0 = bitcast i16 %b to half
%cmp1 = fcmp fast ugt half 56., %0
- %cond1 = select i1 %cmp1, half %0, half 56.
+ %cond1 = select nsz i1 %cmp1, half %0, half 56.
%cmp2 = fcmp fast ugt half %cond1, 78.
- %cond2 = select i1 %cmp2, half 78., half %cond1
+ %cond2 = select nsz i1 %cmp2, half 78., half %cond1
ret half %cond2
}
@@ -255,9 +255,9 @@ define half @fp16_vmaxnm_NNNo(i16 signext %a) {
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp fast ogt half %0, 12.
- %cond1 = select i1 %cmp1, half %0, half 12.
+ %cond1 = select nsz i1 %cmp1, half %0, half 12.
%cmp2 = fcmp fast ogt half 34., %cond1
- %cond2 = select i1 %cmp2, half 34., half %cond1
+ %cond2 = select nsz i1 %cmp2, half 34., half %cond1
ret half %cond2
}
@@ -271,9 +271,9 @@ define half @fp16_vmaxnm_NNNoge(i16 signext %a) {
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp fast oge half %0, 34.
- %cond1 = select i1 %cmp1, half %0, half 34.
+ %cond1 = select nsz i1 %cmp1, half %0, half 34.
%cmp2 = fcmp fast oge half 56., %cond1
- %cond2 = select i1 %cmp2, half 56., half %cond1
+ %cond2 = select nsz i1 %cmp2, half 56., half %cond1
ret half %cond2
}
@@ -287,9 +287,9 @@ define half @fp16_vmaxnm_NNNo_rev(i16 signext %a) {
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp fast olt half %0, 56.
- %cond1 = select i1 %cmp1, half 56., half %0
+ %cond1 = select nsz i1 %cmp1, half 56., half %0
%cmp2 = fcmp fast olt half 78., %cond1
- %cond2 = select i1 %cmp2, half %cond1, half 78.
+ %cond2 = select nsz i1 %cmp2, half %cond1, half 78.
ret half %cond2
}
@@ -303,9 +303,9 @@ define half @fp16_vmaxnm_NNNole_rev(i16 signext %a) {
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp fast ole half %0, 78.
- %cond1 = select i1 %cmp1, half 78., half %0
+ %cond1 = select nsz i1 %cmp1, half 78., half %0
%cmp2 = fcmp fast ole half 90., %cond1
- %cond2 = select i1 %cmp2, half %cond1, half 90.
+ %cond2 = select nsz i1 %cmp2, half %cond1, half 90.
ret half %cond2
}
@@ -319,9 +319,9 @@ define half @fp16_vmaxnm_NNNu(i16 signext %b) {
entry:
%0 = bitcast i16 %b to half
%cmp1 = fcmp fast ugt half 12., %0
- %cond1 = select i1 %cmp1, half 12., half %0
+ %cond1 = select nsz i1 %cmp1, half 12., half %0
%cmp2 = fcmp fast ugt half %cond1, 34.
- %cond2 = select i1 %cmp2, half %cond1, half 34.
+ %cond2 = select nsz i1 %cmp2, half %cond1, half 34.
ret half %cond2
}
@@ -335,9 +335,9 @@ define half @fp16_vmaxnm_NNNuge(i16 signext %b) {
entry:
%0 = bitcast i16 %b to half
%cmp1 = fcmp fast uge half 34., %0
- %cond1 = select i1 %cmp1, half 34., half %0
+ %cond1 = select nsz i1 %cmp1, half 34., half %0
%cmp2 = fcmp fast uge half %cond1, 56.
- %cond2 = select i1 %cmp2, half %cond1, half 56.
+ %cond2 = select nsz i1 %cmp2, half %cond1, half 56.
ret half %cond2
}
@@ -351,9 +351,9 @@ define half @fp16_vmaxnm_NNNu_rev(i16 signext %b) {
entry:
%0 = bitcast i16 %b to half
%cmp1 = fcmp fast ult half 56., %0
- %cond1 = select i1 %cmp1, half %0, half 56.
+ %cond1 = select nsz i1 %cmp1, half %0, half 56.
%cmp2 = fcmp fast ult half %cond1, 78.
- %cond2 = select i1 %cmp2, half 78., half %cond1
+ %cond2 = select nsz i1 %cmp2, half 78., half %cond1
ret half %cond2
}
@@ -366,9 +366,9 @@ define half @fp16_vminmaxnm_0(i16 signext %a) {
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp fast olt half %0, 0.
- %cond1 = select i1 %cmp1, half %0, half 0.
+ %cond1 = select nsz i1 %cmp1, half %0, half 0.
%cmp2 = fcmp fast ogt half %cond1, 0.
- %cond2 = select i1 %cmp2, half %cond1, half 0.
+ %cond2 = select nsz i1 %cmp2, half %cond1, half 0.
ret half %cond2
}
@@ -381,9 +381,9 @@ define half @fp16_vminmaxnm_neg0(i16 signext %a) {
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp fast olt half %0, -0.
- %cond1 = select i1 %cmp1, half %0, half -0.
+ %cond1 = select nsz i1 %cmp1, half %0, half -0.
%cmp2 = fcmp fast ugt half %cond1, -0.
- %cond2 = select i1 %cmp2, half %cond1, half -0.
+ %cond2 = select nsz i1 %cmp2, half %cond1, half -0.
ret half %cond2
}
@@ -396,9 +396,9 @@ define half @fp16_vminmaxnm_e_0(i16 signext %a) {
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp fast ule half 0., %0
- %cond1 = select i1 %cmp1, half 0., half %0
+ %cond1 = select nsz i1 %cmp1, half 0., half %0
%cmp2 = fcmp fast uge half 0., %cond1
- %cond2 = select i1 %cmp2, half 0., half %cond1
+ %cond2 = select nsz i1 %cmp2, half 0., half %cond1
ret half %cond2
}
@@ -411,8 +411,8 @@ define half @fp16_vminmaxnm_e_neg0(i16 signext %a) {
entry:
%0 = bitcast i16 %a to half
%cmp1 = fcmp fast ule half -0., %0
- %cond1 = select i1 %cmp1, half -0., half %0
+ %cond1 = select nsz i1 %cmp1, half -0., half %0
%cmp2 = fcmp fast oge half -0., %cond1
- %cond2 = select i1 %cmp2, half -0., half %cond1
+ %cond2 = select nsz i1 %cmp2, half -0., half %cond1
ret half %cond2
}
diff --git a/llvm/test/CodeGen/ARM/vminmaxnm-safe.ll b/llvm/test/CodeGen/ARM/vminmaxnm-safe.ll
index feb23ea1f3982..5577ab49bb830 100644
--- a/llvm/test/CodeGen/ARM/vminmaxnm-safe.ll
+++ b/llvm/test/CodeGen/ARM/vminmaxnm-safe.ll
@@ -44,7 +44,7 @@ define float @fp-armv8_vminnm_o(float %a, float %b) {
; CHECK-LABEL: "fp-armv8_vminnm_o":
; CHECK-NOT: vminnm.f32
%cmp = fcmp olt float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nsz i1 %cmp, float %a, float %b
ret float %cond
}
@@ -52,7 +52,7 @@ define double @fp-armv8_vminnm_ole(double %a, double %b) {
; CHECK-LABEL: "fp-armv8_vminnm_ole":
; CHECK-NOT: vminnm.f64
%cmp = fcmp ole double %a, %b
- %cond = select i1 %cmp, double %a, double %b
+ %cond = select nsz i1 %cmp, double %a, double %b
ret double %cond
}
@@ -60,7 +60,7 @@ define float @fp-armv8_vminnm_o_rev(float %a, float %b) {
; CHECK-LABEL: "fp-armv8_vminnm_o_rev":
; CHECK-NOT: vminnm.f32
%cmp = fcmp ogt float %a, %b
- %cond = select i1 %cmp, float %b, float %a
+ %cond = select nsz i1 %cmp, float %b, float %a
ret float %cond
}
@@ -68,7 +68,7 @@ define double @fp-armv8_vminnm_oge_rev(double %a, double %b) {
; CHECK-LABEL: "fp-armv8_vminnm_oge_rev":
; CHECK-NOT: vminnm.f64
%cmp = fcmp oge double %a, %b
- %cond = select i1 %cmp, double %b, double %a
+ %cond = select nsz i1 %cmp, double %b, double %a
ret double %cond
}
@@ -76,7 +76,7 @@ define float @fp-armv8_vminnm_u(float %a, float %b) {
; CHECK-LABEL: "fp-armv8_vminnm_u":
; CHECK-NOT: vminnm.f32
%cmp = fcmp ult float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nsz i1 %cmp, float %a, float %b
ret float %cond
}
@@ -84,7 +84,7 @@ define float @fp-armv8_vminnm_ule(float %a, float %b) {
; CHECK-LABEL: "fp-armv8_vminnm_ule":
; CHECK-NOT: vminnm.f32
%cmp = fcmp ule float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nsz i1 %cmp, float %a, float %b
ret float %cond
}
@@ -92,7 +92,7 @@ define float @fp-armv8_vminnm_u_rev(float %a, float %b) {
; CHECK-LABEL: "fp-armv8_vminnm_u_rev":
; CHECK-NOT: vminnm.f32
%cmp = fcmp ugt float %a, %b
- %cond = select i1 %cmp, float %b, float %a
+ %cond = select nsz i1 %cmp, float %b, float %a
ret float %cond
}
@@ -100,7 +100,7 @@ define double @fp-armv8_vminnm_uge_rev(double %a, double %b) {
; CHECK-LABEL: "fp-armv8_vminnm_uge_rev":
; CHECK-NOT: vminnm.f64
%cmp = fcmp uge double %a, %b
- %cond = select i1 %cmp, double %b, double %a
+ %cond = select nsz i1 %cmp, double %b, double %a
ret double %cond
}
@@ -108,7 +108,7 @@ define float @fp-armv8_vmaxnm_o(float %a, float %b) {
; CHECK-LABEL: "fp-armv8_vmaxnm_o":
; CHECK-NOT: vmaxnm.f32
%cmp = fcmp ogt float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nsz i1 %cmp, float %a, float %b
ret float %cond
}
@@ -116,7 +116,7 @@ define float @fp-armv8_vmaxnm_oge(float %a, float %b) {
; CHECK-LABEL: "fp-armv8_vmaxnm_oge":
; CHECK-NOT: vmaxnm.f32
%cmp = fcmp oge float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nsz i1 %cmp, float %a, float %b
ret float %cond
}
@@ -124,7 +124,7 @@ define float @fp-armv8_vmaxnm_o_rev(float %a, float %b) {
; CHECK-LABEL: "fp-armv8_vmaxnm_o_rev":
; CHECK-NOT: vmaxnm.f32
%cmp = fcmp olt float %a, %b
- %cond = select i1 %cmp, float %b, float %a
+ %cond = select nsz i1 %cmp, float %b, float %a
ret float %cond
}
@@ -132,7 +132,7 @@ define float @fp-armv8_vmaxnm_ole_rev(float %a, float %b) {
; CHECK-LABEL: "fp-armv8_vmaxnm_ole_rev":
; CHECK-NOT: vmaxnm.f32
%cmp = fcmp ole float %a, %b
- %cond = select i1 %cmp, float %b, float %a
+ %cond = select nsz i1 %cmp, float %b, float %a
ret float %cond
}
@@ -140,7 +140,7 @@ define float @fp-armv8_vmaxnm_u(float %a, float %b) {
; CHECK-LABEL: "fp-armv8_vmaxnm_u":
; CHECK-NOT: vmaxnm.f32
%cmp = fcmp ugt float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nsz i1 %cmp, float %a, float %b
ret float %cond
}
@@ -148,7 +148,7 @@ define float @fp-armv8_vmaxnm_uge(float %a, float %b) {
; CHECK-LABEL: "fp-armv8_vmaxnm_uge":
; CHECK-NOT: vmaxnm.f32
%cmp = fcmp uge float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nsz i1 %cmp, float %a, float %b
ret float %cond
}
@@ -156,7 +156,7 @@ define float @fp-armv8_vmaxnm_u_rev(float %a, float %b) {
; CHECK-LABEL: "fp-armv8_vmaxnm_u_rev":
; CHECK-NOT: vmaxnm.f32
%cmp = fcmp ult float %a, %b
- %cond = select i1 %cmp, float %b, float %a
+ %cond = select nsz i1 %cmp, float %b, float %a
ret float %cond
}
@@ -164,7 +164,7 @@ define double @fp-armv8_vmaxnm_ule_rev(double %a, double %b) {
; CHECK-LABEL: "fp-armv8_vmaxnm_ule_rev":
; CHECK-NOT: vmaxnm.f64
%cmp = fcmp ule double %a, %b
- %cond = select i1 %cmp, double %b, double %a
+ %cond = select nsz i1 %cmp, double %b, double %a
ret double %cond
}
@@ -175,7 +175,7 @@ define float @fp-armv8_vminnm_NNNo(float %a) {
; CHECK: vminnm.f32
; CHECK-NOT: vminnm.f32
%cmp1 = fcmp olt float %a, 12.
- %cond1 = select i1 %cmp1, float %a, float 12.
+ %cond1 = select nsz i1 %cmp1, float %a, float 12.
%cmp2 = fcmp olt float 34., %cond1
%cond2 = select i1 %cmp2, float 34., float %cond1
ret float %cond2
@@ -186,7 +186,7 @@ define double @fp-armv8_vminnm_NNNole(double %a) {
; CHECK: vminnm.f64
; CHECK-NOT: vminnm.f64
%cmp1 = fcmp ole double %a, 34.
- %cond1 = select i1 %cmp1, double %a, double 34.
+ %cond1 = select nsz i1 %cmp1, double %a, double 34.
%cmp2 = fcmp ole double 56., %cond1
%cond2 = select i1 %cmp2, double 56., double %cond1
ret double %cond2
@@ -197,9 +197,9 @@ define float @fp-armv8_vminnm_NNNo_rev(float %a) {
; CHECK: vminnm.f32
; CHECK-NOT: vminnm.f32
%cmp1 = fcmp ogt float %a, 56.
- %cond1 = select i1 %cmp1, float 56., float %a
+ %cond1 = select nsz i1 %cmp1, float 56., float %a
%cmp2 = fcmp ogt float 78., %cond1
- %cond2 = select i1 %cmp2, float %cond1, float 78.
+ %cond2 = select nsz i1 %cmp2, float %cond1, float 78.
ret float %cond2
}
@@ -208,9 +208,9 @@ define double @fp-armv8_vminnm_NNNoge_rev(double %a) {
; CHECK: vminnm.f64
; CHECK-NOT: vminnm.f64
%cmp1 = fcmp oge double %a, 78.
- %cond1 = select i1 %cmp1, double 78., double %a
+ %cond1 = select nsz i1 %cmp1, double 78., double %a
%cmp2 = fcmp oge double 90., %cond1
- %cond2 = select i1 %cmp2, double %cond1, double 90.
+ %cond2 = select nsz i1 %cmp2, double %cond1, double 90.
ret double %cond2
}
@@ -219,7 +219,7 @@ define float @fp-armv8_vminnm_NNNu(float %b) {
; CHECK: vminnm.f32
; CHECK-NOT: vminnm.f32
%cmp1 = fcmp ult float 12., %b
- %cond1 = select i1 %cmp1, float 12., float %b
+ %cond1 = select nsz i1 %cmp1, float 12., float %b
%cmp2 = fcmp ult float %cond1, 34.
%cond2 = select i1 %cmp2, float %cond1, float 34.
ret float %cond2
@@ -230,7 +230,7 @@ define float @fp-armv8_vminnm_NNNule(float %b) {
; CHECK: vminnm.f32
; CHECK-NOT: vminnm.f32
%cmp1 = fcmp ule float 34., %b
- %cond1 = select i1 %cmp1, float 34., float %b
+ %cond1 = select nsz i1 %cmp1, float 34., float %b
%cmp2 = fcmp ule float %cond1, 56.
%cond2 = select i1 %cmp2, float %cond1, float 56.
ret float %cond2
@@ -241,9 +241,9 @@ define float @fp-armv8_vminnm_NNNu_rev(float %b) {
; CHECK: vminnm.f32
; CHECK-NOT: vminnm.f32
%cmp1 = fcmp ugt float 56., %b
- %cond1 = select i1 %cmp1, float %b, float 56.
+ %cond1 = select nsz i1 %cmp1, float %b, float 56.
%cmp2 = fcmp ugt float %cond1, 78.
- %cond2 = select i1 %cmp2, float 78., float %cond1
+ %cond2 = select nsz i1 %cmp2, float 78., float %cond1
ret float %cond2
}
@@ -252,9 +252,9 @@ define double @fp-armv8_vminnm_NNNuge_rev(double %b) {
; CHECK: vminnm.f64
; CHECK-NOT: vminnm.f64
%cmp1 = fcmp uge double 78., %b
- %cond1 = select i1 %cmp1, double %b, double 78.
+ %cond1 = select nsz i1 %cmp1, double %b, double 78.
%cmp2 = fcmp uge double %cond1, 90.
- %cond2 = select i1 %cmp2, double 90., double %cond1
+ %cond2 = select nsz i1 %cmp2, double 90., double %cond1
ret double %cond2
}
@@ -263,7 +263,7 @@ define float @fp-armv8_vmaxnm_NNNo(float %a) {
; CHECK: vmaxnm.f32
; CHECK-NOT: vmaxnm.f32
%cmp1 = fcmp ogt float %a, 12.
- %cond1 = select i1 %cmp1, float %a, float 12.
+ %cond1 = select nsz i1 %cmp1, float %a, float 12.
%cmp2 = fcmp ogt float 34., %cond1
%cond2 = select i1 %cmp2, float 34., float %cond1
ret float %cond2
@@ -274,7 +274,7 @@ define float @fp-armv8_vmaxnm_NNNoge(float %a) {
; CHECK: vmaxnm.f32
; CHECK-NOT: vmaxnm.f32
%cmp1 = fcmp oge float %a, 34.
- %cond1 = select i1 %cmp1, float %a, float 34.
+ %cond1 = select nsz i1 %cmp1, float %a, float 34.
%cmp2 = fcmp oge float 56., %cond1
%cond2 = select i1 %cmp2, float 56., float %cond1
ret float %cond2
@@ -285,9 +285,9 @@ define float @fp-armv8_vmaxnm_NNNo_rev(float %a) {
; CHECK: vmaxnm.f32
; CHECK-NOT: vmaxnm.f32
%cmp1 = fcmp olt float %a, 56.
- %cond1 = select i1 %cmp1, float 56., float %a
+ %cond1 = select nsz i1 %cmp1, float 56., float %a
%cmp2 = fcmp olt float 78., %cond1
- %cond2 = select i1 %cmp2, float %cond1, float 78.
+ %cond2 = select nsz i1 %cmp2, float %cond1, float 78.
ret float %cond2
}
@@ -296,9 +296,9 @@ define float @fp-armv8_vmaxnm_NNNole_rev(float %a) {
; CHECK: vmaxnm.f32
; CHECK-NOT: vmaxnm.f32
%cmp1 = fcmp ole float %a, 78.
- %cond1 = select i1 %cmp1, float 78., float %a
+ %cond1 = select nsz i1 %cmp1, float 78., float %a
%cmp2 = fcmp ole float 90., %cond1
- %cond2 = select i1 %cmp2, float %cond1, float 90.
+ %cond2 = select nsz i1 %cmp2, float %cond1, float 90.
ret float %cond2
}
@@ -307,7 +307,7 @@ define float @fp-armv8_vmaxnm_NNNu(float %b) {
; CHECK: vmaxnm.f32
; CHECK-NOT: vmaxnm.f32
%cmp1 = fcmp ugt float 12., %b
- %cond1 = select i1 %cmp1, float 12., float %b
+ %cond1 = select nsz i1 %cmp1, float 12., float %b
%cmp2 = fcmp ugt float %cond1, 34.
%cond2 = select i1 %cmp2, float %cond1, float 34.
ret float %cond2
@@ -318,7 +318,7 @@ define float @fp-armv8_vmaxnm_NNNuge(float %b) {
; CHECK: vmaxnm.f32
; CHECK-NOT: vmaxnm.f32
%cmp1 = fcmp uge float 34., %b
- %cond1 = select i1 %cmp1, float 34., float %b
+ %cond1 = select nsz i1 %cmp1, float 34., float %b
%cmp2 = fcmp uge float %cond1, 56.
%cond2 = select i1 %cmp2, float %cond1, float 56.
ret float %cond2
@@ -329,9 +329,9 @@ define float @fp-armv8_vmaxnm_NNNu_rev(float %b) {
; CHECK: vmaxnm.f32
; CHECK-NOT: vmaxnm.f32
%cmp1 = fcmp ult float 56., %b
- %cond1 = select i1 %cmp1, float %b, float 56.
+ %cond1 = select nsz i1 %cmp1, float %b, float 56.
%cmp2 = fcmp ult float %cond1, 78.
- %cond2 = select i1 %cmp2, float 78., float %cond1
+ %cond2 = select nsz i1 %cmp2, float 78., float %cond1
ret float %cond2
}
@@ -340,9 +340,9 @@ define double @fp-armv8_vmaxnm_NNNule_rev( double %b) {
; CHECK: vmaxnm.f64
; CHECK-NOT: vmaxnm.f64
%cmp1 = fcmp ule double 78., %b
- %cond1 = select i1 %cmp1, double %b, double 78.
+ %cond1 = select nsz i1 %cmp1, double %b, double 78.
%cmp2 = fcmp ule double %cond1, 90.
- %cond2 = select i1 %cmp2, double 90., double %cond1
+ %cond2 = select nsz i1 %cmp2, double 90., double %cond1
ret double %cond2
}
@@ -351,9 +351,9 @@ define float @fp-armv8_vminmaxnm_0(float %a) {
; CHECK-NOT: vminnm.f32
; CHECK: vmaxnm.f32
%cmp1 = fcmp ult float %a, 0.
- %cond1 = select i1 %cmp1, float %a, float 0.
+ %cond1 = select nsz i1 %cmp1, float %a, float 0.
%cmp2 = fcmp ogt float %cond1, 0.
- %cond2 = select i1 %cmp2, float %cond1, float 0.
+ %cond2 = select nsz i1 %cmp2, float %cond1, float 0.
ret float %cond2
}
@@ -362,7 +362,7 @@ define float @fp-armv8_vminmaxnm_neg0(float %a) {
; CHECK: vminnm.f32
; CHECK-NOT: vmaxnm.f32
%cmp1 = fcmp olt float %a, -0.
- %cond1 = select i1 %cmp1, float %a, float -0.
+ %cond1 = select nsz i1 %cmp1, float %a, float -0.
%cmp2 = fcmp ugt float %cond1, -0.
%cond2 = select i1 %cmp2, float %cond1, float -0.
ret float %cond2
@@ -373,9 +373,9 @@ define float @fp-armv8_vminmaxnm_e_0(float %a) {
; CHECK-NOT: vminnm.f32
; CHECK: vmaxnm.f32
%cmp1 = fcmp nsz ole float 0., %a
- %cond1 = select i1 %cmp1, float 0., float %a
+ %cond1 = select nsz i1 %cmp1, float 0., float %a
%cmp2 = fcmp nsz uge float 0., %cond1
- %cond2 = select i1 %cmp2, float 0., float %cond1
+ %cond2 = select nsz i1 %cmp2, float 0., float %cond1
ret float %cond2
}
@@ -384,7 +384,7 @@ define float @fp-armv8_vminmaxnm_e_neg0(float %a) {
; CHECK: vminnm.f32
; CHECK-NOT: vmaxnm.f32
%cmp1 = fcmp nsz ule float -0., %a
- %cond1 = select i1 %cmp1, float -0., float %a
+ %cond1 = select nsz i1 %cmp1, float -0., float %a
%cmp2 = fcmp nsz oge float -0., %cond1
%cond2 = select i1 %cmp2, float -0., float %cond1
ret float %cond2
diff --git a/llvm/test/CodeGen/ARM/vminmaxnm.ll b/llvm/test/CodeGen/ARM/vminmaxnm.ll
index a6803fc78d8ce..bb3ea3067541e 100644
--- a/llvm/test/CodeGen/ARM/vminmaxnm.ll
+++ b/llvm/test/CodeGen/ARM/vminmaxnm.ll
@@ -7,7 +7,7 @@ define float @fp-armv8_vminnm_o(float %a, float %b) {
; CHECK-NOT: vcmp
; CHECK: vminnm.f32
%cmp = fcmp fast olt float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nsz i1 %cmp, float %a, float %b
ret float %cond
}
@@ -16,7 +16,7 @@ define double @fp-armv8_vminnm_ole(double %a, double %b) {
; CHECK-NOT: vcmp
; CHECK: vminnm.f64
%cmp = fcmp fast ole double %a, %b
- %cond = select i1 %cmp, double %a, double %b
+ %cond = select nsz i1 %cmp, double %a, double %b
ret double %cond
}
@@ -25,7 +25,7 @@ define float @fp-armv8_vminnm_o_rev(float %a, float %b) {
; CHECK-NOT: vcmp
; CHECK: vminnm.f32
%cmp = fcmp fast ogt float %a, %b
- %cond = select i1 %cmp, float %b, float %a
+ %cond = select nsz i1 %cmp, float %b, float %a
ret float %cond
}
@@ -34,7 +34,7 @@ define double @fp-armv8_vminnm_oge_rev(double %a, double %b) {
; CHECK-NOT: vcmp
; CHECK: vminnm.f64
%cmp = fcmp fast oge double %a, %b
- %cond = select i1 %cmp, double %b, double %a
+ %cond = select nsz i1 %cmp, double %b, double %a
ret double %cond
}
@@ -43,7 +43,7 @@ define float @fp-armv8_vminnm_u(float %a, float %b) {
; CHECK-NOT: vcmp
; CHECK: vminnm.f32
%cmp = fcmp fast ult float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nsz i1 %cmp, float %a, float %b
ret float %cond
}
@@ -52,7 +52,7 @@ define float @fp-armv8_vminnm_ule(float %a, float %b) {
; CHECK-NOT: vcmp
; CHECK: vminnm.f32
%cmp = fcmp fast ule float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nsz i1 %cmp, float %a, float %b
ret float %cond
}
@@ -61,7 +61,7 @@ define float @fp-armv8_vminnm_u_rev(float %a, float %b) {
; CHECK-NOT: vcmp
; CHECK: vminnm.f32
%cmp = fcmp fast ugt float %a, %b
- %cond = select i1 %cmp, float %b, float %a
+ %cond = select nsz i1 %cmp, float %b, float %a
ret float %cond
}
@@ -70,7 +70,7 @@ define double @fp-armv8_vminnm_uge_rev(double %a, double %b) {
; CHECK-NOT: vcmp
; CHECK: vminnm.f64
%cmp = fcmp fast uge double %a, %b
- %cond = select i1 %cmp, double %b, double %a
+ %cond = select nsz i1 %cmp, double %b, double %a
ret double %cond
}
@@ -79,7 +79,7 @@ define float @fp-armv8_vmaxnm_o(float %a, float %b) {
; CHECK-NOT: vcmp
; CHECK: vmaxnm.f32
%cmp = fcmp fast ogt float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nsz i1 %cmp, float %a, float %b
ret float %cond
}
@@ -88,7 +88,7 @@ define float @fp-armv8_vmaxnm_oge(float %a, float %b) {
; CHECK-NOT: vcmp
; CHECK: vmaxnm.f32
%cmp = fcmp fast oge float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nsz i1 %cmp, float %a, float %b
ret float %cond
}
@@ -97,7 +97,7 @@ define float @fp-armv8_vmaxnm_o_rev(float %a, float %b) {
; CHECK-NOT: vcmp
; CHECK: vmaxnm.f32
%cmp = fcmp fast olt float %a, %b
- %cond = select i1 %cmp, float %b, float %a
+ %cond = select nsz i1 %cmp, float %b, float %a
ret float %cond
}
@@ -106,7 +106,7 @@ define float @fp-armv8_vmaxnm_ole_rev(float %a, float %b) {
; CHECK-NOT: vcmp
; CHECK: vmaxnm.f32
%cmp = fcmp fast ole float %a, %b
- %cond = select i1 %cmp, float %b, float %a
+ %cond = select nsz i1 %cmp, float %b, float %a
ret float %cond
}
@@ -115,7 +115,7 @@ define float @fp-armv8_vmaxnm_u(float %a, float %b) {
; CHECK-NOT: vcmp
; CHECK: vmaxnm.f32
%cmp = fcmp fast ugt float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nsz i1 %cmp, float %a, float %b
ret float %cond
}
@@ -124,7 +124,7 @@ define float @fp-armv8_vmaxnm_uge(float %a, float %b) {
; CHECK-NOT: vcmp
; CHECK: vmaxnm.f32
%cmp = fcmp fast uge float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nsz i1 %cmp, float %a, float %b
ret float %cond
}
@@ -133,7 +133,7 @@ define float @fp-armv8_vmaxnm_u_rev(float %a, float %b) {
; CHECK-NOT: vcmp
; CHECK: vmaxnm.f32
%cmp = fcmp fast ult float %a, %b
- %cond = select i1 %cmp, float %b, float %a
+ %cond = select nsz i1 %cmp, float %b, float %a
ret float %cond
}
@@ -142,7 +142,7 @@ define double @fp-armv8_vmaxnm_ule_rev(double %a, double %b) {
; CHECK-NOT: vcmp
; CHECK: vmaxnm.f64
%cmp = fcmp fast ule double %a, %b
- %cond = select i1 %cmp, double %b, double %a
+ %cond = select nsz i1 %cmp, double %b, double %a
ret double %cond
}
@@ -153,9 +153,9 @@ define float @fp-armv8_vminnm_NNNo(float %a) {
; CHECK: vminnm.f32
; CHECK: vminnm.f32
%cmp1 = fcmp fast olt float %a, 12.
- %cond1 = select i1 %cmp1, float %a, float 12.
+ %cond1 = select nsz i1 %cmp1, float %a, float 12.
%cmp2 = fcmp fast olt float 34., %cond1
- %cond2 = select i1 %cmp2, float 34., float %cond1
+ %cond2 = select nsz i1 %cmp2, float 34., float %cond1
ret float %cond2
}
@@ -164,9 +164,9 @@ define double @fp-armv8_vminnm_NNNole(double %a) {
; CHECK: vminnm.f64
; CHECK: vminnm.f64
%cmp1 = fcmp fast ole double %a, 34.
- %cond1 = select i1 %cmp1, double %a, double 34.
+ %cond1 = select nsz i1 %cmp1, double %a, double 34.
%cmp2 = fcmp fast ole double 56., %cond1
- %cond2 = select i1 %cmp2, double 56., double %cond1
+ %cond2 = select nsz i1 %cmp2, double 56., double %cond1
ret double %cond2
}
@@ -175,9 +175,9 @@ define float @fp-armv8_vminnm_NNNo_rev(float %a) {
; CHECK: vminnm.f32
; CHECK: vminnm.f32
%cmp1 = fcmp fast ogt float %a, 56.
- %cond1 = select i1 %cmp1, float 56., float %a
+ %cond1 = select nsz i1 %cmp1, float 56., float %a
%cmp2 = fcmp fast ogt float 78., %cond1
- %cond2 = select i1 %cmp2, float %cond1, float 78.
+ %cond2 = select nsz i1 %cmp2, float %cond1, float 78.
ret float %cond2
}
@@ -186,9 +186,9 @@ define double @fp-armv8_vminnm_NNNoge_rev(double %a) {
; CHECK: vminnm.f64
; CHECK: vminnm.f64
%cmp1 = fcmp fast oge double %a, 78.
- %cond1 = select i1 %cmp1, double 78., double %a
+ %cond1 = select nsz i1 %cmp1, double 78., double %a
%cmp2 = fcmp fast oge double 90., %cond1
- %cond2 = select i1 %cmp2, double %cond1, double 90.
+ %cond2 = select nsz i1 %cmp2, double %cond1, double 90.
ret double %cond2
}
@@ -197,9 +197,9 @@ define float @fp-armv8_vminnm_NNNu(float %b) {
; CHECK: vminnm.f32
; CHECK: vminnm.f32
%cmp1 = fcmp fast ult float 12., %b
- %cond1 = select i1 %cmp1, float 12., float %b
+ %cond1 = select nsz i1 %cmp1, float 12., float %b
%cmp2 = fcmp fast ult float %cond1, 34.
- %cond2 = select i1 %cmp2, float %cond1, float 34.
+ %cond2 = select nsz i1 %cmp2, float %cond1, float 34.
ret float %cond2
}
@@ -208,9 +208,9 @@ define float @fp-armv8_vminnm_NNNule(float %b) {
; CHECK: vminnm.f32
; CHECK: vminnm.f32
%cmp1 = fcmp fast ule float 34., %b
- %cond1 = select i1 %cmp1, float 34., float %b
+ %cond1 = select nsz i1 %cmp1, float 34., float %b
%cmp2 = fcmp fast ule float %cond1, 56.
- %cond2 = select i1 %cmp2, float %cond1, float 56.
+ %cond2 = select nsz i1 %cmp2, float %cond1, float 56.
ret float %cond2
}
@@ -219,9 +219,9 @@ define float @fp-armv8_vminnm_NNNu_rev(float %b) {
; CHECK: vminnm.f32
; CHECK: vminnm.f32
%cmp1 = fcmp fast ugt float 56., %b
- %cond1 = select i1 %cmp1, float %b, float 56.
+ %cond1 = select nsz i1 %cmp1, float %b, float 56.
%cmp2 = fcmp fast ugt float %cond1, 78.
- %cond2 = select i1 %cmp2, float 78., float %cond1
+ %cond2 = select nsz i1 %cmp2, float 78., float %cond1
ret float %cond2
}
@@ -230,9 +230,9 @@ define double @fp-armv8_vminnm_NNNuge_rev(double %b) {
; CHECK: vminnm.f64
; CHECK: vminnm.f64
%cmp1 = fcmp fast uge double 78., %b
- %cond1 = select i1 %cmp1, double %b, double 78.
+ %cond1 = select nsz i1 %cmp1, double %b, double 78.
%cmp2 = fcmp fast uge double %cond1, 90.
- %cond2 = select i1 %cmp2, double 90., double %cond1
+ %cond2 = select nsz i1 %cmp2, double 90., double %cond1
ret double %cond2
}
@@ -241,9 +241,9 @@ define float @fp-armv8_vmaxnm_NNNo(float %a) {
; CHECK: vmaxnm.f32
; CHECK: vmaxnm.f32
%cmp1 = fcmp fast ogt float %a, 12.
- %cond1 = select i1 %cmp1, float %a, float 12.
+ %cond1 = select nsz i1 %cmp1, float %a, float 12.
%cmp2 = fcmp fast ogt float 34., %cond1
- %cond2 = select i1 %cmp2, float 34., float %cond1
+ %cond2 = select nsz i1 %cmp2, float 34., float %cond1
ret float %cond2
}
@@ -252,9 +252,9 @@ define float @fp-armv8_vmaxnm_NNNoge(float %a) {
; CHECK: vmaxnm.f32
; CHECK: vmaxnm.f32
%cmp1 = fcmp fast oge float %a, 34.
- %cond1 = select i1 %cmp1, float %a, float 34.
+ %cond1 = select nsz i1 %cmp1, float %a, float 34.
%cmp2 = fcmp fast oge float 56., %cond1
- %cond2 = select i1 %cmp2, float 56., float %cond1
+ %cond2 = select nsz i1 %cmp2, float 56., float %cond1
ret float %cond2
}
@@ -263,9 +263,9 @@ define float @fp-armv8_vmaxnm_NNNo_rev(float %a) {
; CHECK: vmaxnm.f32
; CHECK: vmaxnm.f32
%cmp1 = fcmp fast olt float %a, 56.
- %cond1 = select i1 %cmp1, float 56., float %a
+ %cond1 = select nsz i1 %cmp1, float 56., float %a
%cmp2 = fcmp fast olt float 78., %cond1
- %cond2 = select i1 %cmp2, float %cond1, float 78.
+ %cond2 = select nsz i1 %cmp2, float %cond1, float 78.
ret float %cond2
}
@@ -274,9 +274,9 @@ define float @fp-armv8_vmaxnm_NNNole_rev(float %a) {
; CHECK: vmaxnm.f32
; CHECK: vmaxnm.f32
%cmp1 = fcmp fast ole float %a, 78.
- %cond1 = select i1 %cmp1, float 78., float %a
+ %cond1 = select nsz i1 %cmp1, float 78., float %a
%cmp2 = fcmp fast ole float 90., %cond1
- %cond2 = select i1 %cmp2, float %cond1, float 90.
+ %cond2 = select nsz i1 %cmp2, float %cond1, float 90.
ret float %cond2
}
@@ -285,9 +285,9 @@ define float @fp-armv8_vmaxnm_NNNu(float %b) {
; CHECK: vmaxnm.f32
; CHECK: vmaxnm.f32
%cmp1 = fcmp fast ugt float 12., %b
- %cond1 = select i1 %cmp1, float 12., float %b
+ %cond1 = select nsz i1 %cmp1, float 12., float %b
%cmp2 = fcmp fast ugt float %cond1, 34.
- %cond2 = select i1 %cmp2, float %cond1, float 34.
+ %cond2 = select nsz i1 %cmp2, float %cond1, float 34.
ret float %cond2
}
@@ -296,9 +296,9 @@ define float @fp-armv8_vmaxnm_NNNuge(float %b) {
; CHECK: vmaxnm.f32
; CHECK: vmaxnm.f32
%cmp1 = fcmp fast uge float 34., %b
- %cond1 = select i1 %cmp1, float 34., float %b
+ %cond1 = select nsz i1 %cmp1, float 34., float %b
%cmp2 = fcmp fast uge float %cond1, 56.
- %cond2 = select i1 %cmp2, float %cond1, float 56.
+ %cond2 = select nsz i1 %cmp2, float %cond1, float 56.
ret float %cond2
}
@@ -307,9 +307,9 @@ define float @fp-armv8_vmaxnm_NNNu_rev(float %b) {
; CHECK: vmaxnm.f32
; CHECK: vmaxnm.f32
%cmp1 = fcmp fast ult float 56., %b
- %cond1 = select i1 %cmp1, float %b, float 56.
+ %cond1 = select nsz i1 %cmp1, float %b, float 56.
%cmp2 = fcmp fast ult float %cond1, 78.
- %cond2 = select i1 %cmp2, float 78., float %cond1
+ %cond2 = select nsz i1 %cmp2, float 78., float %cond1
ret float %cond2
}
@@ -318,9 +318,9 @@ define double @fp-armv8_vmaxnm_NNNule_rev( double %b) {
; CHECK: vmaxnm.f64
; CHECK: vmaxnm.f64
%cmp1 = fcmp fast ule double 78., %b
- %cond1 = select i1 %cmp1, double %b, double 78.
+ %cond1 = select nsz i1 %cmp1, double %b, double 78.
%cmp2 = fcmp fast ule double %cond1, 90.
- %cond2 = select i1 %cmp2, double 90., double %cond1
+ %cond2 = select nsz i1 %cmp2, double 90., double %cond1
ret double %cond2
}
@@ -330,9 +330,9 @@ define float @fp-armv8_vminmaxnm_0(float %a) {
; CHECK: vminnm.f32
; CHECK: vmaxnm.f32
%cmp1 = fcmp fast olt float %a, 0.
- %cond1 = select i1 %cmp1, float %a, float 0.
+ %cond1 = select nsz i1 %cmp1, float %a, float 0.
%cmp2 = fcmp fast ogt float %cond1, 0.
- %cond2 = select i1 %cmp2, float %cond1, float 0.
+ %cond2 = select nsz i1 %cmp2, float %cond1, float 0.
ret float %cond2
}
@@ -342,9 +342,9 @@ define float @fp-armv8_vminmaxnm_neg0(float %a) {
; CHECK: vminnm.f32
; CHECK: vmaxnm.f32
%cmp1 = fcmp fast olt float %a, -0.
- %cond1 = select i1 %cmp1, float %a, float -0.
+ %cond1 = select nsz i1 %cmp1, float %a, float -0.
%cmp2 = fcmp fast ugt float %cond1, -0.
- %cond2 = select i1 %cmp2, float %cond1, float -0.
+ %cond2 = select nsz i1 %cmp2, float %cond1, float -0.
ret float %cond2
}
@@ -354,9 +354,9 @@ define float @fp-armv8_vminmaxnm_e_0(float %a) {
; CHECK: vminnm.f32
; CHECK: vmaxnm.f32
%cmp1 = fcmp fast ule float 0., %a
- %cond1 = select i1 %cmp1, float 0., float %a
+ %cond1 = select nsz i1 %cmp1, float 0., float %a
%cmp2 = fcmp fast uge float 0., %cond1
- %cond2 = select i1 %cmp2, float 0., float %cond1
+ %cond2 = select nsz i1 %cmp2, float 0., float %cond1
ret float %cond2
}
@@ -366,9 +366,9 @@ define float @fp-armv8_vminmaxnm_e_neg0(float %a) {
; CHECK: vminnm.f32
; CHECK: vmaxnm.f32
%cmp1 = fcmp fast ule float -0., %a
- %cond1 = select i1 %cmp1, float -0., float %a
+ %cond1 = select nsz i1 %cmp1, float -0., float %a
%cmp2 = fcmp fast oge float -0., %cond1
- %cond2 = select i1 %cmp2, float -0., float %cond1
+ %cond2 = select nsz i1 %cmp2, float -0., float %cond1
ret float %cond2
}