[llvm] 7dde602 - [DAG] canCreateUndefOrPoison - add handling for ISD::SELECT (#146046)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Jun 27 03:49:11 PDT 2025
Author: Simon Pilgrim
Date: 2025-06-27T11:49:08+01:00
New Revision: 7dde6027a0791267add993ca5d4450e44b75b870
URL: https://github.com/llvm/llvm-project/commit/7dde6027a0791267add993ca5d4450e44b75b870
DIFF: https://github.com/llvm/llvm-project/commit/7dde6027a0791267add993ca5d4450e44b75b870.diff
LOG: [DAG] canCreateUndefOrPoison - add handling for ISD::SELECT (#146046)
Follow-up to #143760, which handled ISD::VSELECT
I've moved ISD::SELECT/VSELECT under the "No poison except from flags
(which is handled above)" subgroup to try to remind people that these
can have poison-generating FMFs (NINF/NNAN), even though this hasn't
been well explained anywhere I can find :(
Helps with regressions from #145939
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
llvm/test/CodeGen/RISCV/float-convert.ll
llvm/test/CodeGen/RISCV/rv64-float-convert.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 30ee6a99b9dfc..790d2972448fd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -5553,7 +5553,6 @@ bool SelectionDAG::canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts,
case ISD::BUILD_VECTOR:
case ISD::BUILD_PAIR:
case ISD::SPLAT_VECTOR:
- case ISD::VSELECT:
return false;
case ISD::SELECT_CC:
@@ -5577,6 +5576,8 @@ bool SelectionDAG::canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts,
case ISD::OR:
case ISD::ZERO_EXTEND:
+ case ISD::SELECT:
+ case ISD::VSELECT:
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
index a597faa028f22..5b4866c386793 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
@@ -158,7 +158,7 @@ define amdgpu_ps float @v_test_cvt_v2f64_v2bf16_v(<2 x double> %src) {
; GFX-950-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v7
; GFX-950-NEXT: v_cndmask_b32_e64 v2, -1, 1, s[2:3]
; GFX-950-NEXT: v_add_u32_e32 v2, v6, v2
-; GFX-950-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX-950-NEXT: s_or_b64 vcc, s[0:1], vcc
; GFX-950-NEXT: v_cvt_f32_f64_e32 v5, v[0:1]
; GFX-950-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX-950-NEXT: v_cvt_f64_f32_e32 v[2:3], v5
@@ -168,7 +168,7 @@ define amdgpu_ps float @v_test_cvt_v2f64_v2bf16_v(<2 x double> %src) {
; GFX-950-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v6
; GFX-950-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[2:3]
; GFX-950-NEXT: v_add_u32_e32 v0, v5, v0
-; GFX-950-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX-950-NEXT: s_or_b64 vcc, s[0:1], vcc
; GFX-950-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v4
; GFX-950-NEXT: ; return to shader part epilog
diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index 797d59dfcdc86..1cb7b27dd69e4 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -643,12 +643,12 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IF-NEXT: addi a2, a3, -1
; RV32IF-NEXT: .LBB12_4: # %start
; RV32IF-NEXT: feq.s a3, fs0, fs0
-; RV32IF-NEXT: neg a4, s0
-; RV32IF-NEXT: neg a5, a1
+; RV32IF-NEXT: neg a4, a1
+; RV32IF-NEXT: neg a1, s0
; RV32IF-NEXT: neg a3, a3
-; RV32IF-NEXT: and a0, a4, a0
+; RV32IF-NEXT: and a0, a1, a0
; RV32IF-NEXT: and a1, a3, a2
-; RV32IF-NEXT: or a0, a5, a0
+; RV32IF-NEXT: or a0, a4, a0
; RV32IF-NEXT: and a0, a3, a0
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rv64-float-convert.ll b/llvm/test/CodeGen/RISCV/rv64-float-convert.ll
index 6aae8984546ed..896e371452db8 100644
--- a/llvm/test/CodeGen/RISCV/rv64-float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-float-convert.ll
@@ -125,26 +125,26 @@ define i128 @fptosi_sat_f32_to_i128(float %a) nounwind {
; RV64IF-NEXT: fmv.w.x fa5, a0
; RV64IF-NEXT: fle.s s0, fa5, fa0
; RV64IF-NEXT: call __fixsfti
-; RV64IF-NEXT: li a3, -1
+; RV64IF-NEXT: li a2, -1
; RV64IF-NEXT: bnez s0, .LBB4_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: slli a1, a3, 63
+; RV64IF-NEXT: slli a1, a2, 63
; RV64IF-NEXT: .LBB4_2:
-; RV64IF-NEXT: lui a2, %hi(.LCPI4_0)
-; RV64IF-NEXT: flw fa5, %lo(.LCPI4_0)(a2)
-; RV64IF-NEXT: flt.s a2, fa5, fs0
-; RV64IF-NEXT: beqz a2, .LBB4_4
+; RV64IF-NEXT: lui a3, %hi(.LCPI4_0)
+; RV64IF-NEXT: flw fa5, %lo(.LCPI4_0)(a3)
+; RV64IF-NEXT: flt.s a3, fa5, fs0
+; RV64IF-NEXT: beqz a3, .LBB4_4
; RV64IF-NEXT: # %bb.3:
-; RV64IF-NEXT: srli a1, a3, 1
+; RV64IF-NEXT: srli a1, a2, 1
; RV64IF-NEXT: .LBB4_4:
-; RV64IF-NEXT: feq.s a3, fs0, fs0
+; RV64IF-NEXT: feq.s a2, fs0, fs0
+; RV64IF-NEXT: neg a3, a3
; RV64IF-NEXT: neg a4, s0
; RV64IF-NEXT: neg a2, a2
-; RV64IF-NEXT: neg a3, a3
; RV64IF-NEXT: and a0, a4, a0
-; RV64IF-NEXT: and a1, a3, a1
-; RV64IF-NEXT: or a0, a2, a0
-; RV64IF-NEXT: and a0, a3, a0
+; RV64IF-NEXT: and a1, a2, a1
+; RV64IF-NEXT: or a0, a3, a0
+; RV64IF-NEXT: and a0, a2, a0
; RV64IF-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IF-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64IF-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
More information about the llvm-commits
mailing list