[llvm] [DAGCombiner] Bail out if BitWidthDiff > BitWidth when folding cltz(and) - BitWidthDiff (PR #166607)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Nov 5 10:23:45 PST 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Hongyu Chen (XChy)
<details>
<summary>Changes</summary>
Fixes https://github.com/llvm/llvm-project/issues/166596
We cannot call `APInt::isMask(numBits)` when `numBits` exceeds the APInt's bit width or when `numBits` is zero. We avoid both cases by guaranteeing `BitWidthDiff < BitWidth` before computing `AndMaskWidth = BitWidth - BitWidthDiff`.
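
For context, here is a minimal standalone sketch of why the guard must come before the subtraction. The helper name and its standalone shape are illustrative, not part of the patch; it only relies on the public `APInt` API, whose `isMask(numBits)` asserts that `numBits` is non-zero and does not exceed the bit width.

```cpp
#include "llvm/ADT/APInt.h"
using namespace llvm;

// Hypothetical helper mirroring the guarded check added in foldSubCtlzNot.
// APInt::isMask(numBits) asserts 0 < numBits <= bit width, so the
// subtraction below must not be allowed to reach zero or wrap.
static bool isLowBitMaskAfterShrink(const APInt &AndMask, unsigned BitWidth,
                                    uint64_t BitWidthDiff) {
  if (BitWidthDiff >= BitWidth)
    return false; // AndMaskWidth would be zero or wrap around
  unsigned AndMaskWidth = BitWidth - BitWidthDiff;
  return AndMask.isMask(AndMaskWidth);
}
```

The patch below applies the same early return directly in `foldSubCtlzNot`.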
---
Full diff: https://github.com/llvm/llvm-project/pull/166607.diff
2 Files Affected:
- (modified) llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (+2)
- (modified) llvm/test/CodeGen/RISCV/zicond-opts.ll (+32)
``````````diff
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 8676060eb3db7..989a47b70bc34 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4029,6 +4029,8 @@ static SDValue foldSubCtlzNot(SDNode *N, SelectionDAG &DAG) {
m_ConstInt(AndMask)))) {
// Type Legalisation Pattern:
// (sub (ctlz (and (xor Op XorMask) AndMask)) BitWidthDiff)
+ if (BitWidthDiff.getZExtValue() >= BitWidth)
+ return SDValue();
unsigned AndMaskWidth = BitWidth - BitWidthDiff.getZExtValue();
if (!(AndMask.isMask(AndMaskWidth) && XorMask.countr_one() >= AndMaskWidth))
return SDValue();
diff --git a/llvm/test/CodeGen/RISCV/zicond-opts.ll b/llvm/test/CodeGen/RISCV/zicond-opts.ll
index d8e2b2c2bf58d..305ab934e44a7 100644
--- a/llvm/test/CodeGen/RISCV/zicond-opts.ll
+++ b/llvm/test/CodeGen/RISCV/zicond-opts.ll
@@ -263,3 +263,35 @@ define i64 @test_inv_and_eqz(i64 %f, i64 %x, i1 %cond) {
%7 = and i64 %6, %f
ret i64 %7
}
+
+define i32 @pr166596(i32 %conv.i, i1 %iszero) #0 {
+; RV32ZICOND-LABEL: pr166596:
+; RV32ZICOND: # %bb.0: # %entry
+; RV32ZICOND-NEXT: andi a1, a1, 1
+; RV32ZICOND-NEXT: xori a0, a0, 1
+; RV32ZICOND-NEXT: zext.h a0, a0
+; RV32ZICOND-NEXT: clz a0, a0
+; RV32ZICOND-NEXT: addi a0, a0, 41
+; RV32ZICOND-NEXT: czero.nez a0, a0, a1
+; RV32ZICOND-NEXT: addi a0, a0, -9
+; RV32ZICOND-NEXT: ret
+;
+; RV64ZICOND-LABEL: pr166596:
+; RV64ZICOND: # %bb.0: # %entry
+; RV64ZICOND-NEXT: andi a1, a1, 1
+; RV64ZICOND-NEXT: xori a0, a0, 1
+; RV64ZICOND-NEXT: zext.h a0, a0
+; RV64ZICOND-NEXT: clz a0, a0
+; RV64ZICOND-NEXT: addi a0, a0, 9
+; RV64ZICOND-NEXT: czero.nez a0, a0, a1
+; RV64ZICOND-NEXT: addi a0, a0, -9
+; RV64ZICOND-NEXT: ret
+entry:
+ %not.i = xor i32 %conv.i, 1
+ %conv2.i = trunc i32 %not.i to i16
+ %conv22 = zext i16 %conv2.i to i64
+ %0 = call i64 @llvm.ctlz.i64(i64 %conv22, i1 false)
+ %cast = trunc i64 %0 to i32
+ %clzg = select i1 %iszero, i32 -9, i32 %cast
+ ret i32 %clzg
+}
``````````
</details>
https://github.com/llvm/llvm-project/pull/166607