[llvm] [InstCombine] fold `(a == 0 && b != 0) || (a != 0 && b == 0)` to `(a == 0) != (b == 0)` (PR #94915)
via llvm-commits
llvm-commits at lists.llvm.org
Sun Jun 9 13:58:53 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-llvm-transforms
Author: Zain Jaffal (zjaffal)
<details>
<summary>Changes</summary>
resolves https://github.com/llvm/llvm-project/issues/92966
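
For context, the claimed equivalence is easy to sanity-check outside of InstCombine. The sketch below is not part of the patch; it is a standalone C++ check, written only for illustration, that brute-forces the identity over a small range of values. That range is sufficient because both expressions depend only on whether `a` and `b` are zero.

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Both sides depend only on whether A and B are zero, so a small range of
  // values exercises every case of the identity.
  for (int32_t A = -4; A <= 4; ++A) {
    for (int32_t B = -4; B <= 4; ++B) {
      bool Src = (A == 0 && B != 0) || (A != 0 && B == 0);
      bool Tgt = (A == 0) != (B == 0);
      assert(Src == Tgt && "fold is not an equivalence");
    }
  }
  return 0;
}
```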
---
Full diff: https://github.com/llvm/llvm-project/pull/94915.diff
2 Files Affected:
- (modified) llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp (+33)
- (added) llvm/test/Transforms/InstCombine/fold-a-or-b-zero.ll (+104)
``````````diff
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 8695e9e69df20..e873a86f3332f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -3421,6 +3421,35 @@ Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
return foldAndOrOfICmpsUsingRanges(LHS, RHS, IsAnd);
}
+/// Fold (A == 0 & B != 0) | (A != 0 & B == 0) into (A == 0) != (B == 0).
+static Value *foldAorBZero(BinaryOperator &I,
+                           InstCombiner::BuilderTy &Builder) {
+  Value *Op0 = I.getOperand(0);
+  Value *Op1 = I.getOperand(1);
+  if (!Op0->hasOneUse() || !Op1->hasOneUse())
+    return nullptr;
+
+  // Match each operand of the 'or' as an 'and' of two compares against zero.
+  Value *A, *B;
+  CmpInst::Predicate PredA, PredB, PredA1, PredB1;
+  if (!match(Op0, m_c_And(m_Cmp(PredA, m_Value(A), m_ZeroInt()),
+                          m_Cmp(PredB, m_Value(B), m_ZeroInt()))) ||
+      !match(Op1, m_c_And(m_Cmp(PredA1, m_Specific(A), m_ZeroInt()),
+                          m_Cmp(PredB1, m_Specific(B), m_ZeroInt()))))
+    return nullptr;
+
+  // The compares must use equality predicates, and the predicates must be
+  // swapped between the two operands: (A == 0 & B != 0) | (A != 0 & B == 0).
+  if (!ICmpInst::isEquality(PredA) || !ICmpInst::isEquality(PredB) ||
+      PredA == PredB || PredA1 != PredB || PredB1 != PredA)
+    return nullptr;
+
+  Constant *Zero = Constant::getNullValue(A->getType());
+  auto *LHS = Builder.CreateICmpEQ(A, Zero);
+  auto *RHS = Builder.CreateICmpEQ(B, Zero);
+  return Builder.CreateICmpNE(LHS, RHS);
+}
+
// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
@@ -3450,6 +3479,10 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
return X;
+  // (A == 0 & B != 0) | (A != 0 & B == 0) -> (A == 0) != (B == 0)
+ if (Value *V = foldAorBZero(I, Builder))
+ return replaceInstUsesWith(I, V);
+
// (A&B)|(A&C) -> A&(B|C) etc
if (Value *V = foldUsingDistributiveLaws(I))
return replaceInstUsesWith(I, V);
diff --git a/llvm/test/Transforms/InstCombine/fold-a-or-b-zero.ll b/llvm/test/Transforms/InstCombine/fold-a-or-b-zero.ll
new file mode 100644
index 0000000000000..5a68927e879a7
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/fold-a-or-b-zero.ll
@@ -0,0 +1,104 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -S -passes=instcombine | FileCheck %s
+
+declare void @use(i1)
+
+define void @a_or_b(i32 %a, i32 %b) {
+; CHECK-LABEL: define void @a_or_b(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[A_EQ_ZERO:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT: [[B_NE_ZERO:%.*]] = icmp ne i32 [[B]], 0
+; CHECK-NEXT: [[AND_1:%.*]] = and i1 [[A_EQ_ZERO]], [[B_NE_ZERO]]
+; CHECK-NEXT: [[A_NE_ZERO:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[B_EQ_ZERO:%.*]] = icmp eq i32 [[B]], 0
+; CHECK-NEXT: [[AND_2:%.*]] = and i1 [[A_NE_ZERO]], [[B_EQ_ZERO]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[AND_1]], [[AND_2]]
+; CHECK-NEXT: call void @use(i1 [[OR]])
+; CHECK-NEXT: ret void
+;
+ %a_eq_zero = icmp eq i32 %a, 0
+ %b_ne_zero = icmp ne i32 %b, 0
+ %and.1 = and i1 %a_eq_zero, %b_ne_zero
+ %a_ne_zero = icmp ne i32 %a, 0
+ %b_eq_zero = icmp eq i32 %b, 0
+ %and.2 = and i1 %a_ne_zero, %b_eq_zero
+ %or = or i1 %and.1, %and.2
+ call void @use(i1 %or)
+ ret void
+}
+
+
+define void @a_or_b_zero(i32 %a, i32 %b) {
+; CHECK-LABEL: define void @a_or_b_zero(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[A_EQ_ZERO:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT: [[B_NE_ZERO:%.*]] = icmp ne i32 [[B]], 0
+; CHECK-NEXT: [[AND_1:%.*]] = and i1 [[A_EQ_ZERO]], [[B_NE_ZERO]]
+; CHECK-NEXT: [[A_NE_ZERO:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[B_EQ_ZERO:%.*]] = icmp eq i32 [[B]], 0
+; CHECK-NEXT: [[AND_2:%.*]] = and i1 [[A_NE_ZERO]], [[B_EQ_ZERO]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[AND_1]], [[AND_2]]
+; CHECK-NEXT: call void @use(i1 [[OR]])
+; CHECK-NEXT: ret void
+;
+ %a_eq_zero = icmp eq i32 %a, 0
+ %b_ne_zero = icmp ne i32 %b, 0
+ %and.1 = and i1 %a_eq_zero, %b_ne_zero
+ %a_ne_zero = icmp ne i32 %a, 0
+ %b_eq_zero = icmp eq i32 %b, 0
+ %and.2 = and i1 %a_ne_zero, %b_eq_zero
+ %or = or i1 %and.1, %and.2
+ call void @use(i1 %or)
+ ret void
+}
+
+define void @a_or_b_multiple_uses(i32 %a, i32 %b) {
+; CHECK-LABEL: define void @a_or_b_multiple_uses(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[A_EQ_ZERO:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT: [[B_NE_ZERO:%.*]] = icmp ne i32 [[B]], 0
+; CHECK-NEXT: [[AND_1:%.*]] = and i1 [[A_EQ_ZERO]], [[B_NE_ZERO]]
+; CHECK-NEXT: [[A_NE_ZERO:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[B_EQ_ZERO:%.*]] = icmp eq i32 [[B]], 0
+; CHECK-NEXT: [[AND_2:%.*]] = and i1 [[A_NE_ZERO]], [[B_EQ_ZERO]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[AND_1]], [[AND_2]]
+; CHECK-NEXT: call void @use(i1 [[OR]])
+; CHECK-NEXT: ret void
+;
+ %a_eq_zero = icmp eq i32 %a, 0
+ %b_ne_zero = icmp ne i32 %b, 0
+ %and.1 = and i1 %a_eq_zero, %b_ne_zero
+ %a_ne_zero = icmp ne i32 %a, 0
+ %b_eq_zero = icmp eq i32 %b, 0
+ %and.2 = and i1 %a_ne_zero, %b_eq_zero
+ call void @use(i1 %and.2)
+  %or = or i1 %and.1, %and.2
+  call void @use(i1 %or)
+  ret void
+}
+
+define void @a_or_b_multiple_uses_2(i32 %a, i32 %b) {
+; CHECK-LABEL: define void @a_or_b_multiple_uses_2(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[A_EQ_ZERO:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT: [[B_NE_ZERO:%.*]] = icmp ne i32 [[B]], 0
+; CHECK-NEXT: [[AND_1:%.*]] = and i1 [[A_EQ_ZERO]], [[B_NE_ZERO]]
+; CHECK-NEXT: [[A_NE_ZERO:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[B_EQ_ZERO:%.*]] = icmp eq i32 [[B]], 0
+; CHECK-NEXT: [[AND_2:%.*]] = and i1 [[A_NE_ZERO]], [[B_EQ_ZERO]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[AND_1]], [[AND_2]]
+; CHECK-NEXT: call void @use(i1 [[OR]])
+; CHECK-NEXT: ret void
+;
+ %a_eq_zero = icmp eq i32 %a, 0
+ %b_ne_zero = icmp ne i32 %b, 0
+ call void @use(i1 %b_ne_zero)
+ %and.1 = and i1 %a_eq_zero, %b_ne_zero
+ %a_ne_zero = icmp ne i32 %a, 0
+ %b_eq_zero = icmp eq i32 %b, 0
+ %and.2 = and i1 %a_ne_zero, %b_eq_zero
+ call void @use(i1 %and.1)
+  %or = or i1 %and.1, %and.2
+  call void @use(i1 %or)
+  ret void
+}
``````````
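
One subtlety worth noting: `PatternMatch`'s `m_Cmp` binds the matched predicate into its `Predicate` argument rather than filtering on it, which is why the implementation above verifies the predicates explicitly after matching. The standalone C++ sketch below (illustration only, not part of the patch) shows what that check prevents: if both operands of the `or` use the *same* predicates, the expression is the negation of `(a == 0) != (b == 0)`, so firing the fold there would be a miscompile.

```cpp
#include <cassert>
#include <cstdint>

int main() {
  for (int32_t A = -4; A <= 4; ++A) {
    for (int32_t B = -4; B <= 4; ++B) {
      // Same-predicate variant: "A and B are both zero, or both non-zero".
      bool SamePreds = (A == 0 && B == 0) || (A != 0 && B != 0);
      bool Folded = (A == 0) != (B == 0);
      // This variant is always the negation of the folded form, so it must
      // not be rewritten to it.
      assert(SamePreds == !Folded);
    }
  }
  return 0;
}
```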
</details>
https://github.com/llvm/llvm-project/pull/94915
More information about the llvm-commits
mailing list