[llvm] [InstCombine] Fold (x + y) & (2^C) -> x & 2^C when y % 2^(C+1) == 0 (PR #166935)

Shamshura Egor via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 7 05:15:59 PST 2025


https://github.com/egorshamshura created https://github.com/llvm/llvm-project/pull/166935

Fixes: https://github.com/llvm/llvm-project/issues/152797

alive2: https://alive2.llvm.org/ce/z/h8HYTo
godbolt: https://godbolt.org/z/Mqzs9W8q4

>From 63a5ae4a772620069241395042d44a14ab59e125 Mon Sep 17 00:00:00 2001
From: Shamshura Egor <shamshuraegor at gmail.com>
Date: Fri, 7 Nov 2025 12:58:04 +0000
Subject: [PATCH 1/2] Added tests.

---
 .../InstCombine/redundant-add-in-and.ll       | 69 +++++++++++++++++++
 1 file changed, 69 insertions(+)
 create mode 100644 llvm/test/Transforms/InstCombine/redundant-add-in-and.ll

diff --git a/llvm/test/Transforms/InstCombine/redundant-add-in-and.ll b/llvm/test/Transforms/InstCombine/redundant-add-in-and.ll
new file mode 100644
index 0000000000000..66c5ce557a8e5
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/redundant-add-in-and.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+define i1 @addition_and_bitwise1(ptr %0) {
+; CHECK-LABEL: define i1 @addition_and_bitwise1(
+; CHECK-SAME: ptr [[TMP0:%.*]]) {
+; CHECK-NEXT:    [[V0:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[V0]], align 4
+; CHECK-NEXT:    [[V2:%.*]] = zext i32 [[V1]] to i64
+; CHECK-NEXT:    [[V3:%.*]] = ptrtoint ptr [[V0]] to i64
+; CHECK-NEXT:    [[V4:%.*]] = add i64 [[V2]], [[V3]]
+; CHECK-NEXT:    [[V5:%.*]] = and i64 [[V4]], 2
+; CHECK-NEXT:    [[V6:%.*]] = icmp eq i64 [[V5]], 0
+; CHECK-NEXT:    ret i1 [[V6]]
+;
+  %v0 = getelementptr inbounds nuw i8, ptr %0, i64 4
+  %v1 = load i32, ptr %v0, align 4
+  %v2 = zext i32 %v1 to i64
+  %v3 = ptrtoint ptr %v0 to i64
+  %v4 = add i64 %v2, %v3
+  %v5 = and i64 %v4, 2
+  %v6 = icmp eq i64 %v5, 0
+  ret i1 %v6
+}
+
+define i1 @addition_and_bitwise2(ptr %0) {
+; CHECK-LABEL: define i1 @addition_and_bitwise2(
+; CHECK-SAME: ptr [[TMP0:%.*]]) {
+; CHECK-NEXT:    [[V0:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[V0]], align 16
+; CHECK-NEXT:    [[V2:%.*]] = zext i32 [[V1]] to i64
+; CHECK-NEXT:    [[V3:%.*]] = ptrtoint ptr [[V0]] to i64
+; CHECK-NEXT:    [[V4:%.*]] = add i64 [[V2]], [[V3]]
+; CHECK-NEXT:    [[V5:%.*]] = and i64 [[V4]], 4
+; CHECK-NEXT:    [[V6:%.*]] = icmp eq i64 [[V5]], 0
+; CHECK-NEXT:    ret i1 [[V6]]
+;
+  %v0 = getelementptr inbounds nuw i8, ptr %0, i64 4
+  %v1 = load i32, ptr %v0, align 16
+  %v2 = zext i32 %v1 to i64
+  %v3 = ptrtoint ptr %v0 to i64
+  %v4 = add i64 %v2, %v3
+  %v5 = and i64 %v4, 4
+  %v6 = icmp eq i64 %v5, 0
+  ret i1 %v6
+}
+
+define i1 @addition_and_bitwise3(ptr %0) {
+; CHECK-LABEL: define i1 @addition_and_bitwise3(
+; CHECK-SAME: ptr [[TMP0:%.*]]) {
+; CHECK-NEXT:    [[V0:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[V0]], align 16
+; CHECK-NEXT:    [[V2:%.*]] = zext i32 [[V1]] to i64
+; CHECK-NEXT:    [[V3:%.*]] = ptrtoint ptr [[V0]] to i64
+; CHECK-NEXT:    [[V4:%.*]] = add i64 [[V3]], [[V2]]
+; CHECK-NEXT:    [[V5:%.*]] = and i64 [[V4]], 4
+; CHECK-NEXT:    [[V6:%.*]] = icmp eq i64 [[V5]], 0
+; CHECK-NEXT:    ret i1 [[V6]]
+;
+  %v0 = getelementptr inbounds nuw i8, ptr %0, i64 4
+  %v1 = load i32, ptr %v0, align 16
+  %v2 = zext i32 %v1 to i64
+  %v3 = ptrtoint ptr %v0 to i64
+  %v4 = add i64 %v3, %v2
+  %v5 = and i64 %v4, 4
+  %v6 = icmp eq i64 %v5, 0
+  ret i1 %v6
+}
+

>From 341d11b2ee1ec7a47a53e2db6337169ddccf02ee Mon Sep 17 00:00:00 2001
From: Shamshura Egor <shamshuraegor at gmail.com>
Date: Fri, 7 Nov 2025 13:14:38 +0000
Subject: [PATCH 2/2] Added opt.

---
 .../InstCombine/InstCombineAndOrXor.cpp       | 13 ++++++++
 .../InstCombine/redundant-add-in-and.ll       | 19 ++++--------
 .../Transforms/LoopVectorize/induction.ll     | 30 +++++++++++++------
 3 files changed, 39 insertions(+), 23 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index cbaff294819a2..cf009c4647d94 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -2470,6 +2470,19 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
     return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Y);
   }
 
+  // (x + y) & (2^C) -> x & 2^C when y % 2^(C+1) == 0
+  if (match(Op0, m_Add(m_Value(X), m_Value(Y)))) {
+    const APInt *PowerC;
+    if (match(Op1, m_Power2(PowerC)) && !PowerC->isOne()) {
+      KnownBits YKnown = computeKnownBits(Y, &I);
+
+      APInt YMod = YKnown.Zero;
+      if (YMod.countTrailingZeros() > PowerC->logBase2() + 1) {
+        return BinaryOperator::CreateAnd(X, Op1);
+      }
+    }
+  }
+
   // Canonicalize:
   // (X +/- Y) & Y --> ~X & Y when Y is a power of 2.
   if (match(&I, m_c_And(m_Value(Y), m_OneUse(m_CombineOr(
diff --git a/llvm/test/Transforms/InstCombine/redundant-add-in-and.ll b/llvm/test/Transforms/InstCombine/redundant-add-in-and.ll
index 66c5ce557a8e5..85be06d769fcc 100644
--- a/llvm/test/Transforms/InstCombine/redundant-add-in-and.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-add-in-and.ll
@@ -6,11 +6,8 @@ define i1 @addition_and_bitwise1(ptr %0) {
 ; CHECK-SAME: ptr [[TMP0:%.*]]) {
 ; CHECK-NEXT:    [[V0:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
 ; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[V0]], align 4
-; CHECK-NEXT:    [[V2:%.*]] = zext i32 [[V1]] to i64
-; CHECK-NEXT:    [[V3:%.*]] = ptrtoint ptr [[V0]] to i64
-; CHECK-NEXT:    [[V4:%.*]] = add i64 [[V2]], [[V3]]
-; CHECK-NEXT:    [[V5:%.*]] = and i64 [[V4]], 2
-; CHECK-NEXT:    [[V6:%.*]] = icmp eq i64 [[V5]], 0
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[V1]], 2
+; CHECK-NEXT:    [[V6:%.*]] = icmp eq i32 [[TMP2]], 0
 ; CHECK-NEXT:    ret i1 [[V6]]
 ;
   %v0 = getelementptr inbounds nuw i8, ptr %0, i64 4
@@ -28,11 +25,8 @@ define i1 @addition_and_bitwise2(ptr %0) {
 ; CHECK-SAME: ptr [[TMP0:%.*]]) {
 ; CHECK-NEXT:    [[V0:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
 ; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[V0]], align 16
-; CHECK-NEXT:    [[V2:%.*]] = zext i32 [[V1]] to i64
-; CHECK-NEXT:    [[V3:%.*]] = ptrtoint ptr [[V0]] to i64
-; CHECK-NEXT:    [[V4:%.*]] = add i64 [[V2]], [[V3]]
-; CHECK-NEXT:    [[V5:%.*]] = and i64 [[V4]], 4
-; CHECK-NEXT:    [[V6:%.*]] = icmp eq i64 [[V5]], 0
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[V1]], 4
+; CHECK-NEXT:    [[V6:%.*]] = icmp eq i32 [[TMP2]], 0
 ; CHECK-NEXT:    ret i1 [[V6]]
 ;
   %v0 = getelementptr inbounds nuw i8, ptr %0, i64 4
@@ -49,11 +43,8 @@ define i1 @addition_and_bitwise3(ptr %0) {
 ; CHECK-LABEL: define i1 @addition_and_bitwise3(
 ; CHECK-SAME: ptr [[TMP0:%.*]]) {
 ; CHECK-NEXT:    [[V0:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
-; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[V0]], align 16
-; CHECK-NEXT:    [[V2:%.*]] = zext i32 [[V1]] to i64
 ; CHECK-NEXT:    [[V3:%.*]] = ptrtoint ptr [[V0]] to i64
-; CHECK-NEXT:    [[V4:%.*]] = add i64 [[V3]], [[V2]]
-; CHECK-NEXT:    [[V5:%.*]] = and i64 [[V4]], 4
+; CHECK-NEXT:    [[V5:%.*]] = and i64 [[V3]], 4
 ; CHECK-NEXT:    [[V6:%.*]] = icmp eq i64 [[V5]], 0
 ; CHECK-NEXT:    ret i1 [[V6]]
 ;
diff --git a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll
index 66e4de5da7955..04f0cf78b4081 100644
--- a/llvm/test/Transforms/LoopVectorize/induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/induction.ll
@@ -4274,10 +4274,14 @@ define void @trunciv(ptr nocapture %a, i32 %start, i64 %k) {
 ; IND-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[K:%.*]], 2
 ; IND-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
 ; IND:       vector.scevcheck:
-; IND-NEXT:    [[DOTNOT:%.*]] = icmp ult i64 [[K]], 2147483649
-; IND-NEXT:    br i1 [[DOTNOT]], label [[VECTOR_PH:%.*]], label [[SCALAR_PH]]
+; IND-NEXT:    [[TMP5:%.*]] = and i64 [[K]], 2147483648
+; IND-NEXT:    [[TMP6:%.*]] = icmp ne i64 [[TMP5]], 0
+; IND-NEXT:    [[TMP7:%.*]] = add i64 [[K]], -4294967297
+; IND-NEXT:    [[TMP8:%.*]] = icmp ult i64 [[TMP7]], -4294967296
+; IND-NEXT:    [[TMP4:%.*]] = or i1 [[TMP6]], [[TMP8]]
+; IND-NEXT:    br i1 [[TMP4]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
 ; IND:       vector.ph:
-; IND-NEXT:    [[N_VEC:%.*]] = and i64 [[K]], 4294967294
+; IND-NEXT:    [[N_VEC:%.*]] = and i64 [[K]], 6442450942
 ; IND-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; IND:       vector.body:
 ; IND-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -4314,10 +4318,14 @@ define void @trunciv(ptr nocapture %a, i32 %start, i64 %k) {
 ; UNROLL-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[K:%.*]], 4
 ; UNROLL-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
 ; UNROLL:       vector.scevcheck:
-; UNROLL-NEXT:    [[DOTNOT:%.*]] = icmp ult i64 [[K]], 2147483649
-; UNROLL-NEXT:    br i1 [[DOTNOT]], label [[VECTOR_PH:%.*]], label [[SCALAR_PH]]
+; UNROLL-NEXT:    [[TMP5:%.*]] = and i64 [[K]], 2147483648
+; UNROLL-NEXT:    [[TMP6:%.*]] = icmp ne i64 [[TMP5]], 0
+; UNROLL-NEXT:    [[TMP7:%.*]] = add i64 [[K]], -4294967297
+; UNROLL-NEXT:    [[TMP8:%.*]] = icmp ult i64 [[TMP7]], -4294967296
+; UNROLL-NEXT:    [[TMP9:%.*]] = or i1 [[TMP6]], [[TMP8]]
+; UNROLL-NEXT:    br i1 [[TMP9]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
 ; UNROLL:       vector.ph:
-; UNROLL-NEXT:    [[N_VEC:%.*]] = and i64 [[K]], 4294967292
+; UNROLL-NEXT:    [[N_VEC:%.*]] = and i64 [[K]], 6442450940
 ; UNROLL-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; UNROLL:       vector.body:
 ; UNROLL-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -4402,10 +4410,14 @@ define void @trunciv(ptr nocapture %a, i32 %start, i64 %k) {
 ; INTERLEAVE-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[K:%.*]], 8
 ; INTERLEAVE-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
 ; INTERLEAVE:       vector.scevcheck:
-; INTERLEAVE-NEXT:    [[DOTNOT:%.*]] = icmp ult i64 [[K]], 2147483649
-; INTERLEAVE-NEXT:    br i1 [[DOTNOT]], label [[VECTOR_PH:%.*]], label [[SCALAR_PH]]
+; INTERLEAVE-NEXT:    [[TMP5:%.*]] = and i64 [[K]], 2147483648
+; INTERLEAVE-NEXT:    [[TMP6:%.*]] = icmp ne i64 [[TMP5]], 0
+; INTERLEAVE-NEXT:    [[TMP7:%.*]] = add i64 [[K]], -4294967297
+; INTERLEAVE-NEXT:    [[TMP8:%.*]] = icmp ult i64 [[TMP7]], -4294967296
+; INTERLEAVE-NEXT:    [[TMP9:%.*]] = or i1 [[TMP6]], [[TMP8]]
+; INTERLEAVE-NEXT:    br i1 [[TMP9]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
 ; INTERLEAVE:       vector.ph:
-; INTERLEAVE-NEXT:    [[N_VEC:%.*]] = and i64 [[K]], 4294967288
+; INTERLEAVE-NEXT:    [[N_VEC:%.*]] = and i64 [[K]], 6442450936
 ; INTERLEAVE-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; INTERLEAVE:       vector.body:
 ; INTERLEAVE-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]



More information about the llvm-commits mailing list