[llvm] [InstCombine] Fold ((X + AddC) & Mask) ^ Mask to ((Mask - AddC) - X) & Mask (PR #174278)

via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 29 00:55:02 PST 2026


https://github.com/ParkHanbum updated https://github.com/llvm/llvm-project/pull/174278

From 89885faebc54a782b11f1790402fd556943139a4 Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Sun, 4 Jan 2026 00:03:03 +0900
Subject: [PATCH 1/4] add testcases for upcoming patch
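
The tests cover the basic i8 form, a splat vector, an AddC wider than
the mask, a negative AddC, and a sub form that is canonicalized to an
add, plus negative tests where the xor constant does not match the
mask, the mask is not a low-bit mask, and the add has multiple uses.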

---
 .../Transforms/InstCombine/and-xor-merge.ll   | 113 ++++++++++++++++++
 1 file changed, 113 insertions(+)

diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index cf1285cbc11a4..7403acb172df7 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -79,3 +79,116 @@ define i32 @PR75692_3(i32 %x, i32 %y) {
   %t4 = and i32 %t2, %t3
   ret i32 %t4
 }
+
+; ((X + C) & M) ^ M --> ((M - C) - X) & M
+define i8 @add_and_xor_basic(i8 %x) {
+; CHECK-LABEL: @add_and_xor_basic(
+; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 15
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 15
+; CHECK-NEXT:    ret i8 [[XOR]]
+;
+  %add = add i8 %x, 5
+  %and = and i8 %add, 15
+  %xor = xor i8 %and, 15
+  ret i8 %xor
+}
+
+define <4 x i32> @add_and_xor_vector_splat(<4 x i32> %x) {
+; CHECK-LABEL: @add_and_xor_vector_splat(
+; CHECK-NEXT:    [[ADD:%.*]] = add <4 x i32> [[X:%.*]], splat (i32 10)
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[ADD]], splat (i32 63)
+; CHECK-NEXT:    [[XOR:%.*]] = xor <4 x i32> [[AND]], splat (i32 63)
+; CHECK-NEXT:    ret <4 x i32> [[XOR]]
+;
+  %add = add <4 x i32> %x, <i32 10, i32 10, i32 10, i32 10>
+  %and = and <4 x i32> %add, <i32 63, i32 63, i32 63, i32 63>
+  %xor = xor <4 x i32> %and, <i32 63, i32 63, i32 63, i32 63>
+  ret <4 x i32> %xor
+}
+
+define i32 @add_and_xor_overflow_addc(i32 %x) {
+; CHECK-LABEL: @add_and_xor_overflow_addc(
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], 4
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 31
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 31
+; CHECK-NEXT:    ret i32 [[XOR]]
+;
+  %add = add i32 %x, 100
+  %and = and i32 %add, 31
+  %xor = xor i32 %and, 31
+  ret i32 %xor
+}
+
+define i32 @add_and_xor_negative_addc(i32 %x) {
+; CHECK-LABEL: @add_and_xor_negative_addc(
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], 254
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 255
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 255
+; CHECK-NEXT:    ret i32 [[XOR]]
+;
+  %add = add i32 %x, -2
+  %and = and i32 %add, 255
+  %xor = xor i32 %and, 255
+  ret i32 %xor
+}
+
+; The sub is first canonicalized to 'xor(and(add(x, 11), 15), 15)', so the fold still applies.
+define i8 @add_and_xor_sub_op(i8 %x) {
+; CHECK-LABEL: @add_and_xor_sub_op(
+; CHECK-NEXT:    [[SUB:%.*]] = add i8 [[X:%.*]], 11
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[SUB]], 15
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 15
+; CHECK-NEXT:    ret i8 [[XOR]]
+;
+  %sub = sub i8 %x, 5
+  %and = and i8 %sub, 15
+  %xor = xor i8 %and, 15
+  ret i8 %xor
+}
+
+
+; and_xor_mask negative tests
+
+define i8 @neg_add_and_xor_mask_mismatch(i8 %x) {
+; CHECK-LABEL: @neg_add_and_xor_mask_mismatch(
+; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 15
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 7
+; CHECK-NEXT:    ret i8 [[XOR]]
+;
+  %add = add i8 %x, 5
+  %and = and i8 %add, 15
+  %xor = xor i8 %and, 7
+  ret i8 %xor
+}
+
+define i8 @neg_add_and_xor_not_low_mask(i8 %x) {
+; CHECK-LABEL: @neg_add_and_xor_not_low_mask(
+; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 16
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 16
+; CHECK-NEXT:    ret i8 [[XOR]]
+;
+  %add = add i8 %x, 5
+  %and = and i8 %add, 16
+  %xor = xor i8 %and, 16
+  ret i8 %xor
+}
+
+define i8 @neg_add_and_xor_multi_use(i8 %x) {
+; CHECK-LABEL: @neg_add_and_xor_multi_use(
+; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
+; CHECK-NEXT:    call void @use(i8 [[ADD]])
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 15
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 15
+; CHECK-NEXT:    ret i8 [[XOR]]
+;
+  %add = add i8 %x, 5
+  call void @use(i8 %add)
+  %and = and i8 %add, 15
+  %xor = xor i8 %and, 15
+  ret i8 %xor
+}
+
+declare void @use(i8)

From d457e7a59eb5a85e9151dc0cbf5ea0b57b526a8a Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Sat, 3 Jan 2026 22:30:45 +0900
Subject: [PATCH 2/4] [InstCombine] Fold ((X + AddC) & Mask) ^ Mask to ((Mask -
 AddC) - X) & Mask

This patch optimizes the following pattern:

((X + AddC) & Mask) ^ Mask
-> ((Mask - AddC) - X) & Mask
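
This holds because Mask is a low-bit mask: for Mask = 2^k - 1,
(a & Mask) ^ Mask flips exactly the low k bits, i.e. equals
(~a) & Mask = (-a - 1) & Mask, and Mask itself is -1 mod 2^k, so both
sides reduce to (-X - AddC - 1) mod 2^k. Wrap-around in Mask - AddC is
therefore harmless. As an informal sanity check on top of the Alive2
proof, the illustrative C++ harness below (not part of the patch)
verifies the identity exhaustively for i8 with Mask = 15:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const unsigned M = 15; // low-bit mask, 2^4 - 1
    for (unsigned C = 0; C < 256; ++C) {
      for (unsigned X = 0; X < 256; ++X) {
        // Original form: ((X + C) & M) ^ M, evaluated in i8.
        uint8_t Lhs = (uint8_t)(((uint8_t)(X + C) & M) ^ M);
        // Folded form: ((M - C) - X) & M, with wrapping i8 subtraction.
        uint8_t Rhs = (uint8_t)((uint8_t)((uint8_t)(M - C) - X) & M);
        if (Lhs != Rhs) {
          std::printf("mismatch at X=%u C=%u\n", X, C);
          return 1;
        }
      }
    }
    std::puts("identity holds for all i8 X, C with M = 15");
    return 0;
  }

It should exit 0 for every combination, matching the proof.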

Proof: https://alive2.llvm.org/ce/z/oekFkb
Fixes: #128475
---
 .../InstCombine/InstCombineAndOrXor.cpp       | 24 +++++++++++++++
 .../Transforms/InstCombine/and-xor-merge.ll   | 29 ++++++++-----------
 2 files changed, 36 insertions(+), 17 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index b23519fd9f77f..4ed7dea7391be 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5201,6 +5201,27 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
   return nullptr;
 }
 
+// ((X + C) & M) ^ M --> ((M - C) - X) & M
+static Instruction *foldAndWithMask(BinaryOperator &I,
+                                    InstCombiner::BuilderTy &Builder) {
+  Value *InnerVal;
+  const APInt *AndMask, *XorMask, *AddC;
+
+  if (match(&I, m_Xor(m_And(m_Add(m_Value(InnerVal), m_APInt(AddC)),
+                            m_APInt(AndMask)),
+                      m_APInt(XorMask))) &&
+      *AndMask == *XorMask && AndMask->isMask()) {
+    APInt NewConst = *AndMask - *AddC;
+    Value *NewSub =
+        Builder.CreateSub(ConstantInt::get(I.getType(), NewConst), InnerVal);
+
+    return BinaryOperator::CreateAnd(NewSub,
+                                     ConstantInt::get(I.getType(), *AndMask));
+  }
+
+  return nullptr;
+}
+
 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
 // here. We should standardize that construct where it is needed or choose some
 // other way to ensure that commutated variants of patterns are not missed.
@@ -5546,5 +5567,8 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
   if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
     return Res;
 
+  if (Instruction *Res = foldAndWithMask(I, Builder))
+    return Res;
+
   return nullptr;
 }
diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index 7403acb172df7..e3784448e5b3c 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -83,10 +83,9 @@ define i32 @PR75692_3(i32 %x, i32 %y) {
 ; ((X + C) & M) ^ M --> ((M - C) - X) & M
 define i8 @add_and_xor_basic(i8 %x) {
 ; CHECK-LABEL: @add_and_xor_basic(
-; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[ADD:%.*]] = sub i8 10, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 15
-; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 15
-; CHECK-NEXT:    ret i8 [[XOR]]
+; CHECK-NEXT:    ret i8 [[AND]]
 ;
   %add = add i8 %x, 5
   %and = and i8 %add, 15
@@ -96,10 +95,9 @@ define i8 @add_and_xor_basic(i8 %x) {
 
 define <4 x i32> @add_and_xor_vector_splat(<4 x i32> %x) {
 ; CHECK-LABEL: @add_and_xor_vector_splat(
-; CHECK-NEXT:    [[ADD:%.*]] = add <4 x i32> [[X:%.*]], splat (i32 10)
+; CHECK-NEXT:    [[ADD:%.*]] = sub <4 x i32> splat (i32 53), [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[ADD]], splat (i32 63)
-; CHECK-NEXT:    [[XOR:%.*]] = xor <4 x i32> [[AND]], splat (i32 63)
-; CHECK-NEXT:    ret <4 x i32> [[XOR]]
+; CHECK-NEXT:    ret <4 x i32> [[AND]]
 ;
   %add = add <4 x i32> %x, <i32 10, i32 10, i32 10, i32 10>
   %and = and <4 x i32> %add, <i32 63, i32 63, i32 63, i32 63>
@@ -109,10 +107,9 @@ define <4 x i32> @add_and_xor_vector_splat(<4 x i32> %x) {
 
 define i32 @add_and_xor_overflow_addc(i32 %x) {
 ; CHECK-LABEL: @add_and_xor_overflow_addc(
-; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], 4
+; CHECK-NEXT:    [[ADD:%.*]] = sub i32 27, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 31
-; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 31
-; CHECK-NEXT:    ret i32 [[XOR]]
+; CHECK-NEXT:    ret i32 [[AND]]
 ;
   %add = add i32 %x, 100
   %and = and i32 %add, 31
@@ -122,10 +119,9 @@ define i32 @add_and_xor_overflow_addc(i32 %x) {
 
 define i32 @add_and_xor_negative_addc(i32 %x) {
 ; CHECK-LABEL: @add_and_xor_negative_addc(
-; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], 254
+; CHECK-NEXT:    [[ADD:%.*]] = sub i32 1, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 255
-; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 255
-; CHECK-NEXT:    ret i32 [[XOR]]
+; CHECK-NEXT:    ret i32 [[AND]]
 ;
   %add = add i32 %x, -2
   %and = and i32 %add, 255
@@ -136,10 +132,9 @@ define i32 @add_and_xor_negative_addc(i32 %x) {
 ; The sub is first canonicalized to 'xor(and(add(x, 11), 15), 15)', so the fold still applies.
 define i8 @add_and_xor_sub_op(i8 %x) {
 ; CHECK-LABEL: @add_and_xor_sub_op(
-; CHECK-NEXT:    [[SUB:%.*]] = add i8 [[X:%.*]], 11
+; CHECK-NEXT:    [[SUB:%.*]] = sub i8 4, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i8 [[SUB]], 15
-; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 15
-; CHECK-NEXT:    ret i8 [[XOR]]
+; CHECK-NEXT:    ret i8 [[AND]]
 ;
   %sub = sub i8 %x, 5
   %and = and i8 %sub, 15
@@ -180,8 +175,8 @@ define i8 @neg_add_and_xor_multi_use(i8 %x) {
 ; CHECK-LABEL: @neg_add_and_xor_multi_use(
 ; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
 ; CHECK-NEXT:    call void @use(i8 [[ADD]])
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 15
-; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 15
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 10, [[X]]
+; CHECK-NEXT:    [[XOR:%.*]] = and i8 [[TMP1]], 15
 ; CHECK-NEXT:    ret i8 [[XOR]]
 ;
   %add = add i8 %x, 5

From 60456ff81931ca3eba2986de7a208b1774d26dda Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Mon, 5 Jan 2026 01:24:31 +0900
Subject: [PATCH 3/4] fix matching pattern
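
Require one-use on both the add and the and so the fold does not fire
when the add has other users (as in neg_add_and_xor_multi_use, where it
would only create a new sub without removing the add), and match the
mask directly with m_LowBitMask instead of a separate isMask() check.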

---
 llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 7 ++++---
 llvm/test/Transforms/InstCombine/and-xor-merge.ll       | 4 ++--
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 4ed7dea7391be..c07d291c856c4 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5207,10 +5207,11 @@ static Instruction *foldAndWithMask(BinaryOperator &I,
   Value *InnerVal;
   const APInt *AndMask, *XorMask, *AddC;
 
-  if (match(&I, m_Xor(m_And(m_Add(m_Value(InnerVal), m_APInt(AddC)),
-                            m_APInt(AndMask)),
+  if (match(&I, m_Xor(m_OneUse(m_And(
+                          m_OneUse(m_Add(m_Value(InnerVal), m_APInt(AddC))),
+                          m_LowBitMask(AndMask))),
                       m_APInt(XorMask))) &&
-      *AndMask == *XorMask && AndMask->isMask()) {
+      *AndMask == *XorMask) {
     APInt NewConst = *AndMask - *AddC;
     Value *NewSub =
         Builder.CreateSub(ConstantInt::get(I.getType(), NewConst), InnerVal);
diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index e3784448e5b3c..5adf9915a9f95 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -175,8 +175,8 @@ define i8 @neg_add_and_xor_multi_use(i8 %x) {
 ; CHECK-LABEL: @neg_add_and_xor_multi_use(
 ; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
 ; CHECK-NEXT:    call void @use(i8 [[ADD]])
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 10, [[X]]
-; CHECK-NEXT:    [[XOR:%.*]] = and i8 [[TMP1]], 15
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 15
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 15
 ; CHECK-NEXT:    ret i8 [[XOR]]
 ;
   %add = add i8 %x, 5

From df4908be5de65e3b768640577e78a7458dfe05f3 Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Thu, 29 Jan 2026 17:53:55 +0900
Subject: [PATCH 4/4] change function name
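
The old name described only the and; foldMaskedAddXorPattern names the
full add/and/xor pattern being matched.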

---
 llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index c07d291c856c4..0f41eb834dc3f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5202,8 +5202,8 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
 }
 
 // ((X + C) & M) ^ M --> ((M - C) - X) & M
-static Instruction *foldAndWithMask(BinaryOperator &I,
-                                    InstCombiner::BuilderTy &Builder) {
+static Instruction *foldMaskedAddXorPattern(BinaryOperator &I,
+                                            InstCombiner::BuilderTy &Builder) {
   Value *InnerVal;
   const APInt *AndMask, *XorMask, *AddC;
 
@@ -5568,7 +5568,7 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
   if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
     return Res;
 
-  if (Instruction *Res = foldAndWithMask(I, Builder))
+  if (Instruction *Res = foldMaskedAddXorPattern(I, Builder))
     return Res;
 
   return nullptr;


