[llvm] [InstCombine] Fold ((X + AddC) & Mask) ^ Mask to (~AddC - X) & Mask (PR #174278)

via llvm-commits <llvm-commits at lists.llvm.org>
Thu Feb 12 09:26:38 PST 2026


https://github.com/ParkHanbum updated https://github.com/llvm/llvm-project/pull/174278

From 65ed70994fd394fcc1dc3f69e4f761ca394fda56 Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Sun, 4 Jan 2026 00:03:03 +0900
Subject: [PATCH 01/13] add testcases for upcoming patch

---
 .../Transforms/InstCombine/and-xor-merge.ll   | 113 ++++++++++++++++++
 1 file changed, 113 insertions(+)

diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index cf1285cbc11a4..7403acb172df7 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -79,3 +79,116 @@ define i32 @PR75692_3(i32 %x, i32 %y) {
   %t4 = and i32 %t2, %t3
   ret i32 %t4
 }
+
+; ((X + C) & M) ^ M --> ((M - C) - X) & M
+define i8 @add_and_xor_basic(i8 %x) {
+; CHECK-LABEL: @add_and_xor_basic(
+; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 15
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 15
+; CHECK-NEXT:    ret i8 [[XOR]]
+;
+  %add = add i8 %x, 5
+  %and = and i8 %add, 15
+  %xor = xor i8 %and, 15
+  ret i8 %xor
+}
+
+define <4 x i32> @add_and_xor_vector_splat(<4 x i32> %x) {
+; CHECK-LABEL: @add_and_xor_vector_splat(
+; CHECK-NEXT:    [[ADD:%.*]] = add <4 x i32> [[X:%.*]], splat (i32 10)
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[ADD]], splat (i32 63)
+; CHECK-NEXT:    [[XOR:%.*]] = xor <4 x i32> [[AND]], splat (i32 63)
+; CHECK-NEXT:    ret <4 x i32> [[XOR]]
+;
+  %add = add <4 x i32> %x, <i32 10, i32 10, i32 10, i32 10>
+  %and = and <4 x i32> %add, <i32 63, i32 63, i32 63, i32 63>
+  %xor = xor <4 x i32> %and, <i32 63, i32 63, i32 63, i32 63>
+  ret <4 x i32> %xor
+}
+
+define i32 @add_and_xor_overflow_addc(i32 %x) {
+; CHECK-LABEL: @add_and_xor_overflow_addc(
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], 4
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 31
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 31
+; CHECK-NEXT:    ret i32 [[XOR]]
+;
+  %add = add i32 %x, 100
+  %and = and i32 %add, 31
+  %xor = xor i32 %and, 31
+  ret i32 %xor
+}
+
+define i32 @add_and_xor_negative_addc(i32 %x) {
+; CHECK-LABEL: @add_and_xor_negative_addc(
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], 254
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 255
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 255
+; CHECK-NEXT:    ret i32 [[XOR]]
+;
+  %add = add i32 %x, -2
+  %and = and i32 %add, 255
+  %xor = xor i32 %and, 255
+  ret i32 %xor
+}
+
+; This test is first transformed to 'xor(and(add(x, 11), 15), 15)' and then the fold applies.
+define i8 @add_and_xor_sub_op(i8 %x) {
+; CHECK-LABEL: @add_and_xor_sub_op(
+; CHECK-NEXT:    [[SUB:%.*]] = add i8 [[X:%.*]], 11
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[SUB]], 15
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 15
+; CHECK-NEXT:    ret i8 [[XOR]]
+;
+  %sub = sub i8 %x, 5
+  %and = and i8 %sub, 15
+  %xor = xor i8 %and, 15
+  ret i8 %xor
+}
+
+
+; add_and_xor negative tests
+
+define i8 @neg_add_and_xor_mask_mismatch(i8 %x) {
+; CHECK-LABEL: @neg_add_and_xor_mask_mismatch(
+; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 15
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 7
+; CHECK-NEXT:    ret i8 [[XOR]]
+;
+  %add = add i8 %x, 5
+  %and = and i8 %add, 15
+  %xor = xor i8 %and, 7
+  ret i8 %xor
+}
+
+define i8 @neg_add_and_xor_not_low_mask(i8 %x) {
+; CHECK-LABEL: @neg_add_and_xor_not_low_mask(
+; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 16
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 16
+; CHECK-NEXT:    ret i8 [[XOR]]
+;
+  %add = add i8 %x, 5
+  %and = and i8 %add, 16
+  %xor = xor i8 %and, 16
+  ret i8 %xor
+}
+
+define i8 @neg_add_and_xor_multi_use(i8 %x) {
+; CHECK-LABEL: @neg_add_and_xor_multi_use(
+; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
+; CHECK-NEXT:    call void @use(i8 [[ADD]])
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 15
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 15
+; CHECK-NEXT:    ret i8 [[XOR]]
+;
+  %add = add i8 %x, 5
+  call void @use(i8 %add)
+  %and = and i8 %add, 15
+  %xor = xor i8 %and, 15
+  ret i8 %xor
+}
+
+declare void @use(i8)
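
For orientation before the fold itself lands, here is what @add_and_xor_basic
computes, checked with a throwaway plain-C++ sketch (hypothetical, not part of
the patch; C = 5 and M = 15 as in that test):

#include <cstdint>
#include <cstdio>
#include <initializer_list>

int main() {
  for (unsigned x : {0u, 3u, 7u, 200u}) {
    uint8_t lhs = (uint8_t)(((x + 5) & 15) ^ 15); // pattern in the tests
    uint8_t rhs = (uint8_t)((10 - x) & 15);       // ((M - C) - X) & M
    std::printf("x=%3u  lhs=%2u  rhs=%2u\n", x, lhs, rhs); // always equal
  }
  return 0;
}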

From e3b6f9aa41fbb7783ad132d3e9d8932c4d0f351a Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Sat, 3 Jan 2026 22:30:45 +0900
Subject: [PATCH 02/13] Fold ((X + AddC) & Mask) ^ Mask to ((Mask - AddC) - X)
 & Mask

This patch optimizes the following pattern:

((X + AddC) & Mask) ^ Mask
-> ((Mask - AddC) - X) & Mask

Proof: https://alive2.llvm.org/ce/z/oekFkb
Fixes: #128475
---
 .../InstCombine/InstCombineAndOrXor.cpp       | 24 +++++++++++++++
 .../Transforms/InstCombine/and-xor-merge.ll   | 29 ++++++++-----------
 2 files changed, 36 insertions(+), 17 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 9cf382f8020fa..f9f03f29117c3 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5179,6 +5179,27 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
   return nullptr;
 }
 
+// ((X + C) & M) ^ M --> ((M - C) - X) & M
+static Instruction *foldAndWithMask(BinaryOperator &I,
+                                    InstCombiner::BuilderTy &Builder) {
+  Value *InnerVal;
+  const APInt *AndMask, *XorMask, *AddC;
+
+  if (match(&I, m_Xor(m_And(m_Add(m_Value(InnerVal), m_APInt(AddC)),
+                            m_APInt(AndMask)),
+                      m_APInt(XorMask))) &&
+      *AndMask == *XorMask && AndMask->isMask()) {
+    APInt NewConst = *AndMask - *AddC;
+    Value *NewSub =
+        Builder.CreateSub(ConstantInt::get(I.getType(), NewConst), InnerVal);
+
+    return BinaryOperator::CreateAnd(NewSub,
+                                     ConstantInt::get(I.getType(), *AndMask));
+  }
+
+  return nullptr;
+}
+
 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
 // here. We should standardize that construct where it is needed or choose some
 // other way to ensure that commutated variants of patterns are not missed.
@@ -5515,5 +5536,8 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
   if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
     return Res;
 
+  if (Instruction *Res = foldAndWithMask(I, Builder))
+    return Res;
+
   return nullptr;
 }
diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index 7403acb172df7..e3784448e5b3c 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -83,10 +83,9 @@ define i32 @PR75692_3(i32 %x, i32 %y) {
 ; ((X + C) & M) ^ M --> ((M - C) - X) & M
 define i8 @add_and_xor_basic(i8 %x) {
 ; CHECK-LABEL: @add_and_xor_basic(
-; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[ADD:%.*]] = sub i8 10, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 15
-; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 15
-; CHECK-NEXT:    ret i8 [[XOR]]
+; CHECK-NEXT:    ret i8 [[AND]]
 ;
   %add = add i8 %x, 5
   %and = and i8 %add, 15
@@ -96,10 +95,9 @@ define i8 @add_and_xor_basic(i8 %x) {
 
 define <4 x i32> @add_and_xor_vector_splat(<4 x i32> %x) {
 ; CHECK-LABEL: @add_and_xor_vector_splat(
-; CHECK-NEXT:    [[ADD:%.*]] = add <4 x i32> [[X:%.*]], splat (i32 10)
+; CHECK-NEXT:    [[ADD:%.*]] = sub <4 x i32> splat (i32 53), [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[ADD]], splat (i32 63)
-; CHECK-NEXT:    [[XOR:%.*]] = xor <4 x i32> [[AND]], splat (i32 63)
-; CHECK-NEXT:    ret <4 x i32> [[XOR]]
+; CHECK-NEXT:    ret <4 x i32> [[AND]]
 ;
   %add = add <4 x i32> %x, <i32 10, i32 10, i32 10, i32 10>
   %and = and <4 x i32> %add, <i32 63, i32 63, i32 63, i32 63>
@@ -109,10 +107,9 @@ define <4 x i32> @add_and_xor_vector_splat(<4 x i32> %x) {
 
 define i32 @add_and_xor_overflow_addc(i32 %x) {
 ; CHECK-LABEL: @add_and_xor_overflow_addc(
-; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], 4
+; CHECK-NEXT:    [[ADD:%.*]] = sub i32 27, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 31
-; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 31
-; CHECK-NEXT:    ret i32 [[XOR]]
+; CHECK-NEXT:    ret i32 [[AND]]
 ;
   %add = add i32 %x, 100
   %and = and i32 %add, 31
@@ -122,10 +119,9 @@ define i32 @add_and_xor_overflow_addc(i32 %x) {
 
 define i32 @add_and_xor_negative_addc(i32 %x) {
 ; CHECK-LABEL: @add_and_xor_negative_addc(
-; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], 254
+; CHECK-NEXT:    [[ADD:%.*]] = sub i32 1, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 255
-; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 255
-; CHECK-NEXT:    ret i32 [[XOR]]
+; CHECK-NEXT:    ret i32 [[AND]]
 ;
   %add = add i32 %x, -2
   %and = and i32 %add, 255
@@ -136,10 +132,9 @@ define i32 @add_and_xor_negative_addc(i32 %x) {
 ; This test is first transformed to 'xor(and(add(x, 11), 15), 15)' and then the fold applies.
 define i8 @add_and_xor_sub_op(i8 %x) {
 ; CHECK-LABEL: @add_and_xor_sub_op(
-; CHECK-NEXT:    [[SUB:%.*]] = add i8 [[X:%.*]], 11
+; CHECK-NEXT:    [[SUB:%.*]] = sub i8 4, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i8 [[SUB]], 15
-; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 15
-; CHECK-NEXT:    ret i8 [[XOR]]
+; CHECK-NEXT:    ret i8 [[AND]]
 ;
   %sub = sub i8 %x, 5
   %and = and i8 %sub, 15
@@ -180,8 +175,8 @@ define i8 @neg_add_and_xor_multi_use(i8 %x) {
 ; CHECK-LABEL: @neg_add_and_xor_multi_use(
 ; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
 ; CHECK-NEXT:    call void @use(i8 [[ADD]])
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 15
-; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 15
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 10, [[X]]
+; CHECK-NEXT:    [[XOR:%.*]] = and i8 [[TMP1]], 15
 ; CHECK-NEXT:    ret i8 [[XOR]]
 ;
   %add = add i8 %x, 5
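
The Alive2 link above carries the general proof; the same claim can also be
checked exhaustively at i8 width with a standalone brute-force sketch (plain
C++, my restatement, not taken from the patch):

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned k = 0; k <= 8; ++k) {
    unsigned m = (1u << k) - 1; // low-bit mask: 0, 1, 3, ..., 255
    for (unsigned x = 0; x < 256; ++x)
      for (unsigned c = 0; c < 256; ++c) {
        uint8_t lhs = (uint8_t)(((x + c) & m) ^ m); // original pattern
        uint8_t rhs = (uint8_t)(((m - c) - x) & m); // ((M - C) - X) & M
        assert(lhs == rhs); // holds since M == -1 (mod 2^k) for a low mask
      }
  }
  return 0;
}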

From d32ff7aed1a435751f02a24d322ca3f04c761c71 Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Mon, 5 Jan 2026 01:24:31 +0900
Subject: [PATCH 03/13] fix matching pattern

---
 llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 7 ++++---
 llvm/test/Transforms/InstCombine/and-xor-merge.ll       | 4 ++--
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index f9f03f29117c3..b814d4cf368d4 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5185,10 +5185,11 @@ static Instruction *foldAndWithMask(BinaryOperator &I,
   Value *InnerVal;
   const APInt *AndMask, *XorMask, *AddC;
 
-  if (match(&I, m_Xor(m_And(m_Add(m_Value(InnerVal), m_APInt(AddC)),
-                            m_APInt(AndMask)),
+  if (match(&I, m_Xor(m_OneUse(m_And(
+                          m_OneUse(m_Add(m_Value(InnerVal), m_APInt(AddC))),
+                          m_LowBitMask(AndMask))),
                       m_APInt(XorMask))) &&
-      *AndMask == *XorMask && AndMask->isMask()) {
+      *AndMask == *XorMask) {
     APInt NewConst = *AndMask - *AddC;
     Value *NewSub =
         Builder.CreateSub(ConstantInt::get(I.getType(), NewConst), InnerVal);
diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index e3784448e5b3c..5adf9915a9f95 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -175,8 +175,8 @@ define i8 @neg_add_and_xor_multi_use(i8 %x) {
 ; CHECK-LABEL: @neg_add_and_xor_multi_use(
 ; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
 ; CHECK-NEXT:    call void @use(i8 [[ADD]])
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 10, [[X]]
-; CHECK-NEXT:    [[XOR:%.*]] = and i8 [[TMP1]], 15
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 15
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 15
 ; CHECK-NEXT:    ret i8 [[XOR]]
 ;
   %add = add i8 %x, 5

From 3c4b01c12777bc79cd643b290edd8c124ea4798f Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Thu, 29 Jan 2026 17:53:55 +0900
Subject: [PATCH 04/13] change function name

---
 llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index b814d4cf368d4..73dd7f8e48b84 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5180,8 +5180,8 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
 }
 
 // ((X + C) & M) ^ M --> ((M - C) - X) & M
-static Instruction *foldAndWithMask(BinaryOperator &I,
-                                    InstCombiner::BuilderTy &Builder) {
+static Instruction *foldMaskedAddXorPattern(BinaryOperator &I,
+                                            InstCombiner::BuilderTy &Builder) {
   Value *InnerVal;
   const APInt *AndMask, *XorMask, *AddC;
 
@@ -5537,7 +5537,7 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
   if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
     return Res;
 
-  if (Instruction *Res = foldAndWithMask(I, Builder))
+  if (Instruction *Res = foldMaskedAddXorPattern(I, Builder))
     return Res;
 
   return nullptr;

From f75d2bcca5dee9138f3d5f98224aaa7327b244fc Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Thu, 29 Jan 2026 22:17:27 +0900
Subject: [PATCH 05/13] fix logical errors when adding NSW

---
 .../InstCombine/InstCombineAndOrXor.cpp       | 15 +++++++-
 .../Transforms/InstCombine/and-xor-merge.ll   | 37 +++++++++++++++++++
 2 files changed, 50 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 73dd7f8e48b84..3ed1b70e36bcb 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5181,7 +5181,8 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
 
 // ((X + C) & M) ^ M --> ((M - C) - X) & M
 static Instruction *foldMaskedAddXorPattern(BinaryOperator &I,
-                                            InstCombiner::BuilderTy &Builder) {
+                                            InstCombiner::BuilderTy &Builder,
+                                            InstCombinerImpl &IC) {
   Value *InnerVal;
   const APInt *AndMask, *XorMask, *AddC;
 
@@ -5191,6 +5192,16 @@ static Instruction *foldMaskedAddXorPattern(BinaryOperator &I,
                       m_APInt(XorMask))) &&
       *AndMask == *XorMask) {
     APInt NewConst = *AndMask - *AddC;
+    unsigned BitWidth = I.getType()->getScalarSizeInBits();
+    ConstantRange Range =
+        computeConstantRange(InnerVal, /*signed*/ true,
+                             /*UseInstrInfo=*/true, &IC.getAssumptionCache(),
+                             &I, &IC.getDominatorTree(), 0);
+    // Since C <= (X + C) <= M always holds, the 'nuw' check is unnecessary.
+    // Bail out if 'nsw' is implied to avoid creating poison for X=INT_MIN.
+    if (!Range.contains(APInt::getSignedMinValue(BitWidth)))
+      return nullptr;
+
     Value *NewSub =
         Builder.CreateSub(ConstantInt::get(I.getType(), NewConst), InnerVal);
 
@@ -5537,7 +5548,7 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
   if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
     return Res;
 
-  if (Instruction *Res = foldMaskedAddXorPattern(I, Builder))
+  if (Instruction *Res = foldMaskedAddXorPattern(I, Builder, *this))
     return Res;
 
   return nullptr;
diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index 5adf9915a9f95..b53996b50b030 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -93,6 +93,43 @@ define i8 @add_and_xor_basic(i8 %x) {
   ret i8 %xor
 }
 
+; Negative test
+; Should not optimize cases where `nsw` would be added to the sub.
+define i32 @add_and_xor_nsw(i32 %x) {
+; CHECK-LABEL: @add_and_xor_nsw(
+; CHECK-NEXT:    [[IS_POS:%.*]] = icmp sgt i32 [[X:%.*]], -1
+; CHECK-NEXT:    call void @llvm.assume(i1 [[IS_POS]])
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[X]], 63
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 63
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 63
+; CHECK-NEXT:    ret i32 [[XOR]]
+;
+  %is_pos = icmp sgt i32 %x, -1
+  call void @llvm.assume(i1 %is_pos)
+  %add = add nuw nsw i32 %x, 63
+  %and = and i32 %add, 63
+  %xor = xor i32 %and, 63
+  ret i32 %xor
+}
+
+; Should not optimize cases where `nsw`/`nuw` would be added to the sub.
+define i32 @add_and_xor_nsw_nuw(i32 %x) {
+; CHECK-LABEL: @add_and_xor_nsw_nuw(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X:%.*]], 10
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[X]], 54
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 63
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 63
+; CHECK-NEXT:    ret i32 [[XOR]]
+;
+  %cmp = icmp ult i32 %x, 10
+  call void @llvm.assume(i1 %cmp)
+  %add = add i32 %x, 4294967286
+  %and = and i32 %add, 63
+  %xor = xor i32 %and, 63
+  ret i32 %xor
+}
+
 define <4 x i32> @add_and_xor_vector_splat(<4 x i32> %x) {
 ; CHECK-LABEL: @add_and_xor_vector_splat(
 ; CHECK-NEXT:    [[ADD:%.*]] = sub <4 x i32> splat (i32 53), [[X:%.*]]

From 50391605b430c42477c616a735654d415fc10ea6 Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Wed, 4 Feb 2026 16:30:50 +0900
Subject: [PATCH 06/13] only infer nsw on the new sub when we can prove it

---
 .../InstCombine/InstCombineAndOrXor.cpp       | 31 +++++++++----------
 .../Transforms/InstCombine/and-xor-merge.ll   | 23 ++++++--------
 2 files changed, 23 insertions(+), 31 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 3ed1b70e36bcb..f2ee0e3ee4cf4 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5184,26 +5184,23 @@ static Instruction *foldMaskedAddXorPattern(BinaryOperator &I,
                                             InstCombiner::BuilderTy &Builder,
                                             InstCombinerImpl &IC) {
   Value *InnerVal;
+  Value *AddOpVal;
   const APInt *AndMask, *XorMask, *AddC;
-
-  if (match(&I, m_Xor(m_OneUse(m_And(
-                          m_OneUse(m_Add(m_Value(InnerVal), m_APInt(AddC))),
-                          m_LowBitMask(AndMask))),
-                      m_APInt(XorMask))) &&
+  if (match(&I,
+            m_Xor(m_OneUse(m_And(m_OneUse(m_CombineAnd(
+                                     m_Value(AddOpVal),
+                                     m_Add(m_Value(InnerVal), m_APInt(AddC)))),
+                                 m_LowBitMask(AndMask))),
+                  m_APInt(XorMask))) &&
       *AndMask == *XorMask) {
     APInt NewConst = *AndMask - *AddC;
-    unsigned BitWidth = I.getType()->getScalarSizeInBits();
-    ConstantRange Range =
-        computeConstantRange(InnerVal, /*signed*/ true,
-                             /*UseInstrInfo=*/true, &IC.getAssumptionCache(),
-                             &I, &IC.getDominatorTree(), 0);
-    // Since C <= (X + C) <= M always holds, 'nuw' check is unnecessary.
-    // Bail out if 'nsw' is implied to avoid creating poison for X=INT_MIN.
-    if (!Range.contains(APInt::getSignedMinValue(BitWidth)))
-      return nullptr;
-
-    Value *NewSub =
-        Builder.CreateSub(ConstantInt::get(I.getType(), NewConst), InnerVal);
+    ConstantRange XRange = computeConstantRange(
+        InnerVal, /*signed*/ true, /*UseInstrInfo=*/true,
+        &IC.getAssumptionCache(), &I, &IC.getDominatorTree(), 0);
+    ConstantRange SubRange = ConstantRange(NewConst).sub(XRange);
+    bool InfersNSW = !SubRange.isSignWrappedSet();
+    Value *NewSub = Builder.CreateSub(ConstantInt::get(I.getType(), NewConst),
+                                      InnerVal, "", false, InfersNSW);
 
     return BinaryOperator::CreateAnd(NewSub,
                                      ConstantInt::get(I.getType(), *AndMask));
diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index b53996b50b030..3f35551d3f1a5 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -83,7 +83,7 @@ define i32 @PR75692_3(i32 %x, i32 %y) {
 ; ((X + C) & M) ^ M --> ((M - C) - X) & M
 define i8 @add_and_xor_basic(i8 %x) {
 ; CHECK-LABEL: @add_and_xor_basic(
-; CHECK-NEXT:    [[ADD:%.*]] = sub i8 10, [[X:%.*]]
+; CHECK-NEXT:    [[ADD:%.*]] = sub nsw i8 10, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 15
 ; CHECK-NEXT:    ret i8 [[AND]]
 ;
@@ -93,16 +93,13 @@ define i8 @add_and_xor_basic(i8 %x) {
   ret i8 %xor
 }
 
-; Negative test
-; Should not optimize cases where `nsw` would be added to the sub.
 define i32 @add_and_xor_nsw(i32 %x) {
 ; CHECK-LABEL: @add_and_xor_nsw(
 ; CHECK-NEXT:    [[IS_POS:%.*]] = icmp sgt i32 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[IS_POS]])
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[X]], 63
+; CHECK-NEXT:    [[ADD:%.*]] = sub nsw i32 0, [[X]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 63
-; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 63
-; CHECK-NEXT:    ret i32 [[XOR]]
+; CHECK-NEXT:    ret i32 [[AND]]
 ;
   %is_pos = icmp sgt i32 %x, -1
   call void @llvm.assume(i1 %is_pos)
@@ -112,15 +109,13 @@ define i32 @add_and_xor_nsw(i32 %x) {
   ret i32 %xor
 }
 
-; Should not optimize cases where `nsw`/`nuw` would be added to the sub.
 define i32 @add_and_xor_nsw_nuw(i32 %x) {
 ; CHECK-LABEL: @add_and_xor_nsw_nuw(
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X:%.*]], 10
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[X]], 54
+; CHECK-NEXT:    [[ADD:%.*]] = sub nsw i32 9, [[X]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 63
-; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 63
-; CHECK-NEXT:    ret i32 [[XOR]]
+; CHECK-NEXT:    ret i32 [[AND]]
 ;
   %cmp = icmp ult i32 %x, 10
   call void @llvm.assume(i1 %cmp)
@@ -132,7 +127,7 @@ define i32 @add_and_xor_nsw_nuw(i32 %x) {
 
 define <4 x i32> @add_and_xor_vector_splat(<4 x i32> %x) {
 ; CHECK-LABEL: @add_and_xor_vector_splat(
-; CHECK-NEXT:    [[ADD:%.*]] = sub <4 x i32> splat (i32 53), [[X:%.*]]
+; CHECK-NEXT:    [[ADD:%.*]] = sub nsw <4 x i32> splat (i32 53), [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[ADD]], splat (i32 63)
 ; CHECK-NEXT:    ret <4 x i32> [[AND]]
 ;
@@ -144,7 +139,7 @@ define <4 x i32> @add_and_xor_vector_splat(<4 x i32> %x) {
 
 define i32 @add_and_xor_overflow_addc(i32 %x) {
 ; CHECK-LABEL: @add_and_xor_overflow_addc(
-; CHECK-NEXT:    [[ADD:%.*]] = sub i32 27, [[X:%.*]]
+; CHECK-NEXT:    [[ADD:%.*]] = sub nsw i32 27, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 31
 ; CHECK-NEXT:    ret i32 [[AND]]
 ;
@@ -156,7 +151,7 @@ define i32 @add_and_xor_overflow_addc(i32 %x) {
 
 define i32 @add_and_xor_negative_addc(i32 %x) {
 ; CHECK-LABEL: @add_and_xor_negative_addc(
-; CHECK-NEXT:    [[ADD:%.*]] = sub i32 1, [[X:%.*]]
+; CHECK-NEXT:    [[ADD:%.*]] = sub nsw i32 1, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 255
 ; CHECK-NEXT:    ret i32 [[AND]]
 ;
@@ -169,7 +164,7 @@ define i32 @add_and_xor_negative_addc(i32 %x) {
 ; This test is first transformed to 'xor(and(add(x, 11), 15), 15)' and then the fold applies.
 define i8 @add_and_xor_sub_op(i8 %x) {
 ; CHECK-LABEL: @add_and_xor_sub_op(
-; CHECK-NEXT:    [[SUB:%.*]] = sub i8 4, [[X:%.*]]
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i8 4, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i8 [[SUB]], 15
 ; CHECK-NEXT:    ret i8 [[AND]]
 ;
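
The nsw inference added here can be modeled in isolation: computeConstantRange
gives a signed range for X, and ConstantRange(NewConst).sub(XRange) being
non-sign-wrapped means NewConst - X cannot sign-overflow for any X in that
range. A simplified scalar model of that decision (my sketch; assumes a
non-wrapped input range, unlike the real ConstantRange):

#include <cassert>
#include <cstdint>

// 'sub K, X' may be marked nsw iff it cannot sign-overflow on [Lo, Hi].
// K - X is monotone in X, so the two endpoints are the worst cases.
static bool subInfersNSW(int K, int8_t Lo, int8_t Hi) {
  return K - (int)Hi >= INT8_MIN && K - (int)Lo <= INT8_MAX;
}

int main() {
  // Mirrors @add_and_xor_nsw_nuw above at i8 scale: assume(X < 10)
  // gives X in [0, 9] and NewConst == 9, so 9 - X stays in [0, 9].
  assert(subInfersNSW(9, 0, 9));
  // With no range facts, X may be INT8_MIN and K - X can overflow.
  assert(!subInfersNSW(9, INT8_MIN, INT8_MAX));
  return 0;
}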

From 2f47ac65a8f4e005e6eb14744f5e92ad3361198b Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Tue, 10 Feb 2026 01:05:14 +0900
Subject: [PATCH 07/13] Revert "fix infer nsw on the new sub only when we can
 prove it"

This reverts commit 50391605b430c42477c616a735654d415fc10ea6.
---
 .../InstCombine/InstCombineAndOrXor.cpp       | 31 ++++++++++---------
 .../Transforms/InstCombine/and-xor-merge.ll   | 23 ++++++++------
 2 files changed, 31 insertions(+), 23 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index f2ee0e3ee4cf4..3ed1b70e36bcb 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5184,23 +5184,26 @@ static Instruction *foldMaskedAddXorPattern(BinaryOperator &I,
                                             InstCombiner::BuilderTy &Builder,
                                             InstCombinerImpl &IC) {
   Value *InnerVal;
-  Value *AddOpVal;
   const APInt *AndMask, *XorMask, *AddC;
-  if (match(&I,
-            m_Xor(m_OneUse(m_And(m_OneUse(m_CombineAnd(
-                                     m_Value(AddOpVal),
-                                     m_Add(m_Value(InnerVal), m_APInt(AddC)))),
-                                 m_LowBitMask(AndMask))),
-                  m_APInt(XorMask))) &&
+
+  if (match(&I, m_Xor(m_OneUse(m_And(
+                          m_OneUse(m_Add(m_Value(InnerVal), m_APInt(AddC))),
+                          m_LowBitMask(AndMask))),
+                      m_APInt(XorMask))) &&
       *AndMask == *XorMask) {
     APInt NewConst = *AndMask - *AddC;
-    ConstantRange XRange = computeConstantRange(
-        InnerVal, /*signed*/ true, /*UseInstrInfo=*/true,
-        &IC.getAssumptionCache(), &I, &IC.getDominatorTree(), 0);
-    ConstantRange SubRange = ConstantRange(NewConst).sub(XRange);
-    bool InfersNSW = !SubRange.isSignWrappedSet();
-    Value *NewSub = Builder.CreateSub(ConstantInt::get(I.getType(), NewConst),
-                                      InnerVal, "", false, InfersNSW);
+    unsigned BitWidth = I.getType()->getScalarSizeInBits();
+    ConstantRange Range =
+        computeConstantRange(InnerVal, /*signed*/ true,
+                             /*UseInstrInfo=*/true, &IC.getAssumptionCache(),
+                             &I, &IC.getDominatorTree(), 0);
+    // Since C <= (X + C) <= M always holds, the 'nuw' check is unnecessary.
+    // Bail out if 'nsw' is implied to avoid creating poison for X=INT_MIN.
+    if (!Range.contains(APInt::getSignedMinValue(BitWidth)))
+      return nullptr;
+
+    Value *NewSub =
+        Builder.CreateSub(ConstantInt::get(I.getType(), NewConst), InnerVal);
 
     return BinaryOperator::CreateAnd(NewSub,
                                      ConstantInt::get(I.getType(), *AndMask));
diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index 3f35551d3f1a5..b53996b50b030 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -83,7 +83,7 @@ define i32 @PR75692_3(i32 %x, i32 %y) {
 ; ((X + C) & M) ^ M --> ((M - C) - X) & M
 define i8 @add_and_xor_basic(i8 %x) {
 ; CHECK-LABEL: @add_and_xor_basic(
-; CHECK-NEXT:    [[ADD:%.*]] = sub nsw i8 10, [[X:%.*]]
+; CHECK-NEXT:    [[ADD:%.*]] = sub i8 10, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 15
 ; CHECK-NEXT:    ret i8 [[AND]]
 ;
@@ -93,13 +93,16 @@ define i8 @add_and_xor_basic(i8 %x) {
   ret i8 %xor
 }
 
+; Negative test
+; Should not optimize cases where `nsw` would be added to the sub.
 define i32 @add_and_xor_nsw(i32 %x) {
 ; CHECK-LABEL: @add_and_xor_nsw(
 ; CHECK-NEXT:    [[IS_POS:%.*]] = icmp sgt i32 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[IS_POS]])
-; CHECK-NEXT:    [[ADD:%.*]] = sub nsw i32 0, [[X]]
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[X]], 63
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 63
-; CHECK-NEXT:    ret i32 [[AND]]
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 63
+; CHECK-NEXT:    ret i32 [[XOR]]
 ;
   %is_pos = icmp sgt i32 %x, -1
   call void @llvm.assume(i1 %is_pos)
@@ -109,13 +112,15 @@ define i32 @add_and_xor_nsw(i32 %x) {
   ret i32 %xor
 }
 
+; Should not optimize cases where `nsw`/`nuw` would be added to the sub.
 define i32 @add_and_xor_nsw_nuw(i32 %x) {
 ; CHECK-LABEL: @add_and_xor_nsw_nuw(
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X:%.*]], 10
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    [[ADD:%.*]] = sub nsw i32 9, [[X]]
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[X]], 54
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 63
-; CHECK-NEXT:    ret i32 [[AND]]
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 63
+; CHECK-NEXT:    ret i32 [[XOR]]
 ;
   %cmp = icmp ult i32 %x, 10
   call void @llvm.assume(i1 %cmp)
@@ -127,7 +132,7 @@ define i32 @add_and_xor_nsw_nuw(i32 %x) {
 
 define <4 x i32> @add_and_xor_vector_splat(<4 x i32> %x) {
 ; CHECK-LABEL: @add_and_xor_vector_splat(
-; CHECK-NEXT:    [[ADD:%.*]] = sub nsw <4 x i32> splat (i32 53), [[X:%.*]]
+; CHECK-NEXT:    [[ADD:%.*]] = sub <4 x i32> splat (i32 53), [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[ADD]], splat (i32 63)
 ; CHECK-NEXT:    ret <4 x i32> [[AND]]
 ;
@@ -139,7 +144,7 @@ define <4 x i32> @add_and_xor_vector_splat(<4 x i32> %x) {
 
 define i32 @add_and_xor_overflow_addc(i32 %x) {
 ; CHECK-LABEL: @add_and_xor_overflow_addc(
-; CHECK-NEXT:    [[ADD:%.*]] = sub nsw i32 27, [[X:%.*]]
+; CHECK-NEXT:    [[ADD:%.*]] = sub i32 27, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 31
 ; CHECK-NEXT:    ret i32 [[AND]]
 ;
@@ -151,7 +156,7 @@ define i32 @add_and_xor_overflow_addc(i32 %x) {
 
 define i32 @add_and_xor_negative_addc(i32 %x) {
 ; CHECK-LABEL: @add_and_xor_negative_addc(
-; CHECK-NEXT:    [[ADD:%.*]] = sub nsw i32 1, [[X:%.*]]
+; CHECK-NEXT:    [[ADD:%.*]] = sub i32 1, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 255
 ; CHECK-NEXT:    ret i32 [[AND]]
 ;
@@ -164,7 +169,7 @@ define i32 @add_and_xor_negative_addc(i32 %x) {
 ; This test is first transformed to 'xor(and(add(x, 11), 15), 15)' and then the fold applies.
 define i8 @add_and_xor_sub_op(i8 %x) {
 ; CHECK-LABEL: @add_and_xor_sub_op(
-; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i8 4, [[X:%.*]]
+; CHECK-NEXT:    [[SUB:%.*]] = sub i8 4, [[X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i8 [[SUB]], 15
 ; CHECK-NEXT:    ret i8 [[AND]]
 ;

From 599075662e0398c1c0ae3504aec3c8c153df396b Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Tue, 10 Feb 2026 01:13:04 +0900
Subject: [PATCH 08/13] Revert "fix resolving logical errors when adding NSW"

This reverts commit f75d2bcca5dee9138f3d5f98224aaa7327b244fc.
---
 .../InstCombine/InstCombineAndOrXor.cpp       | 15 +-------
 .../Transforms/InstCombine/and-xor-merge.ll   | 37 -------------------
 2 files changed, 2 insertions(+), 50 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 3ed1b70e36bcb..73dd7f8e48b84 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5181,8 +5181,7 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
 
 // ((X + C) & M) ^ M --> ((M - C) - X) & M
 static Instruction *foldMaskedAddXorPattern(BinaryOperator &I,
-                                            InstCombiner::BuilderTy &Builder,
-                                            InstCombinerImpl &IC) {
+                                            InstCombiner::BuilderTy &Builder) {
   Value *InnerVal;
   const APInt *AndMask, *XorMask, *AddC;
 
@@ -5192,16 +5191,6 @@ static Instruction *foldMaskedAddXorPattern(BinaryOperator &I,
                       m_APInt(XorMask))) &&
       *AndMask == *XorMask) {
     APInt NewConst = *AndMask - *AddC;
-    unsigned BitWidth = I.getType()->getScalarSizeInBits();
-    ConstantRange Range =
-        computeConstantRange(InnerVal, /*signed*/ true,
-                             /*UseInstrInfo=*/true, &IC.getAssumptionCache(),
-                             &I, &IC.getDominatorTree(), 0);
-    // Since C <= (X + C) <= M always holds, the 'nuw' check is unnecessary.
-    // Bail out if 'nsw' is implied to avoid creating poison for X=INT_MIN.
-    if (!Range.contains(APInt::getSignedMinValue(BitWidth)))
-      return nullptr;
-
     Value *NewSub =
         Builder.CreateSub(ConstantInt::get(I.getType(), NewConst), InnerVal);
 
@@ -5548,7 +5537,7 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
   if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
     return Res;
 
-  if (Instruction *Res = foldMaskedAddXorPattern(I, Builder, *this))
+  if (Instruction *Res = foldMaskedAddXorPattern(I, Builder))
     return Res;
 
   return nullptr;
diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index b53996b50b030..5adf9915a9f95 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -93,43 +93,6 @@ define i8 @add_and_xor_basic(i8 %x) {
   ret i8 %xor
 }
 
-; Negative test
-; Should not optimize cases where `nsw` would be added to the sub.
-define i32 @add_and_xor_nsw(i32 %x) {
-; CHECK-LABEL: @add_and_xor_nsw(
-; CHECK-NEXT:    [[IS_POS:%.*]] = icmp sgt i32 [[X:%.*]], -1
-; CHECK-NEXT:    call void @llvm.assume(i1 [[IS_POS]])
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[X]], 63
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 63
-; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 63
-; CHECK-NEXT:    ret i32 [[XOR]]
-;
-  %is_pos = icmp sgt i32 %x, -1
-  call void @llvm.assume(i1 %is_pos)
-  %add = add nuw nsw i32 %x, 63
-  %and = and i32 %add, 63
-  %xor = xor i32 %and, 63
-  ret i32 %xor
-}
-
-; Should not optimize cases where `nsw`/`nuw` would be added to the sub.
-define i32 @add_and_xor_nsw_nuw(i32 %x) {
-; CHECK-LABEL: @add_and_xor_nsw_nuw(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X:%.*]], 10
-; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[X]], 54
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[ADD]], 63
-; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[AND]], 63
-; CHECK-NEXT:    ret i32 [[XOR]]
-;
-  %cmp = icmp ult i32 %x, 10
-  call void @llvm.assume(i1 %cmp)
-  %add = add i32 %x, 4294967286
-  %and = and i32 %add, 63
-  %xor = xor i32 %and, 63
-  ret i32 %xor
-}
-
 define <4 x i32> @add_and_xor_vector_splat(<4 x i32> %x) {
 ; CHECK-LABEL: @add_and_xor_vector_splat(
 ; CHECK-NEXT:    [[ADD:%.*]] = sub <4 x i32> splat (i32 53), [[X:%.*]]

From b9e74d26e0f90272233086659d1b9b41ec9583a8 Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Thu, 12 Feb 2026 00:11:15 +0900
Subject: [PATCH 09/13] update the logic and the proof

Proof: https://alive2.llvm.org/ce/z/JM4hsd
---
 .../InstCombine/InstCombineAndOrXor.cpp       | 21 +++++++++++--------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 73dd7f8e48b84..a041ded132f2d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5179,21 +5179,24 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
   return nullptr;
 }
 
-// ((X + C) & M) ^ M --> ((M - C) - X) & M
+// ((X + C) & M) ^ M --> ((-1 - C) - X) & M
 static Instruction *foldMaskedAddXorPattern(BinaryOperator &I,
                                             InstCombiner::BuilderTy &Builder) {
   Value *InnerVal;
   const APInt *AndMask, *XorMask, *AddC;
+  BinaryOperator *AddInst;
 
-  if (match(&I, m_Xor(m_OneUse(m_And(
-                          m_OneUse(m_Add(m_Value(InnerVal), m_APInt(AddC))),
-                          m_LowBitMask(AndMask))),
-                      m_APInt(XorMask))) &&
+  if (match(&I,
+            m_Xor(m_OneUse(m_And(m_OneUse(m_CombineAnd(
+                                     m_BinOp(AddInst),
+                                     m_Add(m_Value(InnerVal), m_APInt(AddC)))),
+                                 m_APInt(AndMask))),
+                  m_APInt(XorMask))) &&
       *AndMask == *XorMask) {
-    APInt NewConst = *AndMask - *AddC;
-    Value *NewSub =
-        Builder.CreateSub(ConstantInt::get(I.getType(), NewConst), InnerVal);
-
+    APInt NewConst = ~(*AddC);
+    auto *NewSub = Builder.CreateSub(ConstantInt::get(I.getType(), NewConst),
+                                     InnerVal, "", AddInst->hasNoUnsignedWrap(),
+                                     AddInst->hasNoSignedWrap());
     return BinaryOperator::CreateAnd(NewSub,
                                      ConstantInt::get(I.getType(), *AndMask));
   }

From ab81a61beefe1ce5fa251b2b0084115da62ec554 Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Thu, 12 Feb 2026 02:25:18 +0900
Subject: [PATCH 10/13] Generalize the pattern to support variables by matching
 with m_Value instead of m_APInt

---
 .../InstCombine/InstCombineAndOrXor.cpp       |  27 ++---
 .../Transforms/InstCombine/and-xor-merge.ll   | 104 +++++++++++++++---
 2 files changed, 100 insertions(+), 31 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index a041ded132f2d..3735eb217e515 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5179,26 +5179,21 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
   return nullptr;
 }
 
-// ((X + C) & M) ^ M --> ((-1 - C) - X) & M
+// ((X + C) & M) ^ M --> (~C - X) & M
 static Instruction *foldMaskedAddXorPattern(BinaryOperator &I,
                                             InstCombiner::BuilderTy &Builder) {
-  Value *InnerVal;
-  const APInt *AndMask, *XorMask, *AddC;
+  Value *X, *Mask, *AddC;
   BinaryOperator *AddInst;
 
-  if (match(&I,
-            m_Xor(m_OneUse(m_And(m_OneUse(m_CombineAnd(
-                                     m_BinOp(AddInst),
-                                     m_Add(m_Value(InnerVal), m_APInt(AddC)))),
-                                 m_APInt(AndMask))),
-                  m_APInt(XorMask))) &&
-      *AndMask == *XorMask) {
-    APInt NewConst = ~(*AddC);
-    auto *NewSub = Builder.CreateSub(ConstantInt::get(I.getType(), NewConst),
-                                     InnerVal, "", AddInst->hasNoUnsignedWrap(),
-                                     AddInst->hasNoSignedWrap());
-    return BinaryOperator::CreateAnd(NewSub,
-                                     ConstantInt::get(I.getType(), *AndMask));
+  if (match(&I, m_Xor(m_OneUse(m_And(m_OneUse(m_CombineAnd(
+                                         m_BinOp(AddInst),
+                                         m_Add(m_Value(X), m_Value(AddC)))),
+                                     m_Value(Mask))),
+                      m_Deferred(Mask)))) {
+    Value *NotC = Builder.CreateNot(AddC);
+    Value *NewSub = Builder.CreateSub(NotC, X, "", AddInst->hasNoUnsignedWrap(),
+                                      AddInst->hasNoSignedWrap());
+    return BinaryOperator::CreateAnd(NewSub, Mask);
   }
 
   return nullptr;
diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index 5adf9915a9f95..8cb2788fda98a 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -80,7 +80,59 @@ define i32 @PR75692_3(i32 %x, i32 %y) {
   ret i32 %t4
 }
 
-; ((X + C) & M) ^ M --> ((M - C) - X) & M
+; ((X + C) & M) ^ M --> (~C - X) & M
+define i32 @add_and_xor_fomula_basic(i32 %x, i32 %AddC, i32 %M) {
+; CHECK-LABEL: @add_and_xor_fomula_basic(
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], [[ADDC:%.*]]
+; CHECK-NEXT:    [[AND1:%.*]] = xor i32 [[ADD]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = and i32 [[M:%.*]], [[AND1]]
+; CHECK-NEXT:    ret i32 [[XOR]]
+;
+  %add = add i32 %x, %AddC
+  %and = and i32 %add, %M
+  %xor = xor i32 %and, %M
+  ret i32 %xor
+}
+
+define i32 @add_and_xor_fomula_nsw(i32 %x, i32 %AddC, i32 %M) {
+; CHECK-LABEL: @add_and_xor_fomula_nsw(
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[X:%.*]], [[ADDC:%.*]]
+; CHECK-NEXT:    [[AND1:%.*]] = xor i32 [[ADD]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = and i32 [[M:%.*]], [[AND1]]
+; CHECK-NEXT:    ret i32 [[XOR]]
+;
+  %add = add nsw i32 %x, %AddC
+  %and = and i32 %add, %M
+  %xor = xor i32 %and, %M
+  ret i32 %xor
+}
+
+define i32 @add_and_xor_fomula_nuw(i32 %x, i32 %AddC, i32 %M) {
+; CHECK-LABEL: @add_and_xor_fomula_nuw(
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw i32 [[X:%.*]], [[ADDC:%.*]]
+; CHECK-NEXT:    [[AND1:%.*]] = xor i32 [[ADD]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = and i32 [[M:%.*]], [[AND1]]
+; CHECK-NEXT:    ret i32 [[XOR]]
+;
+  %add = add nuw i32 %x, %AddC
+  %and = and i32 %add, %M
+  %xor = xor i32 %and, %M
+  ret i32 %xor
+}
+
+define i32 @add_and_xor_fomula_nsw_nuw(i32 %x, i32 %AddC, i32 %M) {
+; CHECK-LABEL: @add_and_xor_fomula_nsw_nuw(
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[X:%.*]], [[ADDC:%.*]]
+; CHECK-NEXT:    [[AND1:%.*]] = xor i32 [[ADD]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = and i32 [[M:%.*]], [[AND1]]
+; CHECK-NEXT:    ret i32 [[XOR]]
+;
+  %add = add nuw nsw i32 %x, %AddC
+  %and = and i32 %add, %M
+  %xor = xor i32 %and, %M
+  ret i32 %xor
+}
+
 define i8 @add_and_xor_basic(i8 %x) {
 ; CHECK-LABEL: @add_and_xor_basic(
 ; CHECK-NEXT:    [[ADD:%.*]] = sub i8 10, [[X:%.*]]
@@ -129,6 +181,42 @@ define i32 @add_and_xor_negative_addc(i32 %x) {
   ret i32 %xor
 }
 
+define i8 @add_and_xor_not_low_mask(i8 %x) {
+; CHECK-LABEL: @add_and_xor_not_low_mask(
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 26, [[X:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = and i8 [[TMP1]], 16
+; CHECK-NEXT:    ret i8 [[XOR]]
+;
+  %add = add i8 %x, 5
+  %and = and i8 %add, 16
+  %xor = xor i8 %and, 16
+  ret i8 %xor
+}
+
+define i64 @add_and_xor_nsw(i32 %x) {
+; CHECK-LABEL: @add_and_xor_nsw(
+; CHECK-NEXT:    [[ASSUME_COND:%.*]] = icmp ult i32 [[X:%.*]], 65
+; CHECK-NEXT:    call void @llvm.assume(i1 [[ASSUME_COND]])
+; CHECK-NEXT:    [[TMP1:%.*]] = sub nsw i32 0, [[X]]
+; CHECK-NEXT:    [[XOR:%.*]] = and i32 [[TMP1]], 63
+; CHECK-NEXT:    [[EXT:%.*]] = zext nneg i32 [[XOR]] to i64
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i64 -1, [[EXT]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i64 0, i64 [[SHR]]
+; CHECK-NEXT:    ret i64 [[SEL]]
+;
+  %assume_cond = icmp ult i32 %x, 65
+  call void @llvm.assume(i1 %assume_cond)
+  %add = add nuw nsw i32 %x, 63
+  %and = and i32 %add, 63
+  %xor = xor i32 %and, 63
+  %ext = zext nneg i32 %xor to i64
+  %shr = lshr i64 -1, %ext
+  %cmp = icmp eq i32 %x, 0
+  %sel = select i1 %cmp, i64 0, i64 %shr
+  ret i64 %sel
+}
+
 ; This test is first transformed to 'xor(and(add(x, 11), 15), 15)' and then the fold applies.
 define i8 @add_and_xor_sub_op(i8 %x) {
 ; CHECK-LABEL: @add_and_xor_sub_op(
@@ -142,7 +230,6 @@ define i8 @add_and_xor_sub_op(i8 %x) {
   ret i8 %xor
 }
 
-
 ; add_and_xor negative tests
 
 define i8 @neg_add_and_xor_mask_mismatch(i8 %x) {
@@ -158,19 +245,6 @@ define i8 @neg_add_and_xor_mask_mismatch(i8 %x) {
   ret i8 %xor
 }
 
-define i8 @neg_add_and_xor_not_low_mask(i8 %x) {
-; CHECK-LABEL: @neg_add_and_xor_not_low_mask(
-; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ADD]], 16
-; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[AND]], 16
-; CHECK-NEXT:    ret i8 [[XOR]]
-;
-  %add = add i8 %x, 5
-  %and = and i8 %add, 16
-  %xor = xor i8 %and, 16
-  ret i8 %xor
-}
-
 define i8 @neg_add_and_xor_multi_use(i8 %x) {
 ; CHECK-LABEL: @neg_add_and_xor_multi_use(
 ; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 5

From e5b873550443e32ce64d8912cd2195c51ae47df2 Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Thu, 12 Feb 2026 14:56:59 +0900
Subject: [PATCH 11/13] restrict AddC to a constant using m_ImmConstant to
 ensure the fold is profitable

---
 .../InstCombine/InstCombineAndOrXor.cpp       | 17 ++++---
 .../Transforms/InstCombine/and-xor-merge.ll   | 50 +++++++++----------
 2 files changed, 32 insertions(+), 35 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 3735eb217e515..ec160447ecc14 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5182,15 +5182,16 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
 // ((X + C) & M) ^ M --> (~C - X) & M
 static Instruction *foldMaskedAddXorPattern(BinaryOperator &I,
                                             InstCombiner::BuilderTy &Builder) {
-  Value *X, *Mask, *AddC;
+  Value *X, *Mask;
+  Constant *AddC;
   BinaryOperator *AddInst;
-
-  if (match(&I, m_Xor(m_OneUse(m_And(m_OneUse(m_CombineAnd(
-                                         m_BinOp(AddInst),
-                                         m_Add(m_Value(X), m_Value(AddC)))),
-                                     m_Value(Mask))),
-                      m_Deferred(Mask)))) {
-    Value *NotC = Builder.CreateNot(AddC);
+  if (match(&I,
+            m_Xor(m_OneUse(m_And(m_OneUse(m_CombineAnd(
+                                     m_BinOp(AddInst),
+                                     m_Add(m_Value(X), m_ImmConstant(AddC)))),
+                                 m_Value(Mask))),
+                  m_Deferred(Mask)))) {
+    Constant *NotC = ConstantExpr::getNot(AddC);
     Value *NewSub = Builder.CreateSub(NotC, X, "", AddInst->hasNoUnsignedWrap(),
                                       AddInst->hasNoSignedWrap());
     return BinaryOperator::CreateAnd(NewSub, Mask);
diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index 8cb2788fda98a..1301636036408 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -81,56 +81,52 @@ define i32 @PR75692_3(i32 %x, i32 %y) {
 }
 
 ; ((X + C) & M) ^ M --> (~C - X) & M
-define i32 @add_and_xor_fomula_basic(i32 %x, i32 %AddC, i32 %M) {
+define i32 @add_and_xor_fomula_basic(i32 %x, i32 %M) {
 ; CHECK-LABEL: @add_and_xor_fomula_basic(
-; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], [[ADDC:%.*]]
-; CHECK-NEXT:    [[AND1:%.*]] = xor i32 [[ADD]], -1
+; CHECK-NEXT:    [[AND1:%.*]] = sub i32 0, [[X:%.*]]
 ; CHECK-NEXT:    [[XOR:%.*]] = and i32 [[M:%.*]], [[AND1]]
 ; CHECK-NEXT:    ret i32 [[XOR]]
 ;
-  %add = add i32 %x, %AddC
+  %add = add i32 %x, -1
   %and = and i32 %add, %M
   %xor = xor i32 %and, %M
   ret i32 %xor
 }
 
-define i32 @add_and_xor_fomula_nsw(i32 %x, i32 %AddC, i32 %M) {
+define i64 @add_and_xor_fomula_nsw(i64 %x, i64 %M) {
 ; CHECK-LABEL: @add_and_xor_fomula_nsw(
-; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[X:%.*]], [[ADDC:%.*]]
-; CHECK-NEXT:    [[AND1:%.*]] = xor i32 [[ADD]], -1
-; CHECK-NEXT:    [[XOR:%.*]] = and i32 [[M:%.*]], [[AND1]]
-; CHECK-NEXT:    ret i32 [[XOR]]
+; CHECK-NEXT:    [[AND1:%.*]] = sub i64 0, [[X:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = and i64 [[M:%.*]], [[AND1]]
+; CHECK-NEXT:    ret i64 [[XOR]]
 ;
-  %add = add nsw i32 %x, %AddC
-  %and = and i32 %add, %M
-  %xor = xor i32 %and, %M
-  ret i32 %xor
+  %add = add nsw i64 %x, -1
+  %and = and i64 %add, %M
+  %xor = xor i64 %and, %M
+  ret i64 %xor
 }
 
-define i32 @add_and_xor_fomula_nuw(i32 %x, i32 %AddC, i32 %M) {
+define i32 @add_and_xor_fomula_nuw(i32 %x, i32 %M) {
 ; CHECK-LABEL: @add_and_xor_fomula_nuw(
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw i32 [[X:%.*]], [[ADDC:%.*]]
-; CHECK-NEXT:    [[AND1:%.*]] = xor i32 [[ADD]], -1
-; CHECK-NEXT:    [[XOR:%.*]] = and i32 [[M:%.*]], [[AND1]]
+; CHECK-NEXT:    [[M:%.*]] = sub i32 -2147483648, [[X:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = and i32 [[M]], [[AND1:%.*]]
 ; CHECK-NEXT:    ret i32 [[XOR]]
 ;
-  %add = add nuw i32 %x, %AddC
+  %add = add nuw i32 %x, 2147483647
   %and = and i32 %add, %M
   %xor = xor i32 %and, %M
   ret i32 %xor
 }
 
-define i32 @add_and_xor_fomula_nsw_nuw(i32 %x, i32 %AddC, i32 %M) {
+define i64 @add_and_xor_fomula_nsw_nuw(i64 %x, i64 %M) {
 ; CHECK-LABEL: @add_and_xor_fomula_nsw_nuw(
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[X:%.*]], [[ADDC:%.*]]
-; CHECK-NEXT:    [[AND1:%.*]] = xor i32 [[ADD]], -1
-; CHECK-NEXT:    [[XOR:%.*]] = and i32 [[M:%.*]], [[AND1]]
-; CHECK-NEXT:    ret i32 [[XOR]]
+; CHECK-NEXT:    [[AND1:%.*]] = sub i64 -4191968325751275520, [[X:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = and i64 [[AND1]], [[M:%.*]]
+; CHECK-NEXT:    ret i64 [[XOR]]
 ;
-  %add = add nuw nsw i32 %x, %AddC
-  %and = and i32 %add, %M
-  %xor = xor i32 %and, %M
-  ret i32 %xor
+  %add = add nuw nsw i64 %x, 21474836479223372036854775807
+  %and = and i64 %add, %M
+  %xor = xor i64 %and, %M
+  ret i64 %xor
 }
 
 define i8 @add_and_xor_basic(i8 %x) {

From bf0192a61b7d759e6aad37fb80a932df570a970d Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Fri, 13 Feb 2026 02:11:16 +0900
Subject: [PATCH 12/13] change the method used to create ~C

---
 llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index ec160447ecc14..c5f82194a513c 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5191,7 +5191,7 @@ static Instruction *foldMaskedAddXorPattern(BinaryOperator &I,
                                      m_Add(m_Value(X), m_ImmConstant(AddC)))),
                                  m_Value(Mask))),
                   m_Deferred(Mask)))) {
-    Constant *NotC = ConstantExpr::getNot(AddC);
+    Value *NotC = Builder.CreateNot(AddC);
     Value *NewSub = Builder.CreateSub(NotC, X, "", AddInst->hasNoUnsignedWrap(),
                                       AddInst->hasNoSignedWrap());
     return BinaryOperator::CreateAnd(NewSub, Mask);
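
What this last swap relies on: CreateNot builds xor(V, -1), and with an
m_ImmConstant operand the builder's default constant folder collapses that to
a plain Constant, so no extra xor instruction is emitted — the same end result
as ConstantExpr::getNot. A minimal standalone check against the LLVM C++ API
(my sketch; the builder needs no insertion point because nothing is inserted):

#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  IRBuilder<> B(Ctx); // no insertion point: fine, constants fold away
  Type *I8 = Type::getInt8Ty(Ctx);
  Value *NotC = B.CreateNot(ConstantInt::get(I8, 5));
  // ~5 == -6; the result is a ConstantInt, not an xor instruction.
  if (auto *CI = dyn_cast<ConstantInt>(NotC))
    outs() << "folded: " << CI->getSExtValue() << "\n"; // prints -6
  return 0;
}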

From 261ed6288a51c4f5142330af66dd64fb9f3a7ee0 Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Fri, 13 Feb 2026 02:26:13 +0900
Subject: [PATCH 13/13] Remove unnecessary tests and add tests against a wider
 variety of masks

---
 .../Transforms/InstCombine/and-xor-merge.ll   | 112 ++++++------------
 1 file changed, 38 insertions(+), 74 deletions(-)

diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index 1301636036408..a306fdb4698c6 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -81,54 +81,6 @@ define i32 @PR75692_3(i32 %x, i32 %y) {
 }
 
 ; ((X + C) & M) ^ M --> (~C - X) & M
-define i32 @add_and_xor_fomula_basic(i32 %x, i32 %M) {
-; CHECK-LABEL: @add_and_xor_fomula_basic(
-; CHECK-NEXT:    [[AND1:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT:    [[XOR:%.*]] = and i32 [[M:%.*]], [[AND1]]
-; CHECK-NEXT:    ret i32 [[XOR]]
-;
-  %add = add i32 %x, -1
-  %and = and i32 %add, %M
-  %xor = xor i32 %and, %M
-  ret i32 %xor
-}
-
-define i64 @add_and_xor_fomula_nsw(i64 %x, i64 %M) {
-; CHECK-LABEL: @add_and_xor_fomula_nsw(
-; CHECK-NEXT:    [[AND1:%.*]] = sub i64 0, [[X:%.*]]
-; CHECK-NEXT:    [[XOR:%.*]] = and i64 [[M:%.*]], [[AND1]]
-; CHECK-NEXT:    ret i64 [[XOR]]
-;
-  %add = add nsw i64 %x, -1
-  %and = and i64 %add, %M
-  %xor = xor i64 %and, %M
-  ret i64 %xor
-}
-
-define i32 @add_and_xor_fomula_nuw(i32 %x, i32 %M) {
-; CHECK-LABEL: @add_and_xor_fomula_nuw(
-; CHECK-NEXT:    [[M:%.*]] = sub i32 -2147483648, [[X:%.*]]
-; CHECK-NEXT:    [[XOR:%.*]] = and i32 [[M]], [[AND1:%.*]]
-; CHECK-NEXT:    ret i32 [[XOR]]
-;
-  %add = add nuw i32 %x, 2147483647
-  %and = and i32 %add, %M
-  %xor = xor i32 %and, %M
-  ret i32 %xor
-}
-
-define i64 @add_and_xor_fomula_nsw_nuw(i64 %x, i64 %M) {
-; CHECK-LABEL: @add_and_xor_fomula_nsw_nuw(
-; CHECK-NEXT:    [[AND1:%.*]] = sub i64 -4191968325751275520, [[X:%.*]]
-; CHECK-NEXT:    [[XOR:%.*]] = and i64 [[AND1]], [[M:%.*]]
-; CHECK-NEXT:    ret i64 [[XOR]]
-;
-  %add = add nuw nsw i64 %x, 21474836479223372036854775807
-  %and = and i64 %add, %M
-  %xor = xor i64 %and, %M
-  ret i64 %xor
-}
-
 define i8 @add_and_xor_basic(i8 %x) {
 ; CHECK-LABEL: @add_and_xor_basic(
 ; CHECK-NEXT:    [[ADD:%.*]] = sub i8 10, [[X:%.*]]
@@ -179,38 +131,50 @@ define i32 @add_and_xor_negative_addc(i32 %x) {
 
 define i8 @add_and_xor_not_low_mask(i8 %x) {
 ; CHECK-LABEL: @add_and_xor_not_low_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 26, [[X:%.*]]
-; CHECK-NEXT:    [[XOR:%.*]] = and i8 [[TMP1]], 16
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 1, [[X:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = and i8 [[TMP1]], -18
 ; CHECK-NEXT:    ret i8 [[XOR]]
 ;
-  %add = add i8 %x, 5
-  %and = and i8 %add, 16
-  %xor = xor i8 %and, 16
+  %add = add i8 %x, -2
+  %and = and i8 %add, 238
+  %xor = xor i8 %and, 238
   ret i8 %xor
 }
 
-define i64 @add_and_xor_nsw(i32 %x) {
-; CHECK-LABEL: @add_and_xor_nsw(
-; CHECK-NEXT:    [[ASSUME_COND:%.*]] = icmp ult i32 [[X:%.*]], 65
-; CHECK-NEXT:    call void @llvm.assume(i1 [[ASSUME_COND]])
-; CHECK-NEXT:    [[TMP1:%.*]] = sub nsw i32 0, [[X]]
-; CHECK-NEXT:    [[XOR:%.*]] = and i32 [[TMP1]], 63
-; CHECK-NEXT:    [[EXT:%.*]] = zext nneg i32 [[XOR]] to i64
-; CHECK-NEXT:    [[SHR:%.*]] = lshr i64 -1, [[EXT]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X]], 0
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i64 0, i64 [[SHR]]
-; CHECK-NEXT:    ret i64 [[SEL]]
+define i8 @add_and_xor_not_low_mask2(i8 %x) {
+; CHECK-LABEL: @add_and_xor_not_low_mask2(
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 1, [[X:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = and i8 [[TMP1]], 119
+; CHECK-NEXT:    ret i8 [[XOR]]
+;
+  %add = add i8 %x, -2
+  %and = and i8 %add, 119
+  %xor = xor i8 %and, 119
+  ret i8 %xor
+}
+
+define i8 @add_and_xor_not_low_mask3(i8 %x) {
+; CHECK-LABEL: @add_and_xor_not_low_mask3(
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 1, [[X:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = and i8 [[TMP1]], -91
+; CHECK-NEXT:    ret i8 [[XOR]]
 ;
-  %assume_cond = icmp ult i32 %x, 65
-  call void @llvm.assume(i1 %assume_cond)
-  %add = add nuw nsw i32 %x, 63
-  %and = and i32 %add, 63
-  %xor = xor i32 %and, 63
-  %ext = zext nneg i32 %xor to i64
-  %shr = lshr i64 -1, %ext
-  %cmp = icmp eq i32 %x, 0
-  %sel = select i1 %cmp, i64 0, i64 %shr
-  ret i64 %sel
+  %add = add i8 %x, -2
+  %and = and i8 %add, 165
+  %xor = xor i8 %and, 165
+  ret i8 %xor
+}
+
+define i8 @add_and_xor_not_low_mask4(i8 %x) {
+; CHECK-LABEL: @add_and_xor_not_low_mask4(
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 1, [[X:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = and i8 [[TMP1]], -86
+; CHECK-NEXT:    ret i8 [[XOR]]
+;
+  %add = add i8 %x, -2
+  %and = and i8 %add, 170
+  %xor = xor i8 %and, 170
+  ret i8 %xor
 }
 
 ; This test is first transformed to 'xor(and(add(x, 11), 15), 15)' and then the fold applies.
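
For anyone skimming the thirteen incremental diffs, the net state of the
helper at the tip of this branch — reconstructed by composing the hunks above
(patches 10, 11 and 12 set the final shape) — is:

// ((X + C) & M) ^ M --> (~C - X) & M
static Instruction *foldMaskedAddXorPattern(BinaryOperator &I,
                                            InstCombiner::BuilderTy &Builder) {
  Value *X, *Mask;
  Constant *AddC;
  BinaryOperator *AddInst;
  if (match(&I,
            m_Xor(m_OneUse(m_And(m_OneUse(m_CombineAnd(
                                     m_BinOp(AddInst),
                                     m_Add(m_Value(X), m_ImmConstant(AddC)))),
                                 m_Value(Mask))),
                  m_Deferred(Mask)))) {
    // The not of an immediate constant folds away; the wrap flags of the
    // original add are carried over to the new sub.
    Value *NotC = Builder.CreateNot(AddC);
    Value *NewSub = Builder.CreateSub(NotC, X, "", AddInst->hasNoUnsignedWrap(),
                                      AddInst->hasNoSignedWrap());
    return BinaryOperator::CreateAnd(NewSub, Mask);
  }
  return nullptr;
}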


