[llvm] [InstCombine] Improve `(icmp pred (and X, Y), ...)` fold. (PR #66787)

via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 17 22:57:42 PST 2023


llvmbot wrote:



@llvm/pr-subscribers-llvm-transforms

Author: None (goldsteinn)

Changes:

- [InstCombine] Make `isFreeToInvert` check recursively.
- [InstCombine] Add additional tests for free inversion; NFC
- [InstCombine] Add `getFreelyInverted` to perform folds for free inversion of an op
- [InstCombine] Add tests for expanding `foldICmpWithLowBitMaskedVal`; NFC
- [InstCombine] Improve mask detection in `foldICmpWithLowBitMaskedVal`
- [InstCombine] Recognize the `(icmp eq/ne (and X, ~Mask), 0)` pattern in `foldICmpWithLowBitMaskedVal` (a hand-written IR sketch follows this list)
- [InstCombine] Make the `(icmp eq/ne (and X, Y), X)` canonicalization work for non-const operands
- [InstCombine] Make `getKnownSign` a member function of InstCombiner; NFC
- [InstCombine] Add transforms `(icmp spred (and X, Y), X)` if `X` or `Y` are known signed/unsigned
- [InstCombine] Add tests for folding multiuse `(icmp eq/ne (or X, Y), Y)`; NFC
- [InstCombine] Fold multiuse `(icmp eq/ne (or X, Y), Y)` when `Y` has 2 uses
- [InstCombine] Add tests for transforming `(or/and (icmp eq/ne X,0),(icmp eq/ne X,Pow2OrZero))`; NFC
- [InstCombine] Add transforms for `(or/and (icmp eq/ne X,0),(icmp eq/ne X,Pow2OrZero))`
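
As a rough illustration of the `(icmp eq/ne (and X, ~Mask), 0)` item above, here is a hand-written IR sketch (the function name and the `shl -1, %y` mask shape are assumptions for illustration, not taken from the patch's tests) of the kind of input the improved `foldICmpWithLowBitMaskedVal` is meant to recognize:

```llvm
; Hypothetical input, not from the patch: %notmask = -1 << %y is the inverse of
; the low-bit mask Mask = (1 << %y) - 1 (assuming %y < 8), so the compare asks
; whether %x fits entirely inside Mask.
define i1 @src_and_notmask_eq0(i8 %x, i8 %y) {
  %notmask = shl i8 -1, %y        ; ~Mask
  %hi      = and i8 %x, %notmask  ; X & ~Mask: the bits of %x above Mask
  %cmp     = icmp eq i8 %hi, 0    ; true iff %x u<= Mask
  ret i1 %cmp
}
```

Semantically this asks the same question as the `(icmp eq/ne (and X, Mask), X)` form that `foldICmpWithLowBitMaskedVal` already handled, which is presumably why the new pattern is folded in the same place.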


---

Patch is 38.32 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/66787.diff


12 Files Affected:

- (modified) llvm/include/llvm/Transforms/InstCombine/InstCombiner.h (+96-31) 
- (modified) llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp (+8-6) 
- (modified) llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp (+36-19) 
- (modified) llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp (+6-6) 
- (modified) llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp (+11-10) 
- (modified) llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp (+10-10) 
- (added) llvm/test/Transforms/InstCombine/free-inversion.ll (+377) 
- (modified) llvm/test/Transforms/InstCombine/icmp-of-or-x.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/minmax-intrinsics.ll (+9-10) 
- (modified) llvm/test/Transforms/InstCombine/pr38915.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/xor.ll (+12-12) 
- (modified) llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll (+1-1) 


``````````diff
diff --git a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
index f8b3874267ded3b..fcf72fd5ff9c678 100644
--- a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
+++ b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
@@ -233,49 +233,114 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner {
                                                 PatternMatch::m_Value()));
   }
 
-  /// Return true if the specified value is free to invert (apply ~ to).
-  /// This happens in cases where the ~ can be eliminated.  If WillInvertAllUses
-  /// is true, work under the assumption that the caller intends to remove all
-  /// uses of V and only keep uses of ~V.
-  ///
-  /// See also: canFreelyInvertAllUsersOf()
-  static bool isFreeToInvert(Value *V, bool WillInvertAllUses) {
+  /// Return nonnull value if V is free to invert (with condition) regarding
+  /// WillInvertAllUses.
+  /// If Builder is nonnull, it will return a simplified ~V
+  /// If Builder is null, it will return an arbitrary nonnull value (not
+  /// dereferenceable).
+  static Value *getFreelyInverted(Value *V, bool WillInvertAllUses,
+                                  BuilderTy *Builder, unsigned Depth = 0) {
+    using namespace llvm::PatternMatch;
+    static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
     // ~(~(X)) -> X.
-    if (match(V, m_Not(PatternMatch::m_Value())))
-      return true;
+    Value *A, *B;
+    if (match(V, m_Not(m_Value(A))))
+      return A;
 
+    Constant *C;
     // Constants can be considered to be not'ed values.
-    if (match(V, PatternMatch::m_AnyIntegralConstant()))
-      return true;
+    if (match(V, m_ImmConstant(C)))
+      return ConstantExpr::getNot(C);
+
+    if (Depth++ >= MaxAnalysisRecursionDepth)
+      return nullptr;
+
+    // The rest of the cases require that we invert all uses so don't bother
+    // doing the analysis if we know we can't use the result.
+    if (!WillInvertAllUses)
+      return nullptr;
 
     // Compares can be inverted if all of their uses are being modified to use
     // the ~V.
-    if (isa<CmpInst>(V))
-      return WillInvertAllUses;
+    if (auto *I = dyn_cast<CmpInst>(V)) {
+      if (Builder != nullptr)
+        return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
+                                  I->getOperand(1));
+      return NonNull;
+    }
 
     // If `V` is of the form `A + Constant` then `-1 - V` can be folded into
     // `(-1 - Constant) - A` if we are willing to invert all of the uses.
-    if (match(V, m_Add(PatternMatch::m_Value(), PatternMatch::m_ImmConstant())))
-      return WillInvertAllUses;
+    if (match(V, m_Add(m_Value(A), m_Value(B)))) {
+      if (auto *BV = getFreelyInverted(B, B->hasOneUse(), Builder, Depth))
+        return Builder ? Builder->CreateSub(BV, A) : NonNull;
+      if (auto *AV = getFreelyInverted(A, A->hasOneUse(), Builder, Depth))
+        return Builder ? Builder->CreateSub(AV, B) : NonNull;
+      return nullptr;
+    }
+
+    // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
+    // into `A ^ B` if we are willing to invert all of the uses.
+    if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
+      if (auto *BV = getFreelyInverted(B, B->hasOneUse(), Builder, Depth))
+        return Builder ? Builder->CreateXor(A, BV) : NonNull;
+      if (auto *AV = getFreelyInverted(A, A->hasOneUse(), Builder, Depth))
+        return Builder ? Builder->CreateXor(AV, B) : NonNull;
+      return nullptr;
+    }
 
     // If `V` is of the form `Constant - A` then `-1 - V` can be folded into
     // `A + (-1 - Constant)` if we are willing to invert all of the uses.
-    if (match(V, m_Sub(PatternMatch::m_ImmConstant(), PatternMatch::m_Value())))
-      return WillInvertAllUses;
-
-    // Selects with invertible operands are freely invertible
-    if (match(V,
-              m_Select(PatternMatch::m_Value(), m_Not(PatternMatch::m_Value()),
-                       m_Not(PatternMatch::m_Value()))))
-      return WillInvertAllUses;
-
-    // Min/max may be in the form of intrinsics, so handle those identically
-    // to select patterns.
-    if (match(V, m_MaxOrMin(m_Not(PatternMatch::m_Value()),
-                            m_Not(PatternMatch::m_Value()))))
-      return WillInvertAllUses;
-
-    return false;
+    if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
+      if (auto *AV = getFreelyInverted(A, A->hasOneUse(), Builder, Depth))
+        return Builder ? Builder->CreateAdd(AV, B) : NonNull;
+      return nullptr;
+    }
+
+    // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
+    // into `A s>> B` if we are willing to invert all of the uses.
+    if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
+      if (auto *AV = getFreelyInverted(A, A->hasOneUse(), Builder, Depth))
+        return Builder ? Builder->CreateAShr(AV, B) : NonNull;
+      return nullptr;
+    }
+
+    Value *Cond;
+    // LogicOps are special in that we canonicalize them at the cost of an
+    // instruction.
+    bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
+                    !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
+    // Selects/min/max with invertible operands are freely invertible
+    if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
+      if (!getFreelyInverted(A, A->hasOneUse(), /*Builder*/ nullptr, Depth))
+        return nullptr;
+      if (Value *NotB = getFreelyInverted(B, B->hasOneUse(), Builder, Depth)) {
+        if (Builder != nullptr) {
+          Value *NotA = getFreelyInverted(A, A->hasOneUse(), Builder, Depth);
+          assert(
+              NotA != nullptr &&
+              "Unable to build inverted value for known freely invertable op");
+          if (auto *II = dyn_cast<IntrinsicInst>(V))
+            return Builder->CreateBinaryIntrinsic(
+                getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
+          return Builder->CreateSelect(Cond, NotA, NotB);
+        }
+        return NonNull;
+      }
+    }
+
+    return nullptr;
+  }
+
+  /// Return true if the specified value is free to invert (apply ~ to).
+  /// This happens in cases where the ~ can be eliminated.  If WillInvertAllUses
+  /// is true, work under the assumption that the caller intends to remove all
+  /// uses of V and only keep uses of ~V.
+  ///
+  /// See also: canFreelyInvertAllUsersOf()
+  static bool isFreeToInvert(Value *V, bool WillInvertAllUses) {
+    return getFreelyInverted(V, WillInvertAllUses, /*Builder*/ nullptr,
+                             /*Depth*/ 0) != nullptr;
   }
 
   /// Given i1 V, can every user of V be freely adapted if V is changed to !V ?
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 318992b55e4f9f8..a7d801c452e9ad7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -2198,12 +2198,14 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
   // (~X) - (~Y) --> Y - X
   // This is placed after the other reassociations and explicitly excludes a
   // sub-of-sub pattern to avoid infinite looping.
-  if (isFreeToInvert(Op0, Op0->hasOneUse()) &&
-      isFreeToInvert(Op1, Op1->hasOneUse()) &&
-      !match(Op0, m_Sub(m_ImmConstant(), m_Value()))) {
-    Value *NotOp0 = Builder.CreateNot(Op0);
-    Value *NotOp1 = Builder.CreateNot(Op1);
-    return BinaryOperator::CreateSub(NotOp1, NotOp0);
+  if (!match(Op0, m_Sub(m_ImmConstant(), m_Value())) &&
+      isFreeToInvert(Op0, Op0->hasOneUse())) {
+    if (Value *NotOp1 = getFreelyInverted(Op1, Op1->hasOneUse(), &Builder)) {
+      Value *NotOp0 = getFreelyInverted(Op0, Op0->hasOneUse(), &Builder);
+      assert(NotOp0 != nullptr &&
+             "isFreeToInvert desynced with getFreelyInverted");
+      return BinaryOperator::CreateSub(NotOp1, NotOp0);
+    }
   }
 
   auto m_AddRdx = [](Value *&Vec) {
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 46af9bf5eed003a..819bcb81f4a111d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -2503,16 +2503,26 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
       return BinaryOperator::CreateAnd(Op1, B);
 
     // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
-    if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
-      if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
-        if (Op1->hasOneUse() || isFreeToInvert(C, C->hasOneUse()))
-          return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(C));
+    if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
+      if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) {
+        Value *NotC = Op1->hasOneUse()
+                          ? Builder.CreateNot(C)
+                          : getFreelyInverted(C, C->hasOneUse(), &Builder);
+        if (NotC != nullptr)
+          return BinaryOperator::CreateAnd(Op0, NotC);
+      }
+    }
 
     // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
-    if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
-      if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
-        if (Op0->hasOneUse() || isFreeToInvert(C, C->hasOneUse()))
+    if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B)))) {
+      if (match(Op1, m_Xor(m_Specific(B), m_Specific(A)))) {
+        Value *NotC = Op0->hasOneUse()
+                          ? Builder.CreateNot(C)
+                          : getFreelyInverted(C, C->hasOneUse(), &Builder);
+        if (NotC != nullptr)
           return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(C));
+      }
+    }
 
     // (A | B) & (~A ^ B) -> A & B
     // (A | B) & (B ^ ~A) -> A & B
@@ -3997,14 +4007,14 @@ static Instruction *visitMaskedMerge(BinaryOperator &I,
 static Instruction *sinkNotIntoXor(BinaryOperator &I, Value *X, Value *Y,
                                    InstCombiner::BuilderTy &Builder) {
   // We only want to do the transform if it is free to do.
-  if (InstCombiner::isFreeToInvert(X, X->hasOneUse())) {
-    // Ok, good.
-  } else if (InstCombiner::isFreeToInvert(Y, Y->hasOneUse())) {
+  Value *NotX = InstCombiner::getFreelyInverted(X, X->hasOneUse(), &Builder);
+  if (NotX == nullptr) {
     std::swap(X, Y);
-  } else
+    NotX = InstCombiner::getFreelyInverted(X, X->hasOneUse(), &Builder);
+  }
+  if (NotX == nullptr)
     return nullptr;
 
-  Value *NotX = Builder.CreateNot(X, X->getName() + ".not");
   return BinaryOperator::CreateXor(NotX, Y, I.getName() + ".demorgan");
 }
 
@@ -4317,13 +4327,15 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
   auto *II = dyn_cast<IntrinsicInst>(NotOp);
   if (II && II->hasOneUse()) {
     if (match(NotOp, m_MaxOrMin(m_Value(X), m_Value(Y))) &&
-        isFreeToInvert(X, X->hasOneUse()) &&
-        isFreeToInvert(Y, Y->hasOneUse())) {
-      Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
-      Value *NotX = Builder.CreateNot(X);
-      Value *NotY = Builder.CreateNot(Y);
-      Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, NotX, NotY);
-      return replaceInstUsesWith(I, InvMaxMin);
+        isFreeToInvert(X, X->hasOneUse())) {
+      if (Value *NotY = getFreelyInverted(Y, Y->hasOneUse(), &Builder)) {
+        Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
+        Value *NotX = getFreelyInverted(X, X->hasOneUse(), &Builder);
+        assert(NotX != nullptr &&
+               "isFreeToInvert desynced with getFreelyInverted");
+        Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, NotX, NotY);
+        return replaceInstUsesWith(I, InvMaxMin);
+      }
     }
     if (match(NotOp, m_c_MaxOrMin(m_Not(m_Value(X)), m_Value(Y)))) {
       Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
@@ -4374,6 +4386,11 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
   if (Instruction *NewXor = foldNotXor(I, Builder))
     return NewXor;
 
+  // TODO: Could handle multi-use better by checking if all uses of NotOp (other
+  // than I) can be inverted.
+  if (Value *R = getFreelyInverted(NotOp, NotOp->hasOneUse(), &Builder))
+    return replaceInstUsesWith(I, R);
+
   return nullptr;
 }
 
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 64cbfebf0102876..f8346cd03849acf 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1698,12 +1698,12 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
     auto moveNotAfterMinMax = [&](Value *X, Value *Y) -> Instruction * {
       Value *A;
       if (match(X, m_OneUse(m_Not(m_Value(A)))) &&
-          !isFreeToInvert(A, A->hasOneUse()) &&
-          isFreeToInvert(Y, Y->hasOneUse())) {
-        Value *NotY = Builder.CreateNot(Y);
-        Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
-        Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);
-        return BinaryOperator::CreateNot(InvMaxMin);
+          !isFreeToInvert(A, A->hasOneUse())) {
+        if (Value *NotY = getFreelyInverted(Y, Y->hasOneUse(), &Builder)) {
+          Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
+          Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);
+          return BinaryOperator::CreateNot(InvMaxMin);
+        }
       }
       return nullptr;
     };
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 9bc84c7dd6e1539..74865c487c2b2d2 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -3226,10 +3226,12 @@ Instruction *InstCombinerImpl::foldICmpBitCast(ICmpInst &Cmp) {
   // icmp eq/ne (bitcast (not X) to iN), -1 --> icmp eq/ne (bitcast X to iN), 0
   // Example: are all elements equal? --> are zero elements not equal?
   // TODO: Try harder to reduce compare of 2 freely invertible operands?
-  if (Cmp.isEquality() && C->isAllOnes() && Bitcast->hasOneUse() &&
-      isFreeToInvert(BCSrcOp, BCSrcOp->hasOneUse())) {
-    Value *Cast = Builder.CreateBitCast(Builder.CreateNot(BCSrcOp), DstType);
-    return new ICmpInst(Pred, Cast, ConstantInt::getNullValue(DstType));
+  if (Cmp.isEquality() && C->isAllOnes() && Bitcast->hasOneUse()) {
+    if (Value *NotBCSrcOp =
+            getFreelyInverted(BCSrcOp, BCSrcOp->hasOneUse(), &Builder)) {
+      Value *Cast = Builder.CreateBitCast(NotBCSrcOp, DstType);
+      return new ICmpInst(Pred, Cast, ConstantInt::getNullValue(DstType));
+    }
   }
 
   // If this is checking if all elements of an extended vector are clear or not,
@@ -4494,14 +4496,13 @@ static Instruction *foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q,
 
   if (ICmpInst::isEquality(Pred) && Op0->hasOneUse()) {
     // icmp (X | Y) eq/ne Y --> (X & ~Y) eq/ne 0 if Y is freely invertible
-    if (IC.isFreeToInvert(Op1, Op1->hasOneUse()))
-      return new ICmpInst(Pred,
-                          IC.Builder.CreateAnd(A, IC.Builder.CreateNot(Op1)),
+    if (Value *NotOp1 =
+            IC.getFreelyInverted(Op1, Op1->hasOneUse(), &IC.Builder))
+      return new ICmpInst(Pred, IC.Builder.CreateAnd(A, NotOp1),
                           Constant::getNullValue(Op1->getType()));
     // icmp (X | Y) eq/ne Y --> (~X | Y) eq/ne -1 if X  is freely invertible.
-    if (IC.isFreeToInvert(A, A->hasOneUse()))
-      return new ICmpInst(Pred,
-                          IC.Builder.CreateOr(Op1, IC.Builder.CreateNot(A)),
+    if (Value *NotA = IC.getFreelyInverted(A, A->hasOneUse(), &IC.Builder))
+      return new ICmpInst(Pred, IC.Builder.CreateOr(Op1, NotA),
                           Constant::getAllOnesValue(Op1->getType()));
   }
   return nullptr;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 71c2d68881441ac..2dda46986f0fd08 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -3075,18 +3075,18 @@ Instruction *InstCombinerImpl::foldSelectOfBools(SelectInst &SI) {
     return SelectInst::Create(TrueVal, OrV, Zero);
   }
   // select (c & b), a, b -> select b, (select ~c, true, a), false
-  if (match(CondVal, m_OneUse(m_c_And(m_Value(C), m_Specific(FalseVal)))) &&
-      isFreeToInvert(C, C->hasOneUse())) {
-    Value *NotC = Builder.CreateNot(C);
-    Value *OrV = Builder.CreateSelect(NotC, One, TrueVal);
-    return SelectInst::Create(FalseVal, OrV, Zero);
+  if (match(CondVal, m_OneUse(m_c_And(m_Value(C), m_Specific(FalseVal))))) {
+    if (Value *NotC = getFreelyInverted(C, C->hasOneUse(), &Builder)) {
+      Value *OrV = Builder.CreateSelect(NotC, One, TrueVal);
+      return SelectInst::Create(FalseVal, OrV, Zero);
+    }
   }
   // select (a | c), a, b -> select a, true, (select ~c, b, false)
-  if (match(CondVal, m_OneUse(m_c_Or(m_Specific(TrueVal), m_Value(C)))) &&
-      isFreeToInvert(C, C->hasOneUse())) {
-    Value *NotC = Builder.CreateNot(C);
-    Value *AndV = Builder.CreateSelect(NotC, FalseVal, Zero);
-    return SelectInst::Create(TrueVal, One, AndV);
+  if (match(CondVal, m_OneUse(m_c_Or(m_Specific(TrueVal), m_Value(C))))) {
+    if (Value *NotC = getFreelyInverted(C, C->hasOneUse(), &Builder)) {
+      Value *AndV = Builder.CreateSelect(NotC, FalseVal, Zero);
+      return SelectInst::Create(TrueVal, One, AndV);
+    }
   }
   // select (c & ~b), a, b -> select b, true, (select c, a, false)
   if (match(CondVal,
diff --git a/llvm/test/Transforms/InstCombine/free-inversion.ll b/llvm/test/Transforms/InstCombine/free-inversion.ll
new file mode 100644
index 000000000000000..4e5eef2b69b4dc0
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/free-inversion.ll
@@ -0,0 +1,377 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+declare i8 @llvm.smin.i8(i8, i8)
+declare i8 @llvm.umin.i8(i8, i8)
+declare i8 @llvm.smax.i8(i8, i8)
+declare i8 @llvm.umax.i8(i8, i8)
+
+declare void @use.i8(i8)
+
+define i8 @xor_1(i8 %a, i1 %c, i8 %x, i8 %y) {
+; CHECK-LABEL: @xor_1(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], -124
+; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[C:%.*]], i8 [[X:%.*]], i8 [[TMP1]]
+; CHECK-NEXT:    [[NOT_BA:%.*]] = xor i8 [[TMP2]], [[A:%.*]]
+; CHECK-NEXT:    ret i8 [[NOT_BA]]
+;
+  %nx = xor i8 %x, -1
+  %yy = xor i8 %y, 123
+  %b = select i1 %c, i8 %nx, i8 %yy
+  %ba = xor i8 %b, %a
+  %not_ba = xor i8 %ba, -1
+  ret i8 %not_ba
+}
+
+define i8 @xor_2(i8 %a, i1 %c, i8 %x, i8 %y) {
+; CHECK-LABEL: @xor_2(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], -124
+; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[C:%.*]], i8 [[X:%.*]], i8 [[TMP1]]
+; CHECK-NEXT:    [[NOT_AB:%.*]] = xor i8 [[TMP2]], [[A:%.*]]
+; CHECK-NEXT:    ret i8 [[NOT_AB]]
+;
+  %nx = xor i8 %x, -1
+  %yy = xor i8 %y, 123
+  %b = select i1 %c, i8 %nx, i8 %yy
+  %ab = xor i8 %a, %b
+  %not_ab = xor i8 %ab, -1
+  ret i8 %not_ab
+}
+
+define i8 @xor_fail(i8 %a, i1 %c, i8 %x, i8 %y) {
+; CHECK-LABEL: @xor_fail(
+; CHECK-NEXT:    [[NX:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    [[B:%.*]] = select i1 [[C:%.*]], i8 [[NX]], i8 [[Y:%.*]]
+; CHECK-NEXT:    [[AB:%.*]] = xor i8 [[B]], [[A:%.*]]
+; CHECK-NEXT:    [[NOT_AB:%.*]] = xor i8 [[AB]], -1
+; CHECK-NEXT:    ret i8 [[NOT_AB]]
+;
+  %nx = xor i8 %x, -1
+  %b = select i1 %c, i8 %nx, i8 %y
+  %ab = xor i8 %a, %b
+  %not_ab = xor i8 %ab, -1
+  ret i8 %not_ab
+}
+
+define i8 @add_1(i8 %a, i1 %c, i8 %x, i8 %y) {
+; CHECK-LABEL: @add_1(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], -124
+; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[C:%.*]], i8 [[X:%.*]], i8 [[TMP1]]
+; CHECK-NEXT:    [[NOT_BA:%.*]] = sub i8 [[TMP2]], [[A:%.*]]
+; CHECK-NEXT:    ret i8 [[NOT_BA]]
+;
+  %nx = xor i8 %x, -1
+  %yy = xor i8 %y, 123
+  %b = select i1 %c, i8 %nx, i8 %yy
+  %ba = add i8 %b, %a
+  %not_ba = xor i8 %ba, -1
+  ret i8 %not_ba
+}
+
+define i8 @add_2(i8 %a, i1 %c, i8 %x, i8 %y) {
+; CHECK-LABEL: @add_2(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], -124
+; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[C:%.*...
[truncated]

``````````
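
For reference, a minimal IR sketch of the `foldICmpOrXX` change in the hunk above (the function name and the use of an argument that already carries a `not` are assumptions for illustration, not part of the patch's tests). Per the comment in that hunk, `icmp (X | Y) eq/ne Y` becomes `(X & ~Y) eq/ne 0` when `Y` is freely invertible, and `~Y` is now materialized through `getFreelyInverted`:

```llvm
; Hypothetical example: %y is an explicit not of %ny, so it is free to invert.
define i1 @src_or_eq(i8 %x, i8 %ny) {
  %y   = xor i8 %ny, -1       ; Y = ~%ny
  %or  = or i8 %x, %y
  %cmp = icmp eq i8 %or, %y   ; (X | Y) == Y
  ret i1 %cmp
}
; Per the comment in foldICmpOrXX, this is expected to end up as the equivalent
;   %and = and i8 %x, %ny     ; X & ~Y, where ~Y is just %ny
;   %cmp = icmp eq i8 %and, 0
```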



https://github.com/llvm/llvm-project/pull/66787


More information about the llvm-commits mailing list