[llvm] de18fa9 - Revert "[InstSimplify] Bypass no-op `and`-mask, using known bits (PR49543)"

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 21 00:56:49 PDT 2021


Author: Nikita Popov
Date: 2021-04-21T09:55:25+02:00
New Revision: de18fa9e52a439798edf89df6fee807908814416

URL: https://github.com/llvm/llvm-project/commit/de18fa9e52a439798edf89df6fee807908814416
DIFF: https://github.com/llvm/llvm-project/commit/de18fa9e52a439798edf89df6fee807908814416.diff

LOG: Revert "[InstSimplify] Bypass no-op `and`-mask, using known bits (PR49543)"

This reverts commit ea1a0d7c9ae3e5232a4163fc67efad4aabd51f2b.

While this is strictly more powerful, it is also strictly slower.
InstSimplify intentionally does not perform many folds that it
is allowed to perform, if doing so requires a KnownBits calculation
that will be repeated in InstCombine.

Maybe it's worthwhile to do this here, but that needs a more
explicitly stated motivation and should be evaluated in a review.
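
For illustration (a sketch derived from the noop_and_t0 test update in the
diff below, not part of the original log): the reverted KnownBits-based fold
removed the final `and` in this pattern, whereas the restored shift-specific
check does not look through the preceding shl, so the `and` stays:

    %a = shl i8 %x, 3      ; low 3 bits of %a are known zero
    %b = lshr i8 %a, 2     ; bits 0, 6 and 7 of %b are known zero
    %r = and i8 %b, 62     ; ~62 = 0b11000001 covers only known-zero bits,
                           ; so the KnownBits fold simplified %r to %b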

Added: 
    

Modified: 
    llvm/lib/Analysis/InstructionSimplify.cpp
    llvm/test/Transforms/InstSimplify/AndOrXor.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 43441184115f..08f504a0ce37 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -2081,12 +2081,21 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
   if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
     return V;
 
-  // A mask that only clears known zeros of a value is a no-op.
+  // A mask that only clears known zeros of a shifted value is a no-op.
   Value *X;
   const APInt *Mask;
+  const APInt *ShAmt;
   if (match(Op1, m_APInt(Mask))) {
-    KnownBits Known = computeKnownBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
-    if ((~*Mask).isSubsetOf(Known.Zero))
+    // If all bits in the inverted and shifted mask are clear:
+    // and (shl X, ShAmt), Mask --> shl X, ShAmt
+    if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
+        (~(*Mask)).lshr(*ShAmt).isNullValue())
+      return Op0;
+
+    // If all bits in the inverted and shifted mask are clear:
+    // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
+    if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
+        (~(*Mask)).shl(*ShAmt).isNullValue())
       return Op0;
   }
 
@@ -2171,7 +2180,6 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
   // SimplifyDemandedBits in InstCombine can optimize the general case.
   // This pattern aims to help other passes for a common case.
   Value *Y, *XShifted;
-  const APInt *ShAmt;
   if (match(Op1, m_APInt(Mask)) &&
       match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
                                      m_Value(XShifted)),

diff --git a/llvm/test/Transforms/InstSimplify/AndOrXor.ll b/llvm/test/Transforms/InstSimplify/AndOrXor.ll
index 09f6faf3928c..ee94d2f7720e 100644
--- a/llvm/test/Transforms/InstSimplify/AndOrXor.ll
+++ b/llvm/test/Transforms/InstSimplify/AndOrXor.ll
@@ -1192,7 +1192,8 @@ define i8 @noop_and_t0(i8 %x) {
 ; CHECK-LABEL: @noop_and_t0(
 ; CHECK-NEXT:    [[A:%.*]] = shl i8 [[X:%.*]], 3
 ; CHECK-NEXT:    [[B:%.*]] = lshr i8 [[A]], 2
-; CHECK-NEXT:    ret i8 [[B]]
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[B]], 62
+; CHECK-NEXT:    ret i8 [[R]]
 ;
   %a = shl i8 %x, 3
   %b = lshr i8 %a, 2
@@ -1203,7 +1204,8 @@ define i8 @noop_and_t1(i8 %x) {
 ; CHECK-LABEL: @noop_and_t1(
 ; CHECK-NEXT:    [[A:%.*]] = shl i8 [[X:%.*]], 3
 ; CHECK-NEXT:    [[B:%.*]] = lshr i8 [[A]], 2
-; CHECK-NEXT:    ret i8 [[B]]
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[B]], 126
+; CHECK-NEXT:    ret i8 [[R]]
 ;
   %a = shl i8 %x, 3
   %b = lshr i8 %a, 2


        

