[llvm] f8a574b - [InstCombine] C0 >> (X - C1) --> (C0 << C1) >> X

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 27 11:21:11 PDT 2022


Author: Nicolas Abram Lujan
Date: 2022-04-27T14:18:30-04:00
New Revision: f8a574bf4de48547a8d4f7f0b9c884e9e5be8d32

URL: https://github.com/llvm/llvm-project/commit/f8a574bf4de48547a8d4f7f0b9c884e9e5be8d32
DIFF: https://github.com/llvm/llvm-project/commit/f8a574bf4de48547a8d4f7f0b9c884e9e5be8d32.diff

LOG: [InstCombine] C0 >> (X - C1) --> (C0 << C1) >> X

With the right preconditions, we can fold the offset
into the shifted constant:
https://alive2.llvm.org/ce/z/drMRBU
https://alive2.llvm.org/ce/z/cUQv-_

Fixes #55016

Differential Revision: https://reviews.llvm.org/D124369
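
For illustration, here is a minimal IR sketch of the lshr flavor of the fold,
drawn from the updated tests below (the @src/@tgt function names are just
placeholders):

  ; before: the shift amount carries a -1 offset
  define i32 @src(i32 %x) {
    %a = add i32 %x, -1
    %r = lshr exact i32 2, %a
    ret i32 %r
  }

  ; after: the offset is folded into the constant (2 << 1 == 4)
  define i32 @tgt(i32 %x) {
    %r = lshr exact i32 4, %x
    ret i32 %r
  }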

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
    llvm/test/Transforms/InstCombine/shift-add.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 0bd3ac2bf28a1..a0a35147ba33b 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -373,11 +373,12 @@ Instruction *InstCombinerImpl::commonShiftTransforms(BinaryOperator &I) {
 
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
   assert(Op0->getType() == Op1->getType());
+  Type *Ty = I.getType();
 
   // If the shift amount is a one-use `sext`, we can demote it to `zext`.
   Value *Y;
   if (match(Op1, m_OneUse(m_SExt(m_Value(Y))))) {
-    Value *NewExt = Builder.CreateZExt(Y, I.getType(), Op1->getName());
+    Value *NewExt = Builder.CreateZExt(Y, Ty, Op1->getName());
     return BinaryOperator::Create(I.getOpcode(), Op0, NewExt);
   }
 
@@ -409,6 +410,47 @@ Instruction *InstCombinerImpl::commonShiftTransforms(BinaryOperator &I) {
     return BinaryOperator::Create(I.getOpcode(), NewC, A);
   }
 
+  unsigned BitWidth = Ty->getScalarSizeInBits();
+
+  const APInt *AC, *AddC;
+  // Try to pre-shift a constant shifted by a variable amount added with a
+  // negative number:
+  // C << (X - AddC) --> (C >> AddC) << X
+  // and
+  // C >> (X - AddC) --> (C << AddC) >> X
+  if (match(Op0, m_APInt(AC)) && match(Op1, m_Add(m_Value(A), m_APInt(AddC))) &&
+      AddC->isNegative() && (-*AddC).ult(BitWidth)) {
+    assert(!AC->isZero() && "Expected simplify of shifted zero");
+    unsigned PosOffset = (-*AddC).getZExtValue();
+
+    auto isSuitableForPreShift = [PosOffset, &I, AC]() {
+      switch (I.getOpcode()) {
+      default:
+        return false;
+      case Instruction::Shl:
+        return (I.hasNoSignedWrap() || I.hasNoUnsignedWrap()) &&
+               AC->eq(AC->lshr(PosOffset).shl(PosOffset));
+      case Instruction::LShr:
+        return I.isExact() && AC->eq(AC->shl(PosOffset).lshr(PosOffset));
+      case Instruction::AShr:
+        return I.isExact() && AC->eq(AC->shl(PosOffset).ashr(PosOffset));
+      }
+    };
+    if (isSuitableForPreShift()) {
+      Constant *NewC = ConstantInt::get(Ty, I.getOpcode() == Instruction::Shl
+                                                ? AC->lshr(PosOffset)
+                                                : AC->shl(PosOffset));
+      BinaryOperator *NewShiftOp =
+          BinaryOperator::Create(I.getOpcode(), NewC, A);
+      if (I.getOpcode() == Instruction::Shl) {
+        NewShiftOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
+      } else {
+        NewShiftOp->setIsExact();
+      }
+      return NewShiftOp;
+    }
+  }
+
   // X shift (A srem C) -> X shift (A and (C - 1)) iff C is a power of 2.
   // Because shifts by negative values (which could occur if A were negative)
   // are undefined.
@@ -416,7 +458,7 @@ Instruction *InstCombinerImpl::commonShiftTransforms(BinaryOperator &I) {
       match(C, m_Power2())) {
     // FIXME: Should this get moved into SimplifyDemandedBits by saying we don't
     // demand the sign bit (and many others) here??
-    Constant *Mask = ConstantExpr::getSub(C, ConstantInt::get(I.getType(), 1));
+    Constant *Mask = ConstantExpr::getSub(C, ConstantInt::get(Ty, 1));
     Value *Rem = Builder.CreateAnd(A, Mask, Op1->getName());
     return replaceOperand(I, 1, Rem);
   }
@@ -988,23 +1030,6 @@ Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
     return BinaryOperator::CreateLShr(
         ConstantInt::get(Ty, APInt::getSignMask(BitWidth)), X);
 
-  // Try to pre-shift a constant shifted by a variable amount:
-  // C << (X + AddC) --> (C >> -AddC) << X
-  // This requires a no-wrap flag and negative offset constant.
-  const APInt *AddC;
-  if ((I.hasNoSignedWrap() || I.hasNoUnsignedWrap()) &&
-      match(Op0, m_APInt(C)) && match(Op1, m_Add(m_Value(X), m_APInt(AddC))) &&
-      AddC->isNegative() && (-*AddC).ult(BitWidth)) {
-    assert(!C->isZero() && "Expected simplify of shifted zero");
-    unsigned PosOffset = (-*AddC).getZExtValue();
-    if (C->eq(C->lshr(PosOffset).shl(PosOffset))) {
-      Constant *NewC = ConstantInt::get(Ty, C->lshr(PosOffset));
-      Instruction *NewShl = BinaryOperator::CreateShl(NewC, X);
-      NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
-      return NewShl;
-    }
-  }
-
   return nullptr;
 }
 

diff --git a/llvm/test/Transforms/InstCombine/shift-add.ll b/llvm/test/Transforms/InstCombine/shift-add.ll
index 13d3c92ac05b7..90ea75e0a9b81 100644
--- a/llvm/test/Transforms/InstCombine/shift-add.ll
+++ b/llvm/test/Transforms/InstCombine/shift-add.ll
@@ -224,8 +224,7 @@ define i32 @lshr_add_negative_shift_no_exact(i32 %x) {
 
 define i32 @lshr_exact_add_negative_shift_positive(i32 %x) {
 ; CHECK-LABEL: @lshr_exact_add_negative_shift_positive(
-; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], -1
-; CHECK-NEXT:    [[R:%.*]] = lshr exact i32 2, [[A]]
+; CHECK-NEXT:    [[R:%.*]] = lshr exact i32 4, [[X:%.*]]
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a = add i32 %x, -1
@@ -237,7 +236,7 @@ define i8 @lshr_exact_add_negative_shift_positive_extra_use(i8 %x) {
 ; CHECK-LABEL: @lshr_exact_add_negative_shift_positive_extra_use(
 ; CHECK-NEXT:    [[A:%.*]] = add i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use(i8 [[A]])
-; CHECK-NEXT:    [[R:%.*]] = lshr exact i8 64, [[A]]
+; CHECK-NEXT:    [[R:%.*]] = lshr exact i8 -128, [[X]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %a = add i8 %x, -1
@@ -248,8 +247,7 @@ define i8 @lshr_exact_add_negative_shift_positive_extra_use(i8 %x) {
 
 define <2 x i9> @lshr_exact_add_negative_shift_positive_vec(<2 x i9> %x) {
 ; CHECK-LABEL: @lshr_exact_add_negative_shift_positive_vec(
-; CHECK-NEXT:    [[A:%.*]] = add <2 x i9> [[X:%.*]], <i9 -7, i9 -7>
-; CHECK-NEXT:    [[R:%.*]] = lshr exact <2 x i9> <i9 2, i9 2>, [[A]]
+; CHECK-NEXT:    [[R:%.*]] = lshr exact <2 x i9> <i9 -256, i9 -256>, [[X:%.*]]
 ; CHECK-NEXT:    ret <2 x i9> [[R]]
 ;
   %a = add <2 x i9> %x, <i9 -7, i9 -7>
@@ -309,8 +307,7 @@ define i32 @ashr_add_negative_shift_no_exact(i32 %x) {
 
 define i32 @ashr_exact_add_negative_shift_negative(i32 %x) {
 ; CHECK-LABEL: @ashr_exact_add_negative_shift_negative(
-; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], -1
-; CHECK-NEXT:    [[R:%.*]] = ashr exact i32 -2, [[A]]
+; CHECK-NEXT:    [[R:%.*]] = ashr exact i32 -4, [[X:%.*]]
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a = add i32 %x, -1
@@ -322,7 +319,7 @@ define i8 @ashr_exact_add_negative_shift_negative_extra_use(i8 %x) {
 ; CHECK-LABEL: @ashr_exact_add_negative_shift_negative_extra_use(
 ; CHECK-NEXT:    [[A:%.*]] = add i8 [[X:%.*]], -2
 ; CHECK-NEXT:    call void @use(i8 [[A]])
-; CHECK-NEXT:    [[R:%.*]] = ashr exact i8 -32, [[A]]
+; CHECK-NEXT:    [[R:%.*]] = ashr exact i8 -128, [[X]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %a = add i8 %x, -2
@@ -333,8 +330,7 @@ define i8 @ashr_exact_add_negative_shift_negative_extra_use(i8 %x) {
 
 define <2 x i7> @ashr_exact_add_negative_shift_negative_vec(<2 x i7> %x) {
 ; CHECK-LABEL: @ashr_exact_add_negative_shift_negative_vec(
-; CHECK-NEXT:    [[A:%.*]] = add <2 x i7> [[X:%.*]], <i7 -5, i7 -5>
-; CHECK-NEXT:    [[R:%.*]] = ashr exact <2 x i7> <i7 -2, i7 -2>, [[A]]
+; CHECK-NEXT:    [[R:%.*]] = ashr exact <2 x i7> <i7 -64, i7 -64>, [[X:%.*]]
 ; CHECK-NEXT:    ret <2 x i7> [[R]]
 ;
   %a = add <2 x i7> %x, <i7 -5, i7 -5>
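
For reference, a similar minimal sketch of the shl flavor handled by the
relocated code in InstCombineShifts.cpp above (the constant and flag follow
the shl branch of isSuitableForPreShift; function names are placeholders):

  ; before: the low bit of 4 is zero, so 4 == (4 >> 1) << 1 and the
  ; constant can be pre-shifted to 4 >> 1 == 2; the shl case needs
  ; nsw or nuw, and with nuw on the input the nuw flag is preserved
  define i8 @shl_src(i8 %x) {
    %a = add i8 %x, -1
    %s = shl nuw i8 4, %a
    ret i8 %s
  }

  ; roughly the expected form after the fold
  define i8 @shl_tgt(i8 %x) {
    %s = shl nuw i8 2, %x
    ret i8 %s
  }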


        

