[llvm] r293814 - [InstCombine] move folds for shift-shift pairs; NFCI

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 1 13:31:34 PST 2017


Author: spatel
Date: Wed Feb  1 15:31:34 2017
New Revision: 293814

URL: http://llvm.org/viewvc/llvm-project?rev=293814&view=rev
Log:
[InstCombine] move folds for shift-shift pairs; NFCI

Although this is 'no-functional-change-intended', I'm adding tests
for shl-shl and lshr-lshr pairs because there is no existing test 
coverage for those folds.

It seems like we should be able to remove some code from foldShiftedShift()
at this point because we're handling those patterns on the general path.
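
In IR terms, the moved two-shift folds are:

  (X << C1) << C2    --> X << (C1 + C2)
  (X >>u C1) >>u C2  --> X >>u (C1 + C2)

valid when C1 + C2 stays below the bit width (oversized sums are already
simplified to zero by InstSimplify). A minimal C++ sketch of the underlying
arithmetic, using arbitrary 32-bit values (illustrative only, not code from
this commit):

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t X = 0xDEADBEEF;
    // shl of shl: (X << 3) << 5 is the same as X << 8, since 3 + 5 < 32.
    assert(((X << 3) << 5) == (X << 8));
    // lshr of lshr: (X >> 3) >> 5 is the same as X >> 8, same bound applies.
    assert(((X >> 3) >> 5) == (X >> 8));
    return 0;
  }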

Modified:
    llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
    llvm/trunk/test/Transforms/InstCombine/apint-shift.ll

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp?rev=293814&r1=293813&r2=293814&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp Wed Feb  1 15:31:34 2017
@@ -309,41 +309,6 @@ static Value *getShiftedValue(Value *V,
   }
 }
 
-/// Try to fold (X << C1) << C2, where the shifts are some combination of
-/// shl/ashr/lshr.
-static Instruction *
-foldShiftByConstOfShiftByConst(BinaryOperator &I, const APInt *COp1,
-                               InstCombiner::BuilderTy *Builder) {
-  Value *Op0 = I.getOperand(0);
-  unsigned TypeBits = Op0->getType()->getScalarSizeInBits();
-
-  // Find out if this is a shift of a shift by a constant.
-  BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
-  if (!ShiftOp || !ShiftOp->isShift())
-    return nullptr;
-
-  const APInt *ShAmt1;
-  if (!match(ShiftOp->getOperand(1), m_APInt(ShAmt1)))
-    return nullptr;
-
-  // Check for (X << c1) << c2  and  (X >> c1) >> c2
-  if (I.getOpcode() == ShiftOp->getOpcode()) {
-    unsigned AmtSum = (*ShAmt1 + *COp1).getZExtValue();
-    // If this is an oversized composite shift, then unsigned shifts become
-    // zero (handled in InstSimplify) and ashr saturates.
-    if (AmtSum >= TypeBits) {
-      if (I.getOpcode() != Instruction::AShr)
-        return nullptr;
-      AmtSum = TypeBits - 1; // Saturate to 31 for i32 ashr.
-    }
-
-    return BinaryOperator::Create(I.getOpcode(), ShiftOp->getOperand(0),
-                                  ConstantInt::get(I.getType(), AmtSum));
-  }
-
-  return nullptr;
-}
-
 Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
                                                BinaryOperator &I) {
   bool isLeftShift = I.getOpcode() == Instruction::Shl;
@@ -553,9 +518,6 @@ Instruction *InstCombiner::FoldShiftByCo
     }
   }
 
-  if (Instruction *Folded = foldShiftByConstOfShiftByConst(I, Op1C, Builder))
-    return Folded;
-
   return nullptr;
 }
 
@@ -598,10 +560,10 @@ Instruction *InstCombiner::visitShl(Bina
     // arithmetic expressions are still recognizable by scalar evolution.
     // The inexact versions are deferred to DAGCombine, so we don't hide shl
     // behind a bit mask.
-    const APInt *ShrOp1;
-    if (match(Op0, m_CombineOr(m_Exact(m_LShr(m_Value(X), m_APInt(ShrOp1))),
-                               m_Exact(m_AShr(m_Value(X), m_APInt(ShrOp1)))))) {
-      unsigned ShrAmt = ShrOp1->getZExtValue();
+    const APInt *ShOp1;
+    if (match(Op0, m_CombineOr(m_Exact(m_LShr(m_Value(X), m_APInt(ShOp1))),
+                               m_Exact(m_AShr(m_Value(X), m_APInt(ShOp1)))))) {
+      unsigned ShrAmt = ShOp1->getZExtValue();
       if (ShrAmt < ShAmt) {
         // If C1 < C2: (X >>?,exact C1) << C2 --> X << (C2 - C1)
         Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShrAmt);
@@ -620,6 +582,14 @@ Instruction *InstCombiner::visitShl(Bina
       }
     }
 
+    if (match(Op0, m_Shl(m_Value(X), m_APInt(ShOp1)))) {
+      unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
+      // Oversized shifts are simplified to zero in InstSimplify.
+      if (AmtSum < BitWidth)
+        // (X << C1) << C2 --> X << (C1 + C2)
+        return BinaryOperator::CreateShl(X, ConstantInt::get(Ty, AmtSum));
+    }
+
     // If the shifted-out value is known-zero, then this is a NUW shift.
     if (!I.hasNoUnsignedWrap() &&
         MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, ShAmt), 0, &I)) {
@@ -675,9 +645,9 @@ Instruction *InstCombiner::visitLShr(Bin
     }
 
     Value *X;
-    const APInt *ShlAmtAPInt;
-    if (match(Op0, m_Shl(m_Value(X), m_APInt(ShlAmtAPInt)))) {
-      unsigned ShlAmt = ShlAmtAPInt->getZExtValue();
+    const APInt *ShOp1;
+    if (match(Op0, m_Shl(m_Value(X), m_APInt(ShOp1)))) {
+      unsigned ShlAmt = ShOp1->getZExtValue();
       if (ShlAmt < ShAmt) {
         Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShlAmt);
         if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
@@ -710,6 +680,14 @@ Instruction *InstCombiner::visitLShr(Bin
       return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
     }
 
+    if (match(Op0, m_LShr(m_Value(X), m_APInt(ShOp1)))) {
+      unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
+      // Oversized shifts are simplified to zero in InstSimplify.
+      if (AmtSum < BitWidth)
+        // (X >>u C1) >>u C2 --> X >>u (C1 + C2)
+        return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
+    }
+
     // If the shifted-out value is known-zero, then this is an exact shift.
     if (!I.isExact() &&
         MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {
@@ -747,9 +725,9 @@ Instruction *InstCombiner::visitAShr(Bin
 
     // We can't handle (X << C1) >>s C2. It shifts arbitrary bits in. However,
     // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
-    const APInt *ShlAmtAPInt;
-    if (match(Op0, m_NSWShl(m_Value(X), m_APInt(ShlAmtAPInt)))) {
-      unsigned ShlAmt = ShlAmtAPInt->getZExtValue();
+    const APInt *ShOp1;
+    if (match(Op0, m_NSWShl(m_Value(X), m_APInt(ShOp1)))) {
+      unsigned ShlAmt = ShOp1->getZExtValue();
       if (ShlAmt < ShAmt) {
         // (X <<nsw C1) >>s C2 --> X >>s (C2 - C1)
         Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShlAmt);
@@ -766,6 +744,14 @@ Instruction *InstCombiner::visitAShr(Bin
       }
     }
 
+    if (match(Op0, m_AShr(m_Value(X), m_APInt(ShOp1)))) {
+      unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
+      // Oversized arithmetic shifts replicate the sign bit.
+      AmtSum = std::min(AmtSum, BitWidth - 1);
+      // (X >>s C1) >>s C2 --> X >>s (C1 + C2)
+      return BinaryOperator::CreateAShr(X, ConstantInt::get(Ty, AmtSum));
+    }
+
     // If the shifted-out value is known-zero, then this is an exact shift.
     if (!I.isExact() &&
         MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {

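For reference, a standalone C++ model of the ashr-of-ashr case added above
(assumptions: 32-bit width, arithmetic behavior of >> on signed values, and
C1, C2 each below the bit width as required of well-defined IR shifts; this
is a sketch, not code from the patch):

  #include <algorithm>
  #include <cstdint>

  // (X >>s C1) >>s C2 --> X >>s min(C1 + C2, BitWidth - 1)
  // An arithmetic shift saturates (keeps replicating the sign bit) once the
  // combined amount reaches the bit width, so the sum is clamped rather than
  // rejected, unlike the shl and lshr cases which defer to InstSimplify.
  int32_t foldAShrAShr(int32_t X, unsigned C1, unsigned C2) {
    const unsigned BitWidth = 32;
    unsigned AmtSum = std::min(C1 + C2, BitWidth - 1);
    return X >> AmtSum; // arithmetic shift for signed types on common ABIs
  }
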
Modified: llvm/trunk/test/Transforms/InstCombine/apint-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/apint-shift.ll?rev=293814&r1=293813&r2=293814&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/apint-shift.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/apint-shift.ll Wed Feb  1 15:31:34 2017
@@ -126,6 +126,32 @@ define <2 x i19> @lshr_lshr_splat_vec(<2
   ret <2 x i19> %sh2
 }
 
+define i9 @multiuse_lshr_lshr(i9 %x) {
+; CHECK-LABEL: @multiuse_lshr_lshr(
+; CHECK-NEXT:    [[SH1:%.*]] = lshr i9 %x, 2
+; CHECK-NEXT:    [[SH2:%.*]] = lshr i9 %x, 5
+; CHECK-NEXT:    [[MUL:%.*]] = mul i9 [[SH1]], [[SH2]]
+; CHECK-NEXT:    ret i9 [[MUL]]
+;
+  %sh1 = lshr i9 %x, 2
+  %sh2 = lshr i9 %sh1, 3
+  %mul = mul i9 %sh1, %sh2
+  ret i9 %mul
+}
+
+define <2 x i9> @multiuse_lshr_lshr_splat(<2 x i9> %x) {
+; CHECK-LABEL: @multiuse_lshr_lshr_splat(
+; CHECK-NEXT:    [[SH1:%.*]] = lshr <2 x i9> %x, <i9 2, i9 2>
+; CHECK-NEXT:    [[SH2:%.*]] = lshr <2 x i9> %x, <i9 5, i9 5>
+; CHECK-NEXT:    [[MUL:%.*]] = mul <2 x i9> [[SH1]], [[SH2]]
+; CHECK-NEXT:    ret <2 x i9> [[MUL]]
+;
+  %sh1 = lshr <2 x i9> %x, <i9 2, i9 2>
+  %sh2 = lshr <2 x i9> %sh1, <i9 3, i9 3>
+  %mul = mul <2 x i9> %sh1, %sh2
+  ret <2 x i9> %mul
+}
+
 ; Two left shifts in the same direction:
 ; shl (shl X, C1), C2 -->  shl X, C1 + C2
 
@@ -139,6 +165,32 @@ define <2 x i19> @shl_shl_splat_vec(<2 x
   ret <2 x i19> %sh2
 }
 
+define i42 @multiuse_shl_shl(i42 %x) {
+; CHECK-LABEL: @multiuse_shl_shl(
+; CHECK-NEXT:    [[SH1:%.*]] = shl i42 %x, 8
+; CHECK-NEXT:    [[SH2:%.*]] = shl i42 %x, 17
+; CHECK-NEXT:    [[MUL:%.*]] = mul i42 [[SH1]], [[SH2]]
+; CHECK-NEXT:    ret i42 [[MUL]]
+;
+  %sh1 = shl i42 %x, 8
+  %sh2 = shl i42 %sh1, 9
+  %mul = mul i42 %sh1, %sh2
+  ret i42 %mul
+}
+
+define <2 x i42> @multiuse_shl_shl_splat(<2 x i42> %x) {
+; CHECK-LABEL: @multiuse_shl_shl_splat(
+; CHECK-NEXT:    [[SH1:%.*]] = shl <2 x i42> %x, <i42 8, i42 8>
+; CHECK-NEXT:    [[SH2:%.*]] = shl <2 x i42> %x, <i42 17, i42 17>
+; CHECK-NEXT:    [[MUL:%.*]] = mul <2 x i42> [[SH1]], [[SH2]]
+; CHECK-NEXT:    ret <2 x i42> [[MUL]]
+;
+  %sh1 = shl <2 x i42> %x, <i42 8, i42 8>
+  %sh2 = shl <2 x i42> %sh1, <i42 9, i42 9>
+  %mul = mul <2 x i42> %sh1, %sh2
+  ret <2 x i42> %mul
+}
+
 ; Equal shift amounts in opposite directions become bitwise 'and':
 ; lshr (shl X, C), C --> and X, C'
 

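The multiuse tests check that the fold still applies when the inner shift
has another use: the combined shift reads X directly, so the inner shift can
remain for its other user and no instruction is duplicated. A small C++
model of the i9 lshr case (i9 simulated in a uint32_t; illustrative only):

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t X = 0x123;    // some 9-bit value, as in the i9 tests
    uint32_t Sh1 = X >> 2; // inner shift keeps its other use
    uint32_t Old = Sh1 >> 3;
    uint32_t New = X >> 5; // folded form: one shift of X directly
    assert(Old == New);
    return 0;
  }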