[llvm] r292230 - [InstCombine] reduce indent; NFCI

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 17 08:59:09 PST 2017


Author: spatel
Date: Tue Jan 17 10:59:09 2017
New Revision: 292230

URL: http://llvm.org/viewvc/llvm-project?rev=292230&view=rev
Log:
[InstCombine] reduce indent; NFCI

Modified:
    llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp?rev=292230&r1=292229&r2=292230&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp Tue Jan 17 10:59:09 2017
@@ -319,151 +319,149 @@ foldShiftByConstOfShiftByConst(BinaryOpe
 
   // Find out if this is a shift of a shift by a constant.
   BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
-  if (ShiftOp && !ShiftOp->isShift())
-    ShiftOp = nullptr;
+  if (!ShiftOp || !ShiftOp->isShift() ||
+      !isa<ConstantInt>(ShiftOp->getOperand(1)))
+    return nullptr;
+
+  // This is a constant shift of a constant shift. Be careful about hiding
+  // shl instructions behind bit masks. They are used to represent multiplies
+  // by a constant, and it is important that simple arithmetic expressions
+  // are still recognizable by scalar evolution.
+  //
+  // The transforms applied to shl are very similar to the transforms applied
+  // to mul by constant. We can be more aggressive about optimizing right
+  // shifts.
+  //
+  // Combinations of right and left shifts will still be optimized in
+  // DAGCombine where scalar evolution no longer applies.
+
+  ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
+  uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
+  uint32_t ShiftAmt2 = COp1->getLimitedValue(TypeBits);
+  assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
+  if (ShiftAmt1 == 0)
+    return nullptr; // Will be simplified in the future.
+  Value *X = ShiftOp->getOperand(0);
+
+  IntegerType *Ty = cast<IntegerType>(I.getType());
+
+  // Check for (X << c1) << c2  and  (X >> c1) >> c2
+  if (I.getOpcode() == ShiftOp->getOpcode()) {
+    uint32_t AmtSum = ShiftAmt1 + ShiftAmt2; // Fold into one big shift.
+    // If this is an oversized composite shift, then unsigned shifts become
+    // zero (handled in InstSimplify) and ashr saturates.
+    if (AmtSum >= TypeBits) {
+      if (I.getOpcode() != Instruction::AShr)
+        return nullptr;
+      AmtSum = TypeBits - 1; // Saturate to 31 for i32 ashr.
+    }
 
-  if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {
+    return BinaryOperator::Create(I.getOpcode(), X,
+                                  ConstantInt::get(Ty, AmtSum));
+  }
 
-    // This is a constant shift of a constant shift. Be careful about hiding
-    // shl instructions behind bit masks. They are used to represent multiplies
-    // by a constant, and it is important that simple arithmetic expressions
-    // are still recognizable by scalar evolution.
-    //
-    // The transforms applied to shl are very similar to the transforms applied
-    // to mul by constant. We can be more aggressive about optimizing right
-    // shifts.
-    //
-    // Combinations of right and left shifts will still be optimized in
-    // DAGCombine where scalar evolution no longer applies.
-
-    ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
-    uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
-    uint32_t ShiftAmt2 = COp1->getLimitedValue(TypeBits);
-    assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
-    if (ShiftAmt1 == 0)
-      return nullptr; // Will be simplified in the future.
-    Value *X = ShiftOp->getOperand(0);
-
-    IntegerType *Ty = cast<IntegerType>(I.getType());
-
-    // Check for (X << c1) << c2  and  (X >> c1) >> c2
-    if (I.getOpcode() == ShiftOp->getOpcode()) {
-      uint32_t AmtSum = ShiftAmt1 + ShiftAmt2; // Fold into one big shift.
-      // If this is an oversized composite shift, then unsigned shifts become
-      // zero (handled in InstSimplify) and ashr saturates.
-      if (AmtSum >= TypeBits) {
-        if (I.getOpcode() != Instruction::AShr)
-          return nullptr;
-        AmtSum = TypeBits - 1; // Saturate to 31 for i32 ashr.
-      }
+  if (ShiftAmt1 == ShiftAmt2) {
+    // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
+    if (I.getOpcode() == Instruction::LShr &&
+        ShiftOp->getOpcode() == Instruction::Shl) {
+      APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
+      return BinaryOperator::CreateAnd(X,
+                                       ConstantInt::get(I.getContext(), Mask));
+    }
+  } else if (ShiftAmt1 < ShiftAmt2) {
+    uint32_t ShiftDiff = ShiftAmt2 - ShiftAmt1;
 
-      return BinaryOperator::Create(I.getOpcode(), X,
-                                    ConstantInt::get(Ty, AmtSum));
+    // (X >>?,exact C1) << C2 --> X << (C2-C1)
+    // The inexact version is deferred to DAGCombine so we don't hide shl
+    // behind a bit mask.
+    if (I.getOpcode() == Instruction::Shl &&
+        ShiftOp->getOpcode() != Instruction::Shl && ShiftOp->isExact()) {
+      assert(ShiftOp->getOpcode() == Instruction::LShr ||
+             ShiftOp->getOpcode() == Instruction::AShr);
+      ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
+      BinaryOperator *NewShl =
+          BinaryOperator::Create(Instruction::Shl, X, ShiftDiffCst);
+      NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
+      NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
+      return NewShl;
     }
 
-    if (ShiftAmt1 == ShiftAmt2) {
-      // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
-      if (I.getOpcode() == Instruction::LShr &&
-          ShiftOp->getOpcode() == Instruction::Shl) {
-        APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
-        return BinaryOperator::CreateAnd(
-            X, ConstantInt::get(I.getContext(), Mask));
-      }
-    } else if (ShiftAmt1 < ShiftAmt2) {
-      uint32_t ShiftDiff = ShiftAmt2 - ShiftAmt1;
+    // (X << C1) >>u C2  --> X >>u (C2-C1) & (-1 >> C2)
+    if (I.getOpcode() == Instruction::LShr &&
+        ShiftOp->getOpcode() == Instruction::Shl) {
+      ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
+      // (X <<nuw C1) >>u C2 --> X >>u (C2-C1)
+      if (ShiftOp->hasNoUnsignedWrap()) {
+        BinaryOperator *NewLShr =
+            BinaryOperator::Create(Instruction::LShr, X, ShiftDiffCst);
+        NewLShr->setIsExact(I.isExact());
+        return NewLShr;
+      }
+      Value *Shift = Builder->CreateLShr(X, ShiftDiffCst);
+
+      APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
+      return BinaryOperator::CreateAnd(Shift,
+                                       ConstantInt::get(I.getContext(), Mask));
+    }
 
-      // (X >>?,exact C1) << C2 --> X << (C2-C1)
-      // The inexact version is deferred to DAGCombine so we don't hide shl
-      // behind a bit mask.
-      if (I.getOpcode() == Instruction::Shl &&
-          ShiftOp->getOpcode() != Instruction::Shl && ShiftOp->isExact()) {
-        assert(ShiftOp->getOpcode() == Instruction::LShr ||
-               ShiftOp->getOpcode() == Instruction::AShr);
+    // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in. However,
+    // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
+    if (I.getOpcode() == Instruction::AShr &&
+        ShiftOp->getOpcode() == Instruction::Shl) {
+      if (ShiftOp->hasNoSignedWrap()) {
+        // (X <<nsw C1) >>s C2 --> X >>s (C2-C1)
         ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
+        BinaryOperator *NewAShr =
+            BinaryOperator::Create(Instruction::AShr, X, ShiftDiffCst);
+        NewAShr->setIsExact(I.isExact());
+        return NewAShr;
+      }
+    }
+  } else {
+    assert(ShiftAmt2 < ShiftAmt1);
+    uint32_t ShiftDiff = ShiftAmt1 - ShiftAmt2;
+
+    // (X >>?exact C1) << C2 --> X >>?exact (C1-C2)
+    // The inexact version is deferred to DAGCombine so we don't hide shl
+    // behind a bit mask.
+    if (I.getOpcode() == Instruction::Shl &&
+        ShiftOp->getOpcode() != Instruction::Shl && ShiftOp->isExact()) {
+      ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
+      BinaryOperator *NewShr =
+          BinaryOperator::Create(ShiftOp->getOpcode(), X, ShiftDiffCst);
+      NewShr->setIsExact(true);
+      return NewShr;
+    }
+
+    // (X << C1) >>u C2  --> X << (C1-C2) & (-1 >> C2)
+    if (I.getOpcode() == Instruction::LShr &&
+        ShiftOp->getOpcode() == Instruction::Shl) {
+      ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
+      if (ShiftOp->hasNoUnsignedWrap()) {
+        // (X <<nuw C1) >>u C2 --> X <<nuw (C1-C2)
         BinaryOperator *NewShl =
             BinaryOperator::Create(Instruction::Shl, X, ShiftDiffCst);
-        NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
-        NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
+        NewShl->setHasNoUnsignedWrap(true);
         return NewShl;
       }
+      Value *Shift = Builder->CreateShl(X, ShiftDiffCst);
 
-      // (X << C1) >>u C2  --> X >>u (C2-C1) & (-1 >> C2)
-      if (I.getOpcode() == Instruction::LShr &&
-          ShiftOp->getOpcode() == Instruction::Shl) {
-        ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
-        // (X <<nuw C1) >>u C2 --> X >>u (C2-C1)
-        if (ShiftOp->hasNoUnsignedWrap()) {
-          BinaryOperator *NewLShr =
-              BinaryOperator::Create(Instruction::LShr, X, ShiftDiffCst);
-          NewLShr->setIsExact(I.isExact());
-          return NewLShr;
-        }
-        Value *Shift = Builder->CreateLShr(X, ShiftDiffCst);
-
-        APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
-        return BinaryOperator::CreateAnd(
-            Shift, ConstantInt::get(I.getContext(), Mask));
-      }
-
-      // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in. However,
-      // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
-      if (I.getOpcode() == Instruction::AShr &&
-          ShiftOp->getOpcode() == Instruction::Shl) {
-        if (ShiftOp->hasNoSignedWrap()) {
-          // (X <<nsw C1) >>s C2 --> X >>s (C2-C1)
-          ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
-          BinaryOperator *NewAShr =
-              BinaryOperator::Create(Instruction::AShr, X, ShiftDiffCst);
-          NewAShr->setIsExact(I.isExact());
-          return NewAShr;
-        }
-      }
-    } else {
-      assert(ShiftAmt2 < ShiftAmt1);
-      uint32_t ShiftDiff = ShiftAmt1 - ShiftAmt2;
-
-      // (X >>?exact C1) << C2 --> X >>?exact (C1-C2)
-      // The inexact version is deferred to DAGCombine so we don't hide shl
-      // behind a bit mask.
-      if (I.getOpcode() == Instruction::Shl &&
-          ShiftOp->getOpcode() != Instruction::Shl && ShiftOp->isExact()) {
-        ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
-        BinaryOperator *NewShr =
-            BinaryOperator::Create(ShiftOp->getOpcode(), X, ShiftDiffCst);
-        NewShr->setIsExact(true);
-        return NewShr;
-      }
+      APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
+      return BinaryOperator::CreateAnd(Shift,
+                                       ConstantInt::get(I.getContext(), Mask));
+    }
 
-      // (X << C1) >>u C2  --> X << (C1-C2) & (-1 >> C2)
-      if (I.getOpcode() == Instruction::LShr &&
-          ShiftOp->getOpcode() == Instruction::Shl) {
+    // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in. However,
+    // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
+    if (I.getOpcode() == Instruction::AShr &&
+        ShiftOp->getOpcode() == Instruction::Shl) {
+      if (ShiftOp->hasNoSignedWrap()) {
+        // (X <<nsw C1) >>s C2 --> X <<nsw (C1-C2)
         ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
-        if (ShiftOp->hasNoUnsignedWrap()) {
-          // (X <<nuw C1) >>u C2 --> X <<nuw (C1-C2)
-          BinaryOperator *NewShl =
-              BinaryOperator::Create(Instruction::Shl, X, ShiftDiffCst);
-          NewShl->setHasNoUnsignedWrap(true);
-          return NewShl;
-        }
-        Value *Shift = Builder->CreateShl(X, ShiftDiffCst);
-
-        APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
-        return BinaryOperator::CreateAnd(
-            Shift, ConstantInt::get(I.getContext(), Mask));
-      }
-
-      // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in. However,
-      // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
-      if (I.getOpcode() == Instruction::AShr &&
-          ShiftOp->getOpcode() == Instruction::Shl) {
-        if (ShiftOp->hasNoSignedWrap()) {
-          // (X <<nsw C1) >>s C2 --> X <<nsw (C1-C2)
-          ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
-          BinaryOperator *NewShl =
-              BinaryOperator::Create(Instruction::Shl, X, ShiftDiffCst);
-          NewShl->setHasNoSignedWrap(true);
-          return NewShl;
-        }
+        BinaryOperator *NewShl =
+            BinaryOperator::Create(Instruction::Shl, X, ShiftDiffCst);
+        NewShl->setHasNoSignedWrap(true);
+        return NewShl;
       }
     }
   }
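
(Not part of the commit: as a quick illustration of the bit-level identities the
folds above rely on, here is a minimal standalone C++ sketch that checks a few of
them on 32-bit unsigned values. The constants X, C, C1, C2 are arbitrary sample
values chosen for the example, not anything taken from the patch.)

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t X = 0xDEADBEEF;       // arbitrary sample value
  const uint32_t C = 7, C1 = 3, C2 = 5; // arbitrary in-range shift amounts

  // (X >>u C1) >>u C2 --> X >>u (C1 + C2), valid while C1 + C2 < 32.
  assert(((X >> C1) >> C2) == (X >> (C1 + C2)));

  // ((X << C) >>u C) --> X & (-1 >>u C): the round trip clears the high C
  // bits, which is exactly the low-bits mask.
  assert(((X << C) >> C) == (X & (UINT32_MAX >> C)));

  // (X << C1) >>u C2 with C1 < C2 --> (X >>u (C2 - C1)) & (-1 >>u C2).
  assert(((X << C1) >> C2) == ((X >> (C2 - C1)) & (UINT32_MAX >> C2)));

  return 0;
}

(The nuw/nsw variants in the patch additionally drop the mask when the wrap
flags guarantee no bits were lost in the original shl.)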



