[llvm-commits] [llvm] r72204 - in /llvm/trunk: lib/Analysis/ValueTracking.cpp lib/Transforms/Scalar/InstructionCombining.cpp test/Transforms/InstCombine/lshr-phi.ll

Dan Gohman gohman at apple.com
Wed May 20 19:28:34 PDT 2009


Author: djg
Date: Wed May 20 21:28:33 2009
New Revision: 72204

URL: http://llvm.org/viewvc/llvm-project?rev=72204&view=rev
Log:
Teach ValueTracking a new way to analyze PHI nodes, and teach
Instcombine to be more aggressive about using SimplifyDemandedBits
on shift nodes. This allows a shift to be simplified to zero in the
included test case.
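
To see the effect concretely, here is a rough standalone sketch of the
demanded/known-bits reasoning involved (plain 32-bit masks and an invented
helper name, not the APInt-based code in the patch): if every bit that an
lshr would keep is already known to be zero, the shift folds to the
constant zero.

  #include <cassert>
  #include <cstdint>

  // Returns true when "X >> Shift" must be zero, given a mask of the bits
  // of X that are known to be zero (Shift < 32 assumed).
  static bool lshrIsKnownZero(uint32_t KnownZero, unsigned Shift) {
    // Only bits at positions >= Shift survive a logical shift right.
    uint32_t SurvivingBits = ~((1u << Shift) - 1u);
    return (KnownZero & SurvivingBits) == SurvivingBits;
  }

  int main() {
    // After "k = x & 16383", bits 14..31 of k are known zero, so an
    // "lshr k, 14" (as in the test below) is provably zero.
    uint32_t KnownZeroOfK = ~16383u;
    assert(lshrIsKnownZero(KnownZeroOfK, 14));
    return 0;
  }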

Added:
    llvm/trunk/test/Transforms/InstCombine/lshr-phi.ll
Modified:
    llvm/trunk/lib/Analysis/ValueTracking.cpp
    llvm/trunk/lib/Transforms/Scalar/InstructionCombining.cpp

Modified: llvm/trunk/lib/Analysis/ValueTracking.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ValueTracking.cpp?rev=72204&r1=72203&r2=72204&view=diff

==============================================================================
--- llvm/trunk/lib/Analysis/ValueTracking.cpp (original)
+++ llvm/trunk/lib/Analysis/ValueTracking.cpp Wed May 20 21:28:33 2009
@@ -48,8 +48,9 @@
 void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
                              APInt &KnownZero, APInt &KnownOne,
                              TargetData *TD, unsigned Depth) {
+  const unsigned MaxDepth = 6;
   assert(V && "No Value?");
-  assert(Depth <= 6 && "Limit Search Depth");
+  assert(Depth <= MaxDepth && "Limit Search Depth");
   unsigned BitWidth = Mask.getBitWidth();
   assert((V->getType()->isInteger() || isa<PointerType>(V->getType())) &&
          "Not integer or pointer type!");
@@ -88,7 +89,7 @@
 
   KnownZero.clear(); KnownOne.clear();   // Start out not knowing anything.
 
-  if (Depth == 6 || Mask == 0)
+  if (Depth == MaxDepth || Mask == 0)
     return;  // Limit search depth.
 
   User *I = dyn_cast<User>(V);
@@ -522,6 +523,30 @@
         }
       }
     }
+
+    // Otherwise take the unions of the known bit sets of the operands,
+    // taking conservative care to avoid excessive recursion.
+    if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
+      KnownZero = APInt::getAllOnesValue(BitWidth);
+      KnownOne = APInt::getAllOnesValue(BitWidth);
+      for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
+        // Skip direct self references.
+        if (P->getIncomingValue(i) == P) continue;
+
+        KnownZero2 = APInt(BitWidth, 0);
+        KnownOne2 = APInt(BitWidth, 0);
+        // Recurse, but cap the recursion to one level, because we don't
+        // want to waste time spinning around in loops.
+        ComputeMaskedBits(P->getIncomingValue(i), KnownZero | KnownOne,
+                          KnownZero2, KnownOne2, TD, MaxDepth-1);
+        KnownZero &= KnownZero2;
+        KnownOne &= KnownOne2;
+        // If all bits have been ruled out, there's no need to check
+        // more operands.
+        if (!KnownZero && !KnownOne)
+          break;
+      }
+    }
     break;
   }
   case Instruction::Call:

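The new PHI case above amounts to intersecting whatever is known about each
(non-self) incoming value. Here is a minimal sketch of that rule against
plain 32-bit masks, with the struct and function names invented for this
note rather than taken from LLVM:

  #include <cassert>
  #include <cstdint>
  #include <vector>

  struct KnownBits32 {
    uint32_t Zero; // bits known to be 0
    uint32_t One;  // bits known to be 1
  };

  // Merge the known bits of a PHI's (non-self) incoming values: a bit is
  // known for the PHI only if it is known the same way for every input.
  static KnownBits32 mergePhiKnownBits(const std::vector<KnownBits32> &Incoming) {
    // Start from "all bits known" and intersect, mirroring the
    // getAllOnesValue initialization in the patch.
    KnownBits32 Result = {~0u, ~0u};
    for (const KnownBits32 &KB : Incoming) {
      Result.Zero &= KB.Zero;
      Result.One &= KB.One;
      if (!Result.Zero && !Result.One)
        break; // nothing is known any more; no need to look at more inputs
    }
    return Result;
  }

  int main() {
    // Both incoming values had their upper 18 bits masked off (e.g. by
    // "and ..., 16383"), so the PHI's upper 18 bits are known zero too.
    KnownBits32 A = {~16383u, 0}, B = {~16383u, 0};
    KnownBits32 P = mergePhiKnownBits({A, B});
    assert(P.Zero == ~16383u && P.One == 0);
    return 0;
  }
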
Modified: llvm/trunk/lib/Transforms/Scalar/InstructionCombining.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/InstructionCombining.cpp?rev=72204&r1=72203&r2=72204&view=diff

==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/InstructionCombining.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/InstructionCombining.cpp Wed May 20 21:28:33 2009
@@ -7152,6 +7152,10 @@
       return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
   }
 
+  // See if we can fold away this shift.
+  if (!isa<VectorType>(I.getType()) && SimplifyDemandedInstructionBits(I))
+    return &I;
+
   // Try to fold constant and into select arguments.
   if (isa<Constant>(Op0))
     if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
@@ -7171,8 +7175,6 @@
   // See if we can simplify any instructions used by the instruction whose sole 
   // purpose is to compute bits we don't care about.
   uint32_t TypeBits = Op0->getType()->getPrimitiveSizeInBits();
-  if (SimplifyDemandedInstructionBits(I))
-    return &I;
   
   // shl uint X, 32 = 0 and shr ubyte Y, 9 = 0, ... just don't eliminate shr
   // of a signed value.

Added: llvm/trunk/test/Transforms/InstCombine/lshr-phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/lshr-phi.ll?rev=72204&view=auto

==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/lshr-phi.ll (added)
+++ llvm/trunk/test/Transforms/InstCombine/lshr-phi.ll Wed May 20 21:28:33 2009
@@ -0,0 +1,35 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis > %t
+; RUN: not grep lshr %t
+; RUN: grep add %t | count 1
+
+; Instcombine should be able to eliminate the lshr, because only
+; bits in the operand which might be non-zero will be shifted
+; off the end.
+
+define i32 @hash_string(i8* nocapture %key) nounwind readonly {
+entry:
+	%t0 = load i8* %key, align 1		; <i8> [#uses=1]
+	%t1 = icmp eq i8 %t0, 0		; <i1> [#uses=1]
+	br i1 %t1, label %bb2, label %bb
+
+bb:		; preds = %bb, %entry
+	%indvar = phi i64 [ 0, %entry ], [ %tmp, %bb ]		; <i64> [#uses=2]
+	%k.04 = phi i32 [ 0, %entry ], [ %t8, %bb ]		; <i32> [#uses=2]
+	%cp.05 = getelementptr i8* %key, i64 %indvar		; <i8*> [#uses=1]
+	%t2 = shl i32 %k.04, 1		; <i32> [#uses=1]
+	%t3 = lshr i32 %k.04, 14		; <i32> [#uses=1]
+	%t4 = add i32 %t2, %t3		; <i32> [#uses=1]
+	%t5 = load i8* %cp.05, align 1		; <i8> [#uses=1]
+	%t6 = sext i8 %t5 to i32		; <i32> [#uses=1]
+	%t7 = xor i32 %t6, %t4		; <i32> [#uses=1]
+	%t8 = and i32 %t7, 16383		; <i32> [#uses=2]
+	%tmp = add i64 %indvar, 1		; <i64> [#uses=2]
+	%scevgep = getelementptr i8* %key, i64 %tmp		; <i8*> [#uses=1]
+	%t9 = load i8* %scevgep, align 1		; <i8> [#uses=1]
+	%t10 = icmp eq i8 %t9, 0		; <i1> [#uses=1]
+	br i1 %t10, label %bb2, label %bb
+
+bb2:		; preds = %bb, %entry
+	%k.0.lcssa = phi i32 [ 0, %entry ], [ %t8, %bb ]		; <i32> [#uses=1]
+	ret i32 %k.0.lcssa
+}
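
For reference, the loop in the test is roughly the following C-level hash
(reconstructed for illustration; this is not the original source the IR was
generated from). Because k is re-masked with 16383, i.e. to 14 bits, at the
end of every iteration, the k >> 14 term is always zero, which is what the
new PHI known-bits rule lets instcombine prove:

  // Reconstructed C++ equivalent of the IR test above (illustrative only).
  unsigned hash_string(const char *key) {
    unsigned k = 0;
    for (const char *cp = key; *cp != 0; ++cp)
      // k never has bits above bit 13 set, so (k >> 14) is always 0 and
      // the whole term can be dropped.
      k = (((k << 1) + (k >> 14)) ^ (unsigned)*cp) & 16383u;
    return k;
  }

Once the lshr folds to zero, the i32 add that combined the shl and lshr
results folds away as well, leaving only the i64 induction-variable add,
which is why the RUN lines check for no lshr and exactly one add.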




