[llvm-branch-commits] [llvm-branch] r71496 - in /llvm/branches/Apple/Dib: lib/Transforms/Scalar/LoopStrengthReduce.cpp test/CodeGen/X86/lsr-negative-stride.ll test/CodeGen/X86/remat-mov0.ll

Bill Wendling isanbard at gmail.com
Mon May 11 16:18:28 PDT 2009


Author: void
Date: Mon May 11 18:18:28 2009
New Revision: 71496

URL: http://llvm.org/viewvc/llvm-project?rev=71496&view=rev
Log:
--- Merging r71033 into '.':
U    lib/Transforms/Scalar/LoopStrengthReduce.cpp
--- Merging r71035 into '.':
G    lib/Transforms/Scalar/LoopStrengthReduce.cpp
--- Merging r71090 into '.':
G    lib/Transforms/Scalar/LoopStrengthReduce.cpp
--- Merging r71305 into '.':
G    lib/Transforms/Scalar/LoopStrengthReduce.cpp
Skipped 'test/CodeGen/X86/lsr-loop-exit-cond.ll'
Skipped 'test/CodeGen/X86/remat-mov-1.ll'
--- Merging r71485 into '.':
U    test/CodeGen/X86/lsr-negative-stride.ll
D    test/CodeGen/X86/remat-mov0.ll
G    lib/Transforms/Scalar/LoopStrengthReduce.cpp


Removed:
    llvm/branches/Apple/Dib/test/CodeGen/X86/remat-mov0.ll
Modified:
    llvm/branches/Apple/Dib/lib/Transforms/Scalar/LoopStrengthReduce.cpp
    llvm/branches/Apple/Dib/test/CodeGen/X86/lsr-negative-stride.ll

Modified: llvm/branches/Apple/Dib/lib/Transforms/Scalar/LoopStrengthReduce.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Transforms/Scalar/LoopStrengthReduce.cpp?rev=71496&r1=71495&r2=71496&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/lib/Transforms/Scalar/LoopStrengthReduce.cpp (original)
+++ llvm/branches/Apple/Dib/lib/Transforms/Scalar/LoopStrengthReduce.cpp Mon May 11 18:18:28 2009
@@ -42,6 +42,7 @@
 STATISTIC(NumEliminated,  "Number of strides eliminated");
 STATISTIC(NumShadow,      "Number of Shadow IVs optimized");
 STATISTIC(NumImmSunk,     "Number of common expr immediates sunk into uses");
+STATISTIC(NumLoopCond,    "Number of loop terminating conds optimized");
 
 static cl::opt<bool> EnableFullLSRMode("enable-full-lsr",
                                        cl::init(false),
@@ -121,6 +122,10 @@
     /// particular stride.
     std::map<SCEVHandle, IVsOfOneStride> IVsByStride;
 
+    /// StrideNoReuse - Keep track of all the strides whose ivs cannot be
+    /// reused (nor should they be rewritten to reuse other strides).
+    SmallSet<SCEVHandle, 4> StrideNoReuse;
+
     /// StrideOrder - An ordering of the keys in IVUsesByStride that is stable:
     /// We use this to iterate over the IVUsesByStride collection without being
     /// dependent on random ordering of pointers in the process.
@@ -163,8 +168,11 @@
     ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                   IVStrideUse* &CondUse,
                                   const SCEVHandle* &CondStride);
+
     void OptimizeIndvars(Loop *L);
     void OptimizeLoopCountIV(Loop *L);
+    void OptimizeLoopTermCond(Loop *L);
+
     /// OptimizeShadowIV - If IV is used in an int-to-float cast
     /// inside the loop then try to eliminate the cast operation.
     void OptimizeShadowIV(Loop *L);
@@ -180,8 +188,8 @@
     SCEVHandle CheckForIVReuse(bool, bool, bool, const SCEVHandle&,
                              IVExpr&, const Type*,
                              const std::vector<BasedUser>& UsersToProcess);
-    bool ValidStride(bool, int64_t,
-                     const std::vector<BasedUser>& UsersToProcess);
+    bool ValidScale(bool, int64_t,
+                    const std::vector<BasedUser>& UsersToProcess);
     SCEVHandle CollectIVUsers(const SCEVHandle &Stride,
                               IVUsersOfOneStride &Uses,
                               Loop *L,
@@ -209,6 +217,7 @@
                                   SCEVHandle Stride,
                                   SCEVHandle CommonExprs,
                                   Value *CommonBaseV,
+                                  Instruction *IVIncInsertPt,
                                   const Loop *L,
                                   SCEVExpander &PreheaderRewriter);
     void StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
@@ -806,7 +815,7 @@
 /// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that are
 /// loop varying to the Imm operand.
 static void MoveLoopVariantsToImmediateField(SCEVHandle &Val, SCEVHandle &Imm,
-                                            Loop *L, ScalarEvolution *SE) {
+                                             Loop *L, ScalarEvolution *SE) {
   if (Val->isLoopInvariant(L)) return;  // Nothing to do.
   
   if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
@@ -1129,16 +1138,15 @@
   return Result;
 }
 
-/// ValidStride - Check whether the given Scale is valid for all loads and 
+/// ValidScale - Check whether the given Scale is valid for all loads and 
 /// stores in UsersToProcess.
 ///
-bool LoopStrengthReduce::ValidStride(bool HasBaseReg,
-                               int64_t Scale, 
+bool LoopStrengthReduce::ValidScale(bool HasBaseReg, int64_t Scale,
                                const std::vector<BasedUser>& UsersToProcess) {
   if (!TLI)
     return true;
 
-  for (unsigned i=0, e = UsersToProcess.size(); i!=e; ++i) {
+  for (unsigned i = 0, e = UsersToProcess.size(); i!=e; ++i) {
     // If this is a load or other access, pass the type of the access in.
     const Type *AccessTy = Type::VoidTy;
     if (isAddressUse(UsersToProcess[i].Inst,
@@ -1191,13 +1199,17 @@
                                 const SCEVHandle &Stride, 
                                 IVExpr &IV, const Type *Ty,
                                 const std::vector<BasedUser>& UsersToProcess) {
+  if (StrideNoReuse.count(Stride))
+    return SE->getIntegerSCEV(0, Stride->getType());
+
   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
     int64_t SInt = SC->getValue()->getSExtValue();
     for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
          ++NewStride) {
       std::map<SCEVHandle, IVsOfOneStride>::iterator SI = 
                 IVsByStride.find(StrideOrder[NewStride]);
-      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
+      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first) ||
+          StrideNoReuse.count(SI->first))
         continue;
       int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
       if (SI->first != Stride &&
@@ -1211,7 +1223,7 @@
       // multiplications.
       if (Scale == 1 ||
           (AllUsesAreAddresses &&
-           ValidStride(HasBaseReg, Scale, UsersToProcess)))
+           ValidScale(HasBaseReg, Scale, UsersToProcess)))
         for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                IE = SI->second.IVs.end(); II != IE; ++II)
           // FIXME: Only handle base == 0 for now.
@@ -1307,7 +1319,7 @@
     // field of the use, so that we don't try to use something before it is
     // computed.
     MoveLoopVariantsToImmediateField(UsersToProcess.back().Base,
-                                    UsersToProcess.back().Imm, L, SE);
+                                     UsersToProcess.back().Imm, L, SE);
     assert(UsersToProcess.back().Base->isLoopInvariant(L) &&
            "Base value is not loop invariant!");
   }
@@ -1457,6 +1469,7 @@
 /// Return the created phi node.
 ///
 static PHINode *InsertAffinePhi(SCEVHandle Start, SCEVHandle Step,
+                                Instruction *IVIncInsertPt,
                                 const Loop *L,
                                 SCEVExpander &Rewriter) {
   assert(Start->isLoopInvariant(L) && "New PHI start is not loop invariant!");
@@ -1480,16 +1493,17 @@
     IncAmount = Rewriter.SE.getNegativeSCEV(Step);
 
   // Insert an add instruction right before the terminator corresponding
-  // to the back-edge.
+  // to the back-edge or just before the only use. The location is determined
+  // by the caller and passed in as IVIncInsertPt.
   Value *StepV = Rewriter.expandCodeFor(IncAmount, Ty,
                                         Preheader->getTerminator());
   Instruction *IncV;
   if (isNegative) {
     IncV = BinaryOperator::CreateSub(PN, StepV, "lsr.iv.next",
-                                     LatchBlock->getTerminator());
+                                     IVIncInsertPt);
   } else {
     IncV = BinaryOperator::CreateAdd(PN, StepV, "lsr.iv.next",
-                                     LatchBlock->getTerminator());
+                                     IVIncInsertPt);
   }
   if (!isa<ConstantInt>(StepV)) ++NumVariable;
 
@@ -1546,6 +1560,7 @@
 
   // Rewrite the UsersToProcess records, creating a separate PHI for each
   // unique Base value.
+  Instruction *IVIncInsertPt = L->getLoopLatch()->getTerminator();
   for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
     // TODO: The uses are grouped by base, but not sorted. We arbitrarily
     // pick the first Imm value here to start with, and adjust it for the
@@ -1553,7 +1568,7 @@
     SCEVHandle Imm = UsersToProcess[i].Imm;
     SCEVHandle Base = UsersToProcess[i].Base;
     SCEVHandle Start = SE->getAddExpr(CommonExprs, Base, Imm);
-    PHINode *Phi = InsertAffinePhi(Start, Stride, L,
+    PHINode *Phi = InsertAffinePhi(Start, Stride, IVIncInsertPt, L,
                                    PreheaderRewriter);
     // Loop over all the users with the same base.
     do {
@@ -1566,6 +1581,18 @@
   }
 }
 
+/// FindIVIncInsertPt - Return the location to insert the increment instruction.
+/// If the only use is a use of the postinc value (it must be the loop
+/// termination condition), then insert the increment just before the use.
+static Instruction *FindIVIncInsertPt(std::vector<BasedUser> &UsersToProcess,
+                                      const Loop *L) {
+  if (UsersToProcess.size() == 1 &&
+      UsersToProcess[0].isUseOfPostIncrementedValue &&
+      L->contains(UsersToProcess[0].Inst->getParent()))
+    return UsersToProcess[0].Inst;
+  return L->getLoopLatch()->getTerminator();
+}
+
 /// PrepareToStrengthReduceWithNewPhi - Insert a new induction variable for the
 /// given users to share.
 ///
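
As a toy sketch of the placement rule FindIVIncInsertPt introduces (the struct
definitions below are invented stand-ins for illustration, not LLVM's real
types): with exactly one user of the stride, and that user consuming the
post-incremented value, the increment lands immediately before the user;
otherwise it stays at the latch terminator.

#include <vector>

// Invented stand-ins; LLVM's Instruction/BasedUser carry much more state.
struct Instruction { const char *Name; };
struct BasedUser   { Instruction *Inst; bool isUseOfPostIncrementedValue; };

// Same decision as the new FindIVIncInsertPt: a lone post-inc user (the
// terminating icmp) gets the increment placed right before it. The real
// code additionally checks that the user is inside the loop.
Instruction *findIVIncInsertPt(const std::vector<BasedUser> &Users,
                               Instruction *LatchTerminator) {
  if (Users.size() == 1 && Users[0].isUseOfPostIncrementedValue)
    return Users[0].Inst;
  return LatchTerminator;
}
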
@@ -1575,12 +1602,13 @@
                                          SCEVHandle Stride,
                                          SCEVHandle CommonExprs,
                                          Value *CommonBaseV,
+                                         Instruction *IVIncInsertPt,
                                          const Loop *L,
                                          SCEVExpander &PreheaderRewriter) {
   DOUT << "  Inserting new PHI:\n";
 
   PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV),
-                                 Stride, L,
+                                 Stride, IVIncInsertPt, L,
                                  PreheaderRewriter);
 
   // Remember this in case a later stride is multiple of this.
@@ -1595,8 +1623,8 @@
   DOUT << "\n";
 }
 
-/// PrepareToStrengthReduceWithNewPhi - Prepare for the given users to reuse
-/// an induction variable with a stride that is a factor of the current
+/// PrepareToStrengthReduceFromSmallerStride - Prepare for the given users to
+/// reuse an induction variable with a stride that is a factor of the current
 /// induction variable.
 ///
 void
@@ -1732,6 +1760,7 @@
   BasicBlock  *Preheader = L->getLoopPreheader();
   Instruction *PreInsertPt = Preheader->getTerminator();
   BasicBlock *LatchBlock = L->getLoopLatch();
+  Instruction *IVIncInsertPt = LatchBlock->getTerminator();
 
   Value *CommonBaseV = Constant::getNullValue(ReplacedTy);
 
@@ -1760,13 +1789,15 @@
                                     AllUsesAreOutsideLoop,
                                     Stride, ReuseIV, ReplacedTy,
                                     UsersToProcess);
-    if (isa<SCEVConstant>(RewriteFactor) &&
-        cast<SCEVConstant>(RewriteFactor)->isZero())
-      PrepareToStrengthReduceWithNewPhi(UsersToProcess, Stride, CommonExprs,
-                                        CommonBaseV, L, PreheaderRewriter);
-    else
+    if (!RewriteFactor->isZero())
       PrepareToStrengthReduceFromSmallerStride(UsersToProcess, CommonBaseV,
                                                ReuseIV, PreInsertPt);
+    else {
+      IVIncInsertPt = FindIVIncInsertPt(UsersToProcess, L);
+      PrepareToStrengthReduceWithNewPhi(UsersToProcess, Stride, CommonExprs,
+                                        CommonBaseV, IVIncInsertPt,
+                                        L, PreheaderRewriter);
+    }
   }
 
   // Process all the users now, replacing their strided uses with
@@ -1805,7 +1836,12 @@
       // FIXME: Use emitted users to emit other users.
       BasedUser &User = UsersToProcess.back();
 
-      DOUT << "    Examining use ";
+      DOUT << "    Examining ";
+      if (User.isUseOfPostIncrementedValue)
+        DOUT << "postinc";
+      else
+        DOUT << "preinc";
+      DOUT << " use ";
       DEBUG(WriteAsOperand(*DOUT, UsersToProcess.back().OperandValToReplace,
                            /*PrintType=*/false));
       DOUT << " in Inst: " << *Inst;
@@ -1815,11 +1851,12 @@
       Value *RewriteOp = User.Phi;
       if (User.isUseOfPostIncrementedValue) {
         RewriteOp = User.Phi->getIncomingValueForBlock(LatchBlock);
-
         // If this user is in the loop, make sure it is the last thing in the
-        // loop to ensure it is dominated by the increment.
-        if (L->contains(User.Inst->getParent()))
-          User.Inst->moveBefore(LatchBlock->getTerminator());
+        // loop to ensure it is dominated by the increment. In case it's the
+        // only use of the iv, the increment instruction is already before the
+        // use.
+        if (L->contains(User.Inst->getParent()) && User.Inst != IVIncInsertPt)
+          User.Inst->moveBefore(IVIncInsertPt);
       }
 
       SCEVHandle RewriteExpr = SE->getUnknown(RewriteOp);
@@ -2035,9 +2072,10 @@
 
       Scale = SSInt / CmpSSInt;
       int64_t NewCmpVal = CmpVal * Scale;
-      APInt Mul = APInt(BitWidth, NewCmpVal);
+      APInt Mul = APInt(BitWidth*2, CmpVal, true);
+      Mul = Mul * APInt(BitWidth*2, Scale, true);
       // Check for overflow.
-      if (Mul.getSExtValue() != NewCmpVal)
+      if (!Mul.isSignedIntN(BitWidth))
         continue;
 
       // Watch out for overflow.
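
A standalone illustration of why the overflow check changed (plain C++ using
the GCC/Clang __int128 extension to stand in for the doubled-width APInt; not
part of the patch). The old code computed CmpVal * Scale in int64_t and then
range-tested the result, so at BitWidth == 64 the product could wrap before
the test ever ran. The new code multiplies in 2*BitWidth bits first, then asks
whether the product fits in BitWidth signed bits.

#include <cstdint>
#include <cstdio>

// Does V fit in a Bits-wide signed integer? (mirrors APInt::isSignedIntN)
static bool fitsSignedBits(__int128 V, unsigned Bits) {
  __int128 Hi = (((__int128)1) << (Bits - 1)) - 1;
  __int128 Lo = -Hi - 1;
  return V >= Lo && V <= Hi;
}

int main() {
  const unsigned BitWidth = 64;
  int64_t CmpVal = 1LL << 62;  // 2^62
  int64_t Scale  = 4;

  // Old-style: the 64-bit multiply wraps to 0, so a same-width range
  // check sees a "valid" value and the overflow is silently missed.
  int64_t Wrapped = (int64_t)((uint64_t)CmpVal * (uint64_t)Scale);
  printf("64-bit product: %lld (overflow missed)\n", (long long)Wrapped);

  // New-style: widen first; the exact product 2^64 does not fit.
  __int128 Mul = (__int128)CmpVal * (__int128)Scale;
  printf("fits in %u signed bits? %s\n", BitWidth,
         fitsSignedBits(Mul, BitWidth) ? "yes" : "no");
  return 0;
}
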
@@ -2089,7 +2127,7 @@
       // if it's likely the new stride uses will be rewritten using the
       // stride of the compare instruction.
       if (AllUsesAreAddresses &&
-          ValidStride(!CommonExprs->isZero(), Scale, UsersToProcess))
+          ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess))
         continue;
 
       // If scale is negative, use swapped predicate unless it's testing
@@ -2312,8 +2350,8 @@
       if (!DestTy) continue;
 
       if (TLI) {
-        /* If target does not support DestTy natively then do not apply
-           this transformation. */
+        // If target does not support DestTy natively then do not apply
+        // this transformation.
         MVT DVT = TLI->getValueType(DestTy);
         if (!TLI->isTypeLegal(DVT)) continue;
       }
@@ -2389,28 +2427,87 @@
   // TODO: implement optzns here.
 
   OptimizeShadowIV(L);
+}
 
+/// OptimizeLoopTermCond - Change loop terminating condition to use the 
+/// postinc iv when possible.
+void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
   // Finally, get the terminating condition for the loop if possible.  If we
   // can, we want to change it to use a post-incremented version of its
   // induction variable, to allow coalescing the live ranges for the IV into
   // one register value.
-  PHINode *SomePHI = cast<PHINode>(L->getHeader()->begin());
-  BasicBlock  *Preheader = L->getLoopPreheader();
-  BasicBlock *LatchBlock =
-   SomePHI->getIncomingBlock(SomePHI->getIncomingBlock(0) == Preheader);
-  BranchInst *TermBr = dyn_cast<BranchInst>(LatchBlock->getTerminator());
-  if (!TermBr || TermBr->isUnconditional() || 
-      !isa<ICmpInst>(TermBr->getCondition()))
+  BasicBlock *LatchBlock = L->getLoopLatch();
+  BasicBlock *ExitBlock = L->getExitingBlock();
+  if (!ExitBlock)
+    // Multiple exits, just look at the exit in the latch block if there is one.
+    ExitBlock = LatchBlock;
+  BranchInst *TermBr = dyn_cast<BranchInst>(ExitBlock->getTerminator());
+  if (!TermBr)
+    return;
+  if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
     return;
-  ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
 
   // Search IVUsesByStride to find Cond's IVUse if there is one.
   IVStrideUse *CondUse = 0;
   const SCEVHandle *CondStride = 0;
-
+  ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
   if (!FindIVUserForCond(Cond, CondUse, CondStride))
     return; // setcc doesn't use the IV.
 
+  if (ExitBlock != LatchBlock) {
+    if (!Cond->hasOneUse())
+      // See below, we don't want the condition to be cloned.
+      return;
+
+    // If exiting block is the latch block, we know it's safe and profitable to
+    // transform the icmp to use post-inc iv. Otherwise do so only if it would
+    // not reuse another iv and its iv would be reused by other uses. We are
+    // optimizing for the case where the icmp is the only use of the iv.
+    IVUsersOfOneStride &StrideUses = IVUsesByStride[*CondStride];
+    for (unsigned i = 0, e = StrideUses.Users.size(); i != e; ++i) {
+      if (StrideUses.Users[i].User == Cond)
+        continue;
+      if (!StrideUses.Users[i].isUseOfPostIncrementedValue)
+        return;
+    }
+
+    // FIXME: This is expensive, and worse still ChangeCompareStride does a
+    // similar check. Can we perform all the icmp related transformations after
+    // StrengthReduceStridedIVUsers?
+    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride)) {
+      int64_t SInt = SC->getValue()->getSExtValue();
+      for (unsigned NewStride = 0, ee = StrideOrder.size(); NewStride != ee;
+           ++NewStride) {
+        std::map<SCEVHandle, IVUsersOfOneStride>::iterator SI = 
+          IVUsesByStride.find(StrideOrder[NewStride]);
+        if (!isa<SCEVConstant>(SI->first) || SI->first == *CondStride)
+          continue;
+        int64_t SSInt =
+          cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
+        if (SSInt == SInt)
+          return; // This can definitely be reused.
+        if (unsigned(abs(SSInt)) < SInt || (SSInt % SInt) != 0)
+          continue;
+        int64_t Scale = SSInt / SInt;
+        bool AllUsesAreAddresses = true;
+        bool AllUsesAreOutsideLoop = true;
+        std::vector<BasedUser> UsersToProcess;
+        SCEVHandle CommonExprs = CollectIVUsers(SI->first, SI->second, L,
+                                                AllUsesAreAddresses,
+                                                AllUsesAreOutsideLoop,
+                                                UsersToProcess);
+        // Avoid rewriting the compare instruction with an iv of new stride
+        // if it's likely the new stride uses will be rewritten using the
+        // stride of the compare instruction.
+        if (AllUsesAreAddresses &&
+            ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess))
+          return;
+      }
+    }
+
+    StrideNoReuse.insert(*CondStride);
+  }
+
   // If the trip count is computed in terms of an smax (due to ScalarEvolution
   // being unable to find a sufficient guard, for example), change the loop
   // comparison to use SLT instead of NE.
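
A source-level illustration of what the post-inc rewrite buys (hand-written
for this note and assuming n >= 1; the pass itself works on LLVM IR, not C):

/* Pre-inc compare: the old value of the counter must stay live across
 * the increment, so two values of the iv are live at the backedge. */
int sum_pre(const int *p, int n) {
  int s = 0, i = 0;
  for (;;) {
    s += p[i];
    int old = i;        /* kept alive solely for the exit test */
    i = i + 1;
    if (old == n - 1) break;
  }
  return s;
}

/* Post-inc compare: the exit test reads the incremented value (the
 * constant offset is folded into the bound), so only one value of the
 * counter is live and its two names can coalesce into one register. */
int sum_post(const int *p, int n) {
  int s = 0, i = 0;
  do {
    s += p[i];
    i = i + 1;
  } while (i != n);
  return s;
}
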
@@ -2418,7 +2515,8 @@
 
   // If possible, change stride and operands of the compare instruction to
   // eliminate one stride.
-  Cond = ChangeCompareStride(L, Cond, CondUse, CondStride);
+  if (ExitBlock == LatchBlock)
+    Cond = ChangeCompareStride(L, Cond, CondUse, CondStride);
 
   // It's possible for the setcc instruction to be anywhere in the loop, and
   // possible for it to have multiple users.  If it is not immediately before
@@ -2434,7 +2532,7 @@
       
       // Clone the IVUse, as the old use still exists!
       IVUsesByStride[*CondStride].addUser(CondUse->Offset, Cond,
-                                         CondUse->OperandValToReplace);
+                                          CondUse->OperandValToReplace);
       CondUse = &IVUsesByStride[*CondStride].Users.back();
     }
   }
@@ -2445,6 +2543,8 @@
   CondUse->Offset = SE->getMinusSCEV(CondUse->Offset, *CondStride);
   CondUse->isUseOfPostIncrementedValue = true;
   Changed = true;
+
+  ++NumLoopCond;
 }
 
 // OptimizeLoopCountIV - If, after all sharing of IVs, the IV used for deciding
@@ -2585,8 +2685,14 @@
     // computation of some other indvar to decide when to terminate the loop.
     OptimizeIndvars(L);
 
-    // FIXME: We can widen subreg IV's here for RISC targets.  e.g. instead of
-    // doing computation in byte values, promote to 32-bit values if safe.
+    // Change loop terminating condition to use the postinc iv when possible
+    // and optimize loop terminating compare. FIXME: Move this after
+    // StrengthReduceStridedIVUsers?
+    OptimizeLoopTermCond(L);
+
+    // FIXME: We can shrink overlarge IV's here.  e.g. if the code has
+    // computation in i64 values and the target doesn't support i64, demote
+    // the computation to 32-bit if safe.
 
     // FIXME: Attempt to reuse values across multiple IV's.  In particular, we
     // could have something like "for(i) { foo(i*8); bar(i*16) }", which should
@@ -2618,6 +2724,7 @@
   IVUsesByStride.clear();
   IVsByStride.clear();
   StrideOrder.clear();
+  StrideNoReuse.clear();
 
   // Clean up after ourselves
   if (!DeadInsts.empty()) {

Modified: llvm/branches/Apple/Dib/test/CodeGen/X86/lsr-negative-stride.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/X86/lsr-negative-stride.ll?rev=71496&r1=71495&r2=71496&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/X86/lsr-negative-stride.ll (original)
+++ llvm/branches/Apple/Dib/test/CodeGen/X86/lsr-negative-stride.ll Mon May 11 18:18:28 2009
@@ -16,7 +16,7 @@
 ;}
 
 
-define i32 @t(i32 %a, i32 %b) {
+define i32 @t(i32 %a, i32 %b) nounwind {
 entry:
 	%tmp1434 = icmp eq i32 %a, %b		; <i1> [#uses=1]
 	br i1 %tmp1434, label %bb17, label %bb.outer

Removed: llvm/branches/Apple/Dib/test/CodeGen/X86/remat-mov0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/X86/remat-mov0.ll?rev=71495&view=auto

==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/X86/remat-mov0.ll (original)
+++ llvm/branches/Apple/Dib/test/CodeGen/X86/remat-mov0.ll (removed)
@@ -1,40 +0,0 @@
-; RUN: llvm-as < %s | llc -march=x86 | grep xor | count 2
-
-	%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
-	%struct.ImgT = type { i8, i8*, i8*, %struct.FILE*, i32, i32, i32, i32, i8*, double*, float*, float*, float*, i32*, double, double, i32*, double*, i32*, i32* }
-	%struct._CompT = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, i8, %struct._PixT*, %struct._CompT*, i8, %struct._CompT* }
-	%struct._PixT = type { i32, i32, %struct._PixT* }
-	%struct.__sFILEX = type opaque
-	%struct.__sbuf = type { i8*, i32 }
-
-declare fastcc void @MergeComponents(%struct._CompT*, %struct._CompT*, %struct._CompT*, %struct._CompT**, %struct.ImgT*) nounwind 
-
-define fastcc void @MergeToLeft(%struct._CompT* %comp, %struct._CompT** %head, %struct.ImgT* %img) nounwind  {
-entry:
-	br label %bb208
-
-bb105:		; preds = %bb200
-	br i1 false, label %bb197, label %bb149
-
-bb149:		; preds = %bb105
-	%tmp151 = getelementptr %struct._CompT* %comp, i32 0, i32 0		; <i32*> [#uses=1]
-	br label %bb193
-
-bb193:		; preds = %bb184, %bb149
-	%tmp196 = load i32* %tmp151, align 4		; <i32> [#uses=1]
-	br label %bb197
-
-bb197:		; preds = %bb193, %bb105
-	%last_comp.0 = phi i32 [ %tmp196, %bb193 ], [ 0, %bb105 ]		; <i32> [#uses=0]
-	%indvar.next = add i32 %indvar, 1		; <i32> [#uses=1]
-	br label %bb200
-
-bb200:		; preds = %bb208, %bb197
-	%indvar = phi i32 [ 0, %bb208 ], [ %indvar.next, %bb197 ]		; <i32> [#uses=2]
-	%xm.0 = sub i32 %indvar, 0		; <i32> [#uses=1]
-	%tmp202 = icmp slt i32 %xm.0, 1		; <i1> [#uses=1]
-	br i1 %tmp202, label %bb105, label %bb208
-
-bb208:		; preds = %bb200, %entry
-	br label %bb200
-}
