[llvm] r194424 - Fix PR17952.
Nadav Rotem
nrotem at apple.com
Tue Nov 12 11:05:07 PST 2013
This commit reapplies r191017, which was reverted in r194348:
http://llvm.org/viewvc/llvm-project?view=revision&revision=191017
On Nov 11, 2013, at 10:39 PM, Bob Wilson <bob.wilson at apple.com> wrote:
> I wondered the same thing. It looks like the correct PR is 17852.
>
> On Nov 11, 2013, at 10:26 PM, Chris Lattner <clattner at apple.com> wrote:
>
>>
>> On Nov 11, 2013, at 2:00 PM, Shuxin Yang <shuxin.llvm at gmail.com> wrote:
>>
>>> Author: shuxin_yang
>>> Date: Mon Nov 11 16:00:23 2013
>>> New Revision: 194424
>>>
>>> URL: http://llvm.org/viewvc/llvm-project?rev=194424&view=rev
>>> Log:
>>> Fix PR17952.
>>>
>>> The symptom is that an assertion is triggered. I added the assertion to
>>> detect the situation where a value is propagated from a dead block.
>>> (We could certainly get rid of the assertion; doing so is safe, because
>>> propagating a value from a dead block to a live join node is perfectly fine.)
>>>
>>> The root cause of this bug is that edge-splitting is done on the fly;
>>> the edge being split can be a dead edge, so the block inserted to split
>>> the critical edge needs to be flagged "dead" as well.
>>>
>>> There are three ways to fix this bug:
>>> 1) Get rid of the assertion, as mentioned earlier.
>>> 2) When a dead edge is split, flag the inserted block "dead".
>>> 3) Proactively split the critical edges connecting dead and live blocks when
>>> new dead blocks are revealed.
>>>
>>> This fix goes for 3), at the cost of two additional lines of code.
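
To make the scenario concrete, here is a minimal hand-written .ll sketch; it is not one of the committed test cases, and the function and block names are invented:

  define i32 @sketch(i1 %c, i32 %v) {
  entry:
    ; The condition is a constant, so the edge entry -> join is dead. Since
    ; %entry has two successors and %join has two predecessors, that dead
    ; edge is also a critical edge.
    br i1 true, label %live, label %join

  live:
    %x = add i32 %v, 1
    br i1 %c, label %join, label %exit

  join:
    ; With approach 3), the dead critical edge is split as soon as the
    ; constant branch is seen; the inserted block is recorded as dead, and
    ; the incoming value below that used to flow in from %entry becomes undef.
    %r = phi i32 [ 0, %entry ], [ %x, %live ]
    br label %exit

  exit:
    %s = phi i32 [ %r, %join ], [ %x, %live ]
    ret i32 %s
  }

As the log above explains, if that edge were instead split on the fly without flagging the inserted block dead, GVN would see an apparently live block feeding %join a value from dead code, which is the situation the assertion was added to catch.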
>>
>> Hi Shuxin,
>>
>> What bugzilla does this fix? PR17952 is not a valid bugzilla ID:
>> http://llvm.org/bugs/show_bug.cgi?id=17952
>>
>> -Chris
>>
>>>
>>> The test cases were added by Rafael the other day.
>>>
>>> Added:
>>> llvm/trunk/test/Transforms/GVN/cond_br.ll
>>> - copied unchanged from r194347, llvm/trunk/test/Transforms/GVN/cond_br.ll
>>> llvm/trunk/test/Transforms/GVN/cond_br2.ll
>>> - copied unchanged from r194347, llvm/trunk/test/Transforms/GVN/cond_br2.ll
>>> Modified:
>>> llvm/trunk/lib/Transforms/Scalar/GVN.cpp
>>> llvm/trunk/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll
>>> llvm/trunk/test/Transforms/GVN/2008-07-02-Unreachable.ll
>>> llvm/trunk/test/Transforms/GVN/local-pre.ll
>>> llvm/trunk/test/Transforms/GVN/rle-nonlocal.ll
>>> llvm/trunk/test/Transforms/GVN/rle-semidominated.ll
>>> llvm/trunk/test/Transforms/GVN/rle.ll
>>>
>>> Modified: llvm/trunk/lib/Transforms/Scalar/GVN.cpp
>>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/GVN.cpp?rev=194424&r1=194423&r2=194424&view=diff
>>> ==============================================================================
>>> --- llvm/trunk/lib/Transforms/Scalar/GVN.cpp (original)
>>> +++ llvm/trunk/lib/Transforms/Scalar/GVN.cpp Mon Nov 11 16:00:23 2013
>>> @@ -21,6 +21,7 @@
>>> #include "llvm/ADT/DepthFirstIterator.h"
>>> #include "llvm/ADT/Hashing.h"
>>> #include "llvm/ADT/SmallPtrSet.h"
>>> +#include "llvm/ADT/SetVector.h"
>>> #include "llvm/ADT/Statistic.h"
>>> #include "llvm/Analysis/AliasAnalysis.h"
>>> #include "llvm/Analysis/CFG.h"
>>> @@ -507,7 +508,9 @@ namespace {
>>> enum ValType {
>>> SimpleVal, // A simple offsetted value that is accessed.
>>> LoadVal, // A value produced by a load.
>>> - MemIntrin // A memory intrinsic which is loaded from.
>>> + MemIntrin, // A memory intrinsic which is loaded from.
>>> + UndefVal // An UndefValue representing a value from a dead block (which
>>> + // is not yet physically removed from the CFG).
>>> };
>>>
>>> /// V - The value that is live out of the block.
>>> @@ -545,10 +548,20 @@ namespace {
>>> Res.Offset = Offset;
>>> return Res;
>>> }
>>> -
>>> +
>>> + static AvailableValueInBlock getUndef(BasicBlock *BB) {
>>> + AvailableValueInBlock Res;
>>> + Res.BB = BB;
>>> + Res.Val.setPointer(0);
>>> + Res.Val.setInt(UndefVal);
>>> + Res.Offset = 0;
>>> + return Res;
>>> + }
>>> +
>>> bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
>>> bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
>>> bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
>>> + bool isUndefValue() const { return Val.getInt() == UndefVal; }
>>>
>>> Value *getSimpleValue() const {
>>> assert(isSimpleValue() && "Wrong accessor");
>>> @@ -576,6 +589,7 @@ namespace {
>>> DominatorTree *DT;
>>> const DataLayout *TD;
>>> const TargetLibraryInfo *TLI;
>>> + SetVector<BasicBlock *> DeadBlocks;
>>>
>>> ValueTable VN;
>>>
>>> @@ -698,6 +712,9 @@ namespace {
>>> unsigned replaceAllDominatedUsesWith(Value *From, Value *To,
>>> const BasicBlockEdge &Root);
>>> bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root);
>>> + bool processFoldableCondBr(BranchInst *BI);
>>> + void addDeadBlock(BasicBlock *BB);
>>> + void assignValNumForDeadCode();
>>> };
>>>
>>> char GVN::ID = 0;
>>> @@ -1255,8 +1272,10 @@ static Value *ConstructSSAForLoadSet(Loa
>>> // just use the dominating value directly.
>>> if (ValuesPerBlock.size() == 1 &&
>>> gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
>>> - LI->getParent()))
>>> + LI->getParent())) {
>>> + assert(!ValuesPerBlock[0].isUndefValue() && "Dead BB dominate this block");
>>> return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), gvn);
>>> + }
>>>
>>> // Otherwise, we have to construct SSA form.
>>> SmallVector<PHINode*, 8> NewPHIs;
>>> @@ -1326,7 +1345,7 @@ Value *AvailableValueInBlock::Materializ
>>> << *getCoercedLoadValue() << '\n'
>>> << *Res << '\n' << "\n\n\n");
>>> }
>>> - } else {
>>> + } else if (isMemIntrinValue()) {
>>> const DataLayout *TD = gvn.getDataLayout();
>>> assert(TD && "Need target data to handle type mismatch case");
>>> Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
>>> @@ -1334,6 +1353,10 @@ Value *AvailableValueInBlock::Materializ
>>> DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
>>> << " " << *getMemIntrinValue() << '\n'
>>> << *Res << '\n' << "\n\n\n");
>>> + } else {
>>> + assert(isUndefValue() && "Should be UndefVal");
>>> + DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
>>> + return UndefValue::get(LoadTy);
>>> }
>>> return Res;
>>> }
>>> @@ -1357,6 +1380,13 @@ void GVN::AnalyzeLoadAvailability(LoadIn
>>> BasicBlock *DepBB = Deps[i].getBB();
>>> MemDepResult DepInfo = Deps[i].getResult();
>>>
>>> + if (DeadBlocks.count(DepBB)) {
>>> + // A dead dependent mem-op is treated as if it were a load of the
>>> + // same value as the load in question.
>>> + ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
>>> + continue;
>>> + }
>>> +
>>> if (!DepInfo.isDef() && !DepInfo.isClobber()) {
>>> UnavailableBlocks.push_back(DepBB);
>>> continue;
>>> @@ -2193,11 +2223,13 @@ bool GVN::processInstruction(Instruction
>>> // For conditional branches, we can perform simple conditional propagation on
>>> // the condition value itself.
>>> if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
>>> - if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
>>> + if (!BI->isConditional())
>>> return false;
>>>
>>> - Value *BranchCond = BI->getCondition();
>>> + if (isa<Constant>(BI->getCondition()))
>>> + return processFoldableCondBr(BI);
>>>
>>> + Value *BranchCond = BI->getCondition();
>>> BasicBlock *TrueSucc = BI->getSuccessor(0);
>>> BasicBlock *FalseSucc = BI->getSuccessor(1);
>>> // Avoid multiple edges early.
>>> @@ -2314,6 +2346,9 @@ bool GVN::runOnFunction(Function& F) {
>>> }
>>>
>>> if (EnablePRE) {
>>> + // Fabricate val-num for dead-code in order to suppress assertion in
>>> + // performPRE().
>>> + assignValNumForDeadCode();
>>> bool PREChanged = true;
>>> while (PREChanged) {
>>> PREChanged = performPRE(F);
>>> @@ -2327,6 +2362,9 @@ bool GVN::runOnFunction(Function& F) {
>>> // Actually, when this happens, we should just fully integrate PRE into GVN.
>>>
>>> cleanupGlobalSets();
>>> + // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each
>>> + // iteration.
>>> + DeadBlocks.clear();
>>>
>>> return Changed;
>>> }
>>> @@ -2337,6 +2375,9 @@ bool GVN::processBlock(BasicBlock *BB) {
>>> // (and incrementing BI before processing an instruction).
>>> assert(InstrsToErase.empty() &&
>>> "We expect InstrsToErase to be empty across iterations");
>>> + if (DeadBlocks.count(BB))
>>> + return false;
>>> +
>>> bool ChangedFunction = false;
>>>
>>> for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
>>> @@ -2630,3 +2671,131 @@ void GVN::verifyRemoved(const Instructio
>>> }
>>> }
>>> }
>>> +
>>> +// BB is declared dead, which implies that other blocks become dead as well. This
>>> +// function adds all these blocks to "DeadBlocks". For the dead blocks'
>>> +// live successors, update their phi nodes by replacing the operands
>>> +// corresponding to dead blocks with UndefVal.
>>> +//
>>> +void GVN::addDeadBlock(BasicBlock *BB) {
>>> + SmallVector<BasicBlock *, 4> NewDead;
>>> + SmallSetVector<BasicBlock *, 4> DF;
>>> +
>>> + NewDead.push_back(BB);
>>> + while (!NewDead.empty()) {
>>> + BasicBlock *D = NewDead.pop_back_val();
>>> + if (DeadBlocks.count(D))
>>> + continue;
>>> +
>>> + // All blocks dominated by D are dead.
>>> + SmallVector<BasicBlock *, 8> Dom;
>>> + DT->getDescendants(D, Dom);
>>> + DeadBlocks.insert(Dom.begin(), Dom.end());
>>> +
>>> + // Figure out the dominance-frontier(D).
>>> + for (SmallVectorImpl<BasicBlock *>::iterator I = Dom.begin(),
>>> + E = Dom.end(); I != E; I++) {
>>> + BasicBlock *B = *I;
>>> + for (succ_iterator SI = succ_begin(B), SE = succ_end(B); SI != SE; SI++) {
>>> + BasicBlock *S = *SI;
>>> + if (DeadBlocks.count(S))
>>> + continue;
>>> +
>>> + bool AllPredDead = true;
>>> + for (pred_iterator PI = pred_begin(S), PE = pred_end(S); PI != PE; PI++)
>>> + if (!DeadBlocks.count(*PI)) {
>>> + AllPredDead = false;
>>> + break;
>>> + }
>>> +
>>> + if (!AllPredDead) {
>>> + // S could be proved dead later on. That is why we don't update phi
>>> + // operands at this moment.
>>> + DF.insert(S);
>>> + } else {
>>> + // Although S is not dominated by D, it is dead by now. This can
>>> + // happen if S already had a dead predecessor before D was declared
>>> + // dead.
>>> + NewDead.push_back(S);
>>> + }
>>> + }
>>> + }
>>> + }
>>> +
>>> + // For the dead blocks' live successors, update their phi nodes by replacing
>>> + // the operands corresponding to dead blocks with UndefVal.
>>> + for(SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end();
>>> + I != E; I++) {
>>> + BasicBlock *B = *I;
>>> + if (DeadBlocks.count(B))
>>> + continue;
>>> +
>>> + for (pred_iterator PI = pred_begin(B), PE = pred_end(B); PI != PE; PI++) {
>>> + BasicBlock *P = *PI;
>>> +
>>> + if (!DeadBlocks.count(P))
>>> + continue;
>>> +
>>> + if (isCriticalEdge(P->getTerminator(), GetSuccessorNumber(P, B))) {
>>> + if (BasicBlock *S = splitCriticalEdges(P, B))
>>> + DeadBlocks.insert(P = S);
>>> + }
>>> +
>>> + for (BasicBlock::iterator II = B->begin(); isa<PHINode>(II); ++II) {
>>> + PHINode &Phi = cast<PHINode>(*II);
>>> + Phi.setIncomingValue(Phi.getBasicBlockIndex(P),
>>> + UndefValue::get(Phi.getType()));
>>> + }
>>> + }
>>> + }
>>> +}
>>> +
>>> +// If the given branch is recognized as a foldable branch (i.e. a conditional
>>> +// branch with a constant condition), this function performs the following
>>> +// analyses and transformation:
>>> +// 1) If the dead outgoing edge is a critical edge, split it. Let
>>> +//    R be the target of the dead outgoing edge.
>>> +// 2) Identify the set of dead blocks implied by the branch's dead outgoing
>>> +//    edge. The result of this step will be {X | X is dominated by R}.
>>> +// 3) Identify the blocks which have at least one dead predecessor. The
>>> +//    result of this step will be dominance-frontier(R).
>>> +// 4) Update the PHIs in DF(R) by replacing the operands corresponding to
>>> +//    dead blocks with "UndefVal", in the hope that these PHIs will be optimized away.
>>> +//
>>> +// Return true iff *NEW* dead code is found.
>>> +bool GVN::processFoldableCondBr(BranchInst *BI) {
>>> + if (!BI || BI->isUnconditional())
>>> + return false;
>>> +
>>> + ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
>>> + if (!Cond)
>>> + return false;
>>> +
>>> + BasicBlock *DeadRoot = Cond->getZExtValue() ?
>>> + BI->getSuccessor(1) : BI->getSuccessor(0);
>>> + if (DeadBlocks.count(DeadRoot))
>>> + return false;
>>> +
>>> + if (!DeadRoot->getSinglePredecessor())
>>> + DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
>>> +
>>> + addDeadBlock(DeadRoot);
>>> + return true;
>>> +}
>>> +
>>> +// performPRE() will trigger an assert if it comes across an instruction without
>>> +// an associated val-num. As there are normally far more live instructions than
>>> +// dead instructions, it makes more sense just to "fabricate" a val-number for
>>> +// the dead code than to check whether each instruction involved is dead or not.
>>> +void GVN::assignValNumForDeadCode() {
>>> + for (SetVector<BasicBlock *>::iterator I = DeadBlocks.begin(),
>>> + E = DeadBlocks.end(); I != E; I++) {
>>> + BasicBlock *BB = *I;
>>> + for (BasicBlock::iterator II = BB->begin(), EE = BB->end();
>>> + II != EE; II++) {
>>> + Instruction *Inst = &*II;
>>> + unsigned ValNum = VN.lookup_or_add(Inst);
>>> + addToLeaderTable(ValNum, Inst, BB);
>>> + }
>>> + }
>>> +}
>>>
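
As an aside on the addDeadBlock() comment above about a successor S that is dead even though it is not dominated by D: that corresponds to a CFG shape like the one in this hand-written sketch (invented names, not part of the patch), where all of a join block's predecessors become dead across two separately folded branches:

  define void @sketch2(i32* %p) {
  entry:
    br i1 true, label %live, label %dead1   ; first fold: %dead1 is dead

  live:
    br i1 true, label %exit, label %dead2   ; second fold: %dead2 is dead

  dead1:
    br label %s

  dead2:
    br label %s

  s:
    ; %s is dominated by neither %dead1 nor %dead2, but once both of them are
    ; dead, every predecessor of %s is dead, so %s is pushed onto the worklist
    ; and marked dead as well.
    store i32 0, i32* %p
    br label %exit

  exit:
    ret void
  }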
>>> Modified: llvm/trunk/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll
>>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll?rev=194424&r1=194423&r2=194424&view=diff
>>> ==============================================================================
>>> --- llvm/trunk/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll (original)
>>> +++ llvm/trunk/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll Mon Nov 11 16:00:23 2013
>>> @@ -2,7 +2,7 @@
>>>
>>> @last = external global [65 x i32*]
>>>
>>> -define i32 @NextRootMove(i32 %wtm) {
>>> +define i32 @NextRootMove(i32 %wtm, i32 %x, i32 %y, i32 %z) {
>>> entry:
>>> %A = alloca i32*
>>> %tmp17618 = load i32** getelementptr ([65 x i32*]* @last, i32 0, i32 1), align 4
>>> @@ -15,12 +15,14 @@ entry:
>>> br label %cond_true116
>>>
>>> cond_true116:
>>> - br i1 false, label %cond_true128, label %cond_true145
>>> + %cmp = icmp eq i32 %x, %y
>>> + br i1 %cmp, label %cond_true128, label %cond_true145
>>>
>>> cond_true128:
>>> %tmp17625 = load i32** getelementptr ([65 x i32*]* @last, i32 0, i32 1), align 4
>>> store i32* %tmp17625, i32** %A
>>> - br i1 false, label %bb98.backedge, label %return.loopexit
>>> + %cmp1 = icmp eq i32 %x, %z
>>> + br i1 %cmp1 , label %bb98.backedge, label %return.loopexit
>>>
>>> bb98.backedge:
>>> br label %cond_true116
>>>
>>> Modified: llvm/trunk/test/Transforms/GVN/2008-07-02-Unreachable.ll
>>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/GVN/2008-07-02-Unreachable.ll?rev=194424&r1=194423&r2=194424&view=diff
>>> ==============================================================================
>>> --- llvm/trunk/test/Transforms/GVN/2008-07-02-Unreachable.ll (original)
>>> +++ llvm/trunk/test/Transforms/GVN/2008-07-02-Unreachable.ll Mon Nov 11 16:00:23 2013
>>> @@ -3,10 +3,11 @@
>>>
>>> @g_3 = external global i8 ; <i8*> [#uses=2]
>>>
>>> -define i8 @func_1() nounwind {
>>> +define i8 @func_1(i32 %x, i32 %y) nounwind {
>>> entry:
>>> %A = alloca i8
>>> - br i1 false, label %ifelse, label %ifthen
>>> + %cmp = icmp eq i32 %x, %y
>>> + br i1 %cmp, label %ifelse, label %ifthen
>>>
>>> ifthen: ; preds = %entry
>>> br label %ifend
>>> @@ -14,9 +15,6 @@ ifthen: ; preds = %entry
>>> ifelse: ; preds = %entry
>>> %tmp3 = load i8* @g_3 ; <i8> [#uses=0]
>>> store i8 %tmp3, i8* %A
>>> - br label %forcond.thread
>>> -
>>> -forcond.thread: ; preds = %ifelse
>>> br label %afterfor
>>>
>>> forcond: ; preds = %forinc
>>>
>>> Modified: llvm/trunk/test/Transforms/GVN/local-pre.ll
>>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/GVN/local-pre.ll?rev=194424&r1=194423&r2=194424&view=diff
>>> ==============================================================================
>>> --- llvm/trunk/test/Transforms/GVN/local-pre.ll (original)
>>> +++ llvm/trunk/test/Transforms/GVN/local-pre.ll Mon Nov 11 16:00:23 2013
>>> @@ -1,9 +1,9 @@
>>> ; RUN: opt < %s -gvn -enable-pre -S | grep "b.pre"
>>>
>>> -define i32 @main(i32 %p) {
>>> +define i32 @main(i32 %p, i32 %q) {
>>> block1:
>>> -
>>> - br i1 true, label %block2, label %block3
>>> + %cmp = icmp eq i32 %p, %q
>>> + br i1 %cmp, label %block2, label %block3
>>>
>>> block2:
>>> %a = add i32 %p, 1
>>>
>>> Modified: llvm/trunk/test/Transforms/GVN/rle-nonlocal.ll
>>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/GVN/rle-nonlocal.ll?rev=194424&r1=194423&r2=194424&view=diff
>>> ==============================================================================
>>> --- llvm/trunk/test/Transforms/GVN/rle-nonlocal.ll (original)
>>> +++ llvm/trunk/test/Transforms/GVN/rle-nonlocal.ll Mon Nov 11 16:00:23 2013
>>> @@ -1,8 +1,9 @@
>>> ; RUN: opt < %s -basicaa -gvn -S | FileCheck %s
>>>
>>> -define i32 @main(i32** %p) {
>>> +define i32 @main(i32** %p, i32 %x, i32 %y) {
>>> block1:
>>> - br i1 true, label %block2, label %block3
>>> + %cmp = icmp eq i32 %x, %y
>>> + br i1 %cmp , label %block2, label %block3
>>>
>>> block2:
>>> %a = load i32** %p
>>>
>>> Modified: llvm/trunk/test/Transforms/GVN/rle-semidominated.ll
>>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/GVN/rle-semidominated.ll?rev=194424&r1=194423&r2=194424&view=diff
>>> ==============================================================================
>>> --- llvm/trunk/test/Transforms/GVN/rle-semidominated.ll (original)
>>> +++ llvm/trunk/test/Transforms/GVN/rle-semidominated.ll Mon Nov 11 16:00:23 2013
>>> @@ -1,9 +1,10 @@
>>> ; RUN: opt < %s -basicaa -gvn -S | grep "DEAD = phi i32 "
>>>
>>> -define i32 @main(i32* %p) {
>>> +define i32 @main(i32* %p, i32 %x, i32 %y) {
>>> block1:
>>> %z = load i32* %p
>>> - br i1 true, label %block2, label %block3
>>> + %cmp = icmp eq i32 %x, %y
>>> + br i1 %cmp, label %block2, label %block3
>>>
>>> block2:
>>> br label %block4
>>>
>>> Modified: llvm/trunk/test/Transforms/GVN/rle.ll
>>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/GVN/rle.ll?rev=194424&r1=194423&r2=194424&view=diff
>>> ==============================================================================
>>> --- llvm/trunk/test/Transforms/GVN/rle.ll (original)
>>> +++ llvm/trunk/test/Transforms/GVN/rle.ll Mon Nov 11 16:00:23 2013
>>> @@ -369,13 +369,14 @@ Cont:
>>> ; CHECK: ret i8 %A
>>> }
>>>
>>> -define i32 @chained_load(i32** %p) {
>>> +define i32 @chained_load(i32** %p, i32 %x, i32 %y) {
>>> block1:
>>> %A = alloca i32*
>>>
>>> %z = load i32** %p
>>> store i32* %z, i32** %A
>>> - br i1 true, label %block2, label %block3
>>> + %cmp = icmp eq i32 %x, %y
>>> + br i1 %cmp, label %block2, label %block3
>>>
>>> block2:
>>> %a = load i32** %p
>>> @@ -439,10 +440,11 @@ TY:
>>> ret i32 0
>>> }
>>>
>>> -define i32 @phi_trans3(i32* %p) {
>>> +define i32 @phi_trans3(i32* %p, i32 %x, i32 %y, i32 %z) {
>>> ; CHECK-LABEL: @phi_trans3(
>>> block1:
>>> - br i1 true, label %block2, label %block3
>>> + %cmpxy = icmp eq i32 %x, %y
>>> + br i1 %cmpxy, label %block2, label %block3
>>>
>>> block2:
>>> store i32 87, i32* %p
>>> @@ -455,7 +457,7 @@ block3:
>>>
>>> block4:
>>> %A = phi i32 [-1, %block2], [42, %block3]
>>> - br i1 true, label %block5, label %exit
>>> + br i1 %cmpxy, label %block5, label %exit
>>>
>>> ; CHECK: block4:
>>> ; CHECK-NEXT: %D = phi i32 [ 87, %block2 ], [ 97, %block3 ]
>>> @@ -463,11 +465,11 @@ block4:
>>>
>>> block5:
>>> %B = add i32 %A, 1
>>> - br i1 true, label %block6, label %exit
>>> + br i1 %cmpxy, label %block6, label %exit
>>>
>>> block6:
>>> %C = getelementptr i32* %p, i32 %B
>>> - br i1 true, label %block7, label %exit
>>> + br i1 %cmpxy, label %block7, label %exit
>>>
>>> block7:
>>> %D = load i32* %C
>>>
>>>
>>
>
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits