[llvm] r265950 - This reverts commits r265913 and r265912
Sanjoy Das via llvm-commits
llvm-commits at lists.llvm.org
Mon Apr 11 08:26:18 PDT 2016
Author: sanjoy
Date: Mon Apr 11 10:26:18 2016
New Revision: 265950
URL: http://llvm.org/viewvc/llvm-project?rev=265950&view=rev
Log:
This reverts commits r265913 and r265912
See PR27315
r265913: "[IndVars] Eliminate op.with.overflow when possible"
r265912: "[SCEV] See through op.with.overflow intrinsics"
Removed:
llvm/trunk/test/Analysis/ScalarEvolution/overflow-intrinsics.ll
llvm/trunk/test/Transforms/IndVarSimplify/overflow-intrinsics.ll
Modified:
llvm/trunk/include/llvm/Analysis/ValueTracking.h
llvm/trunk/lib/Analysis/ScalarEvolution.cpp
llvm/trunk/lib/Analysis/ValueTracking.cpp
llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp
llvm/trunk/test/Transforms/IndVarSimplify/overflowcheck.ll
Modified: llvm/trunk/include/llvm/Analysis/ValueTracking.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/ValueTracking.h?rev=265950&r1=265949&r2=265950&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/ValueTracking.h (original)
+++ llvm/trunk/include/llvm/Analysis/ValueTracking.h Mon Apr 11 10:26:18 2016
@@ -18,7 +18,6 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Instruction.h"
-#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -326,11 +325,6 @@ namespace llvm {
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
- /// Returns true if the arithmetic part of \p II's result is used only
- /// along the paths control dependent on the computation not overflowing,
- /// \p II being an <op>.with.overflow intrinsic.
- bool isOverflowIntrinsicNoWrap(IntrinsicInst *II, DominatorTree &DT);
-
/// Return true if this function can prove that the instruction I will
/// always transfer execution to one of its successors (including the next
/// instruction that follows within a basic block). E.g. this is not
Modified: llvm/trunk/lib/Analysis/ScalarEvolution.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ScalarEvolution.cpp?rev=265950&r1=265949&r2=265950&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/ScalarEvolution.cpp (original)
+++ llvm/trunk/lib/Analysis/ScalarEvolution.cpp Mon Apr 11 10:26:18 2016
@@ -3831,7 +3831,7 @@ struct BinaryOp {
/// Try to map \p V into a BinaryOp, and return \c None on failure.
-static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
+static Optional<BinaryOp> MatchBinaryOp(Value *V) {
auto *Op = dyn_cast<Operator>(V);
if (!Op)
return None;
@@ -3877,50 +3877,6 @@ static Optional<BinaryOp> MatchBinaryOp(
}
return BinaryOp(Op);
- case Instruction::ExtractValue: {
- auto *EVI = cast<ExtractValueInst>(Op);
- if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
- break;
-
- auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
- if (!CI)
- break;
-
- if (auto *F = CI->getCalledFunction())
- switch (F->getIntrinsicID()) {
- case Intrinsic::sadd_with_overflow:
- case Intrinsic::uadd_with_overflow: {
- if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
- return BinaryOp(Instruction::Add, CI->getArgOperand(0),
- CI->getArgOperand(1));
-
- // Now that we know that all uses of the arithmetic-result component of
- // CI are guarded by the overflow check, we can go ahead and pretend
- // that the arithmetic is non-overflowing.
- if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow)
- return BinaryOp(Instruction::Add, CI->getArgOperand(0),
- CI->getArgOperand(1), /* IsNSW = */ true,
- /* IsNUW = */ false);
- else
- return BinaryOp(Instruction::Add, CI->getArgOperand(0),
- CI->getArgOperand(1), /* IsNSW = */ false,
- /* IsNUW*/ true);
- }
-
- case Intrinsic::ssub_with_overflow:
- case Intrinsic::usub_with_overflow:
- return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
- CI->getArgOperand(1));
-
- case Intrinsic::smul_with_overflow:
- case Intrinsic::umul_with_overflow:
- return BinaryOp(Instruction::Mul, CI->getArgOperand(0),
- CI->getArgOperand(1));
- default:
- break;
- }
- }
-
default:
break;
}
@@ -3997,7 +3953,7 @@ const SCEV *ScalarEvolution::createAddRe
// If the increment doesn't overflow, then neither the addrec nor
// the post-increment will overflow.
- if (auto BO = MatchBinaryOp(BEValueV, DT)) {
+ if (auto BO = MatchBinaryOp(BEValueV)) {
if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
if (BO->IsNUW)
Flags = setFlags(Flags, SCEV::FlagNUW);
@@ -4877,7 +4833,7 @@ const SCEV *ScalarEvolution::createSCEV(
return getUnknown(V);
Operator *U = cast<Operator>(V);
- if (auto BO = MatchBinaryOp(U, DT)) {
+ if (auto BO = MatchBinaryOp(U)) {
switch (BO->Opcode) {
case Instruction::Add: {
// The simple thing to do would be to just call getSCEV on both operands
@@ -4918,7 +4874,7 @@ const SCEV *ScalarEvolution::createSCEV(
else
AddOps.push_back(getSCEV(BO->RHS));
- auto NewBO = MatchBinaryOp(BO->LHS, DT);
+ auto NewBO = MatchBinaryOp(BO->LHS);
if (!NewBO || (NewBO->Opcode != Instruction::Add &&
NewBO->Opcode != Instruction::Sub)) {
AddOps.push_back(getSCEV(BO->LHS));
@@ -4948,7 +4904,7 @@ const SCEV *ScalarEvolution::createSCEV(
}
MulOps.push_back(getSCEV(BO->RHS));
- auto NewBO = MatchBinaryOp(BO->LHS, DT);
+ auto NewBO = MatchBinaryOp(BO->LHS);
if (!NewBO || NewBO->Opcode != Instruction::Mul) {
MulOps.push_back(getSCEV(BO->LHS));
break;
Modified: llvm/trunk/lib/Analysis/ValueTracking.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ValueTracking.cpp?rev=265950&r1=265949&r2=265950&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/ValueTracking.cpp (original)
+++ llvm/trunk/lib/Analysis/ValueTracking.cpp Mon Apr 11 10:26:18 2016
@@ -3253,67 +3253,6 @@ static OverflowResult computeOverflowFor
return OverflowResult::MayOverflow;
}
-bool llvm::isOverflowIntrinsicNoWrap(IntrinsicInst *II, DominatorTree &DT) {
-#ifndef NDEBUG
- auto IID = II->getIntrinsicID();
- assert((IID == Intrinsic::sadd_with_overflow ||
- IID == Intrinsic::uadd_with_overflow ||
- IID == Intrinsic::ssub_with_overflow ||
- IID == Intrinsic::usub_with_overflow ||
- IID == Intrinsic::smul_with_overflow ||
- IID == Intrinsic::umul_with_overflow) &&
- "Not an overflow intrinsic!");
-#endif
-
- SmallVector<BranchInst *, 2> GuardingBranches;
- SmallVector<ExtractValueInst *, 2> Results;
-
- for (User *U : II->users()) {
- if (auto *EVI = dyn_cast<ExtractValueInst>(U)) {
- assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
-
- if (EVI->getIndices()[0] == 0)
- Results.push_back(EVI);
- else {
- assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
-
- for (auto *U : EVI->users())
- if (auto *B = dyn_cast<BranchInst>(U)) {
- assert(B->isConditional() && "How else is it using an i1?");
- GuardingBranches.push_back(B);
- }
- }
- } else {
- // We are using the aggregate directly in a way we don't want to analyze
- // here (storing it to a global, say).
- return false;
- }
- }
-
- auto AllUsesGuardedByBranch = [&](BranchInst *BI) {
- BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
- if (!NoWrapEdge.isSingleEdge())
- return false;
-
- // Check if all users of the add are provably no-wrap.
- for (auto *Result : Results) {
-    // If the extractvalue itself is not executed on overflow, then we don't
- // need to check each use separately, since domination is transitive.
- if (DT.dominates(NoWrapEdge, Result->getParent()))
- continue;
-
- for (auto &RU : Result->uses())
- if (!DT.dominates(NoWrapEdge, RU))
- return false;
- }
-
- return true;
- };
-
- return any_of(GuardingBranches, AllUsesGuardedByBranch);
-}
-
-
OverflowResult llvm::computeOverflowForSignedAdd(AddOperator *Add,
const DataLayout &DL,
AssumptionCache *AC,
Modified: llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp?rev=265950&r1=265949&r2=265950&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp Mon Apr 11 10:26:18 2016
@@ -71,7 +71,6 @@ namespace {
bool eliminateIdentitySCEV(Instruction *UseInst, Instruction *IVOperand);
- bool eliminateOverflowIntrinsic(CallInst *CI);
bool eliminateIVUser(Instruction *UseInst, Instruction *IVOperand);
void eliminateIVComparison(ICmpInst *ICmp, Value *IVOperand);
void eliminateIVRemainder(BinaryOperator *Rem, Value *IVOperand,
@@ -319,108 +318,6 @@ void SimplifyIndvar::eliminateIVRemainde
DeadInsts.emplace_back(Rem);
}
-bool SimplifyIndvar::eliminateOverflowIntrinsic(CallInst *CI) {
- auto *F = CI->getCalledFunction();
- if (!F)
- return false;
-
- typedef const SCEV *(ScalarEvolution::*OperationFunctionTy)(
- const SCEV *, const SCEV *, SCEV::NoWrapFlags);
- typedef const SCEV *(ScalarEvolution::*ExtensionFunctionTy)(
- const SCEV *, Type *);
-
- OperationFunctionTy Operation;
- ExtensionFunctionTy Extension;
-
- Instruction::BinaryOps RawOp;
-
- // We always have exactly one of nsw or nuw. If NoSignedOverflow is false, we
- // have nuw.
- bool NoSignedOverflow;
-
- switch (F->getIntrinsicID()) {
- default:
- return false;
-
- case Intrinsic::sadd_with_overflow:
- Operation = &ScalarEvolution::getAddExpr;
- Extension = &ScalarEvolution::getSignExtendExpr;
- RawOp = Instruction::Add;
- NoSignedOverflow = true;
- break;
-
- case Intrinsic::uadd_with_overflow:
- Operation = &ScalarEvolution::getAddExpr;
- Extension = &ScalarEvolution::getZeroExtendExpr;
- RawOp = Instruction::Add;
- NoSignedOverflow = false;
- break;
-
- case Intrinsic::ssub_with_overflow:
- Operation = &ScalarEvolution::getMinusSCEV;
- Extension = &ScalarEvolution::getSignExtendExpr;
- RawOp = Instruction::Sub;
- NoSignedOverflow = true;
- break;
-
- case Intrinsic::usub_with_overflow:
- Operation = &ScalarEvolution::getMinusSCEV;
- Extension = &ScalarEvolution::getZeroExtendExpr;
- RawOp = Instruction::Sub;
- NoSignedOverflow = false;
- break;
- }
-
- const SCEV *LHS = SE->getSCEV(CI->getArgOperand(0));
- const SCEV *RHS = SE->getSCEV(CI->getArgOperand(1));
-
- auto *NarrowTy = cast<IntegerType>(LHS->getType());
- auto *WideTy =
- IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);
-
- const SCEV *A =
- (SE->*Extension)((SE->*Operation)(LHS, RHS, SCEV::FlagAnyWrap), WideTy);
- const SCEV *B =
- (SE->*Operation)((SE->*Extension)(LHS, WideTy),
- (SE->*Extension)(RHS, WideTy), SCEV::FlagAnyWrap);
-
- if (A != B)
- return false;
-
- // Proved no overflow, nuke the overflow check and, if possible, the overflow
- // intrinsic as well.
-
- BinaryOperator *NewResult = BinaryOperator::Create(
- RawOp, CI->getArgOperand(0), CI->getArgOperand(1), "", CI);
-
- if (NoSignedOverflow)
- NewResult->setHasNoSignedWrap(true);
- else
- NewResult->setHasNoUnsignedWrap(true);
-
- SmallVector<ExtractValueInst *, 4> ToDelete;
-
- for (auto *U : CI->users()) {
- if (auto *EVI = dyn_cast<ExtractValueInst>(U)) {
- if (EVI->getIndices()[0] == 1)
- EVI->replaceAllUsesWith(ConstantInt::getFalse(CI->getContext()));
- else {
- assert(EVI->getIndices()[0] == 0 && "Only two possibilities!");
- EVI->replaceAllUsesWith(NewResult);
- }
- ToDelete.push_back(EVI);
- }
- }
-
- for (auto *EVI : ToDelete)
- EVI->eraseFromParent();
-
- if (CI->use_empty())
- CI->eraseFromParent();
-
- return true;
-}
-
/// Eliminate an operation that consumes a simple IV and has no observable
/// side-effect given the range of IV values. IVOperand is guaranteed SCEVable,
/// but UseInst may not be.
@@ -438,10 +335,6 @@ bool SimplifyIndvar::eliminateIVUser(Ins
}
}
- if (auto *CI = dyn_cast<CallInst>(UseInst))
- if (eliminateOverflowIntrinsic(CI))
- return true;
-
if (eliminateIdentitySCEV(UseInst, IVOperand))
return true;
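The eliminateOverflowIntrinsic transform removed above proved the absence of overflow symbolically: it asked SCEV whether ext(a op b), evaluated in a type twice as wide, equals ext(a) op ext(b) (the A and B expressions in the code). When the two agree, the narrow operation cannot wrap, so the intrinsic could be rewritten as a plain add/sub with nsw or nuw and its overflow bit folded to false. A runtime analogue of that check for a signed 32-bit add, as a sketch only (not the SCEV-based implementation):

#include <cstdint>

// Concrete analogue of the symbolic check: widen both operands to 64 bits,
// redo the add, and see whether the result still fits in i32. If it does,
// sign-extending the truncated 32-bit sum reproduces the wide sum (A == B),
// so the narrow add cannot overflow.
bool saddCannotWrap(int32_t LHS, int32_t RHS) {
  int64_t Wide = static_cast<int64_t>(LHS) + static_cast<int64_t>(RHS);
  return Wide >= INT32_MIN && Wide <= INT32_MAX;
}

In the removed IndVarSimplify tests below, SCEV can establish exactly this for the induction variable (e.g. {0,+,1} bounded by 16), which is why the sadd/uadd/ssub overflow guards collapse to "br i1 false" in those tests.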
Removed: llvm/trunk/test/Analysis/ScalarEvolution/overflow-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/overflow-intrinsics.ll?rev=265949&view=auto
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/overflow-intrinsics.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/overflow-intrinsics.ll (removed)
@@ -1,309 +0,0 @@
-; RUN: opt -analyze -scalar-evolution < %s | FileCheck %s
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define void @f_sadd_0(i8* %a) {
-; CHECK-LABEL: Classifying expressions for: @f_sadd_0
-entry:
- br label %for.body
-
-for.cond.cleanup: ; preds = %cont
- ret void
-
-for.body: ; preds = %entry, %cont
-; CHECK: %i.04 = phi i32 [ 0, %entry ], [ %tmp2, %cont ]
-; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%for.body> U: [0,16) S: [0,16)
-
- %i.04 = phi i32 [ 0, %entry ], [ %tmp2, %cont ]
- %idxprom = sext i32 %i.04 to i64
- %arrayidx = getelementptr inbounds i8, i8* %a, i64 %idxprom
- store i8 0, i8* %arrayidx, align 1
- %tmp0 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %i.04, i32 1)
- %tmp1 = extractvalue { i32, i1 } %tmp0, 1
- br i1 %tmp1, label %trap, label %cont, !nosanitize !{}
-
-trap: ; preds = %for.body
- tail call void @llvm.trap() #2, !nosanitize !{}
- unreachable, !nosanitize !{}
-
-cont: ; preds = %for.body
- %tmp2 = extractvalue { i32, i1 } %tmp0, 0
- %cmp = icmp slt i32 %tmp2, 16
- br i1 %cmp, label %for.body, label %for.cond.cleanup
-; CHECK: Loop %for.body: max backedge-taken count is 15
-}
-
-define void @f_sadd_1(i8* %a) {
-; CHECK-LABEL: Classifying expressions for: @f_sadd_1
-entry:
- br label %for.body
-
-for.cond.cleanup: ; preds = %cont
- ret void
-
-for.body: ; preds = %entry, %cont
-; CHECK: %i.04 = phi i32 [ 0, %entry ], [ %tmp2, %cont ]
-; CHECK-NEXT: --> {0,+,1}<%for.body> U: [0,16) S: [0,16)
-
-; SCEV can prove <nsw> for the above induction variable, but it does not
-; bother to do so before it sees the sext below, since it is not 100%
-; obvious.
-
- %i.04 = phi i32 [ 0, %entry ], [ %tmp2, %cont ]
- %idxprom = sext i32 %i.04 to i64
- %arrayidx = getelementptr inbounds i8, i8* %a, i64 %idxprom
- store i8 0, i8* %arrayidx, align 1
- %tmp0 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %i.04, i32 1)
- %tmp1 = extractvalue { i32, i1 } %tmp0, 1
- br i1 %tmp1, label %trap, label %cont, !nosanitize !{}
-
-trap: ; preds = %for.body
-
- br label %cont
-
-cont: ; preds = %for.body
- %tmp2 = extractvalue { i32, i1 } %tmp0, 0
- %cmp = icmp slt i32 %tmp2, 16
- br i1 %cmp, label %for.body, label %for.cond.cleanup
-; CHECK: Loop %for.body: max backedge-taken count is 15
-}
-
-define void @f_sadd_2(i8* %a, i1* %c) {
-; CHECK-LABEL: Classifying expressions for: @f_sadd_2
-entry:
- br label %for.body
-
-for.cond.cleanup: ; preds = %cont
- ret void
-
-for.body: ; preds = %entry, %cont
-; CHECK: %i.04 = phi i32 [ 0, %entry ], [ %tmp2, %cont ]
-; CHECK-NEXT: --> {0,+,1}<%for.body>
-
- %i.04 = phi i32 [ 0, %entry ], [ %tmp2, %cont ]
- %idxprom = sext i32 %i.04 to i64
- %arrayidx = getelementptr inbounds i8, i8* %a, i64 %idxprom
- store i8 0, i8* %arrayidx, align 1
- %tmp0 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %i.04, i32 1)
- %tmp1 = extractvalue { i32, i1 } %tmp0, 1
- br i1 %tmp1, label %trap, label %cont, !nosanitize !{}
-
-trap: ; preds = %for.body
-
- br label %cont
-
-cont: ; preds = %for.body
- %tmp2 = extractvalue { i32, i1 } %tmp0, 0
- %cond = load volatile i1, i1* %c
- br i1 %cond, label %for.body, label %for.cond.cleanup
-}
-
-define void @f_sadd_3(i8* %a, i1* %c) {
-; CHECK-LABEL: Classifying expressions for: @f_sadd_3
-entry:
- br label %for.body
-
-for.cond.cleanup: ; preds = %cont
- ret void
-
-for.body: ; preds = %entry, %cont
-; CHECK: %i.04 = phi i32 [ 0, %entry ], [ %tmp2, %for.body ]
-; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%for.body>
-
- %i.04 = phi i32 [ 0, %entry ], [ %tmp2, %for.body ]
- %idxprom = sext i32 %i.04 to i64
- %arrayidx = getelementptr inbounds i8, i8* %a, i64 %idxprom
- store i8 0, i8* %arrayidx, align 1
- %tmp0 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %i.04, i32 1)
- %tmp1 = extractvalue { i32, i1 } %tmp0, 1
- %tmp2 = extractvalue { i32, i1 } %tmp0, 0
- br i1 %tmp1, label %trap, label %for.body, !nosanitize !{}
-
-trap: ; preds = %for.body
- tail call void @llvm.trap() #2, !nosanitize !{}
- unreachable, !nosanitize !{}
-}
-
-define void @f_sadd_4(i8* %a, i1* %c) {
-; CHECK-LABEL: Classifying expressions for: @f_sadd_4
-entry:
- br label %for.body
-
-for.cond.cleanup: ; preds = %cont
- ret void
-
-for.body: ; preds = %entry, %cont
-; CHECK: %i.04 = phi i32 [ 0, %entry ], [ %tmp2, %merge ]
-; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%for.body>
-
- %i.04 = phi i32 [ 0, %entry ], [ %tmp2, %merge ]
- %idxprom = sext i32 %i.04 to i64
- %arrayidx = getelementptr inbounds i8, i8* %a, i64 %idxprom
- store i8 0, i8* %arrayidx, align 1
- %tmp0 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %i.04, i32 1)
- %tmp1 = extractvalue { i32, i1 } %tmp0, 1
- %tmp2 = extractvalue { i32, i1 } %tmp0, 0
- br i1 %tmp1, label %notrap, label %merge
-
-notrap:
- br label %merge
-
-merge:
- %tmp3 = extractvalue { i32, i1 } %tmp0, 1
- br i1 %tmp3, label %trap, label %for.body, !nosanitize !{}
-
-trap: ; preds = %for.body
- tail call void @llvm.trap() #2, !nosanitize !{}
- unreachable, !nosanitize !{}
-}
-
-define void @f_sadd_may_overflow(i8* %a, i1* %c) {
-; CHECK-LABEL: Classifying expressions for: @f_sadd_may_overflow
-entry:
- br label %for.body
-
-for.cond.cleanup: ; preds = %cont
- ret void
-
-for.body: ; preds = %entry, %cont
-; CHECK: %i.04 = phi i32 [ 0, %entry ], [ %tmp1, %cont ]
-; CHECK-NEXT: --> {0,+,1}<%for.body> U: full-set S: full-set
-
- %i.04 = phi i32 [ 0, %entry ], [ %tmp1, %cont ]
- %idxprom = sext i32 %i.04 to i64
- %arrayidx = getelementptr inbounds i8, i8* %a, i64 %idxprom
- store i8 0, i8* %arrayidx, align 1
- %tmp0 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %i.04, i32 1)
- %cond1 = load volatile i1, i1* %c
- br i1 %cond1, label %trap, label %cont, !nosanitize !{}
-
-trap: ; preds = %for.body
- tail call void @llvm.trap() #2, !nosanitize !{}
- unreachable, !nosanitize !{}
-
-cont: ; preds = %for.body
- %tmp1 = extractvalue { i32, i1 } %tmp0, 0
- %cond = load volatile i1, i1* %c
- br i1 %cond, label %for.body, label %for.cond.cleanup
-}
-
-define void @f_uadd(i8* %a) {
-; CHECK-LABEL: Classifying expressions for: @f_uadd
-entry:
- br label %for.body
-
-for.cond.cleanup: ; preds = %cont
- ret void
-
-for.body: ; preds = %entry, %cont
-; CHECK: %i.04 = phi i32 [ 0, %entry ], [ %tmp2, %cont ]
-; CHECK-NEXT: --> {0,+,1}<nuw><%for.body> U: [0,16) S: [0,16)
-
- %i.04 = phi i32 [ 0, %entry ], [ %tmp2, %cont ]
- %idxprom = sext i32 %i.04 to i64
- %arrayidx = getelementptr inbounds i8, i8* %a, i64 %idxprom
- store i8 0, i8* %arrayidx, align 1
- %tmp0 = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %i.04, i32 1)
- %tmp1 = extractvalue { i32, i1 } %tmp0, 1
- br i1 %tmp1, label %trap, label %cont, !nosanitize !{}
-
-trap: ; preds = %for.body
- tail call void @llvm.trap(), !nosanitize !{}
- unreachable, !nosanitize !{}
-
-cont: ; preds = %for.body
- %tmp2 = extractvalue { i32, i1 } %tmp0, 0
- %cmp = icmp slt i32 %tmp2, 16
- br i1 %cmp, label %for.body, label %for.cond.cleanup
-; CHECK: Loop %for.body: max backedge-taken count is 15
-}
-
-define void @f_ssub(i8* nocapture %a) {
-; CHECK-LABEL: Classifying expressions for: @f_ssub
-entry:
- br label %for.body
-
-for.cond.cleanup: ; preds = %cont
- ret void
-
-for.body: ; preds = %entry, %cont
-; CHECK: %i.04 = phi i32 [ 15, %entry ], [ %tmp2, %cont ]
-; CHECK-NEXT: --> {15,+,-1}<%for.body> U: [0,16) S: [0,16)
-
- %i.04 = phi i32 [ 15, %entry ], [ %tmp2, %cont ]
- %idxprom = sext i32 %i.04 to i64
- %arrayidx = getelementptr inbounds i8, i8* %a, i64 %idxprom
- store i8 0, i8* %arrayidx, align 1
- %tmp0 = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %i.04, i32 1)
- %tmp1 = extractvalue { i32, i1 } %tmp0, 1
- br i1 %tmp1, label %trap, label %cont, !nosanitize !{}
-
-trap: ; preds = %for.body
- tail call void @llvm.trap(), !nosanitize !{}
- unreachable, !nosanitize !{}
-
-cont: ; preds = %for.body
- %tmp2 = extractvalue { i32, i1 } %tmp0, 0
- %cmp = icmp sgt i32 %tmp2, -1
- br i1 %cmp, label %for.body, label %for.cond.cleanup
-; CHECK: Loop %for.body: max backedge-taken count is 15
-}
-
-define void @f_usub(i8* nocapture %a) {
-; CHECK-LABEL: Classifying expressions for: @f_usub
-entry:
- br label %for.body
-
-for.cond.cleanup: ; preds = %cont
- ret void
-
-for.body: ; preds = %entry, %cont
-; CHECK: %i.04 = phi i32 [ 15, %entry ], [ %tmp2, %cont ]
-; CHECK-NEXT: --> {15,+,-1}<%for.body> U: [0,16) S: [0,16)
-
- %i.04 = phi i32 [ 15, %entry ], [ %tmp2, %cont ]
- %idxprom = sext i32 %i.04 to i64
- %arrayidx = getelementptr inbounds i8, i8* %a, i64 %idxprom
- store i8 0, i8* %arrayidx, align 1
- %tmp0 = tail call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %i.04, i32 1)
- %tmp1 = extractvalue { i32, i1 } %tmp0, 1
- br i1 %tmp1, label %trap, label %cont, !nosanitize !{}
-
-trap: ; preds = %for.body
- tail call void @llvm.trap(), !nosanitize !{}
- unreachable, !nosanitize !{}
-
-cont: ; preds = %for.body
- %tmp2 = extractvalue { i32, i1 } %tmp0, 0
- %cmp = icmp sgt i32 %tmp2, -1
- br i1 %cmp, label %for.body, label %for.cond.cleanup
-; CHECK: Loop %for.body: max backedge-taken count is 15
-}
-
-define i32 @f_smul(i32 %val_a, i32 %val_b) {
-; CHECK-LABEL: Classifying expressions for: @f_smul
- %agg = tail call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %val_a, i32 %val_b)
-; CHECK: %mul = extractvalue { i32, i1 } %agg, 0
-; CHECK-NEXT: --> (%val_a * %val_b) U: full-set S: full-set
- %mul = extractvalue { i32, i1 } %agg, 0
- ret i32 %mul
-}
-
-define i32 @f_umul(i32 %val_a, i32 %val_b) {
-; CHECK-LABEL: Classifying expressions for: @f_umul
- %agg = tail call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %val_a, i32 %val_b)
-; CHECK: %mul = extractvalue { i32, i1 } %agg, 0
-; CHECK-NEXT: --> (%val_a * %val_b) U: full-set S: full-set
- %mul = extractvalue { i32, i1 } %agg, 0
- ret i32 %mul
-}
-
-declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
-declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
-declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
-declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
-declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
-declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
-
-declare void @llvm.trap() #2
Removed: llvm/trunk/test/Transforms/IndVarSimplify/overflow-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/IndVarSimplify/overflow-intrinsics.ll?rev=265949&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/IndVarSimplify/overflow-intrinsics.ll (original)
+++ llvm/trunk/test/Transforms/IndVarSimplify/overflow-intrinsics.ll (removed)
@@ -1,137 +0,0 @@
-; RUN: opt -S -indvars < %s | FileCheck %s
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define void @f_sadd(i8* %a) {
-; CHECK-LABEL: @f_sadd(
-entry:
- br label %for.body
-
-for.cond.cleanup: ; preds = %cont
- ret void
-
-for.body: ; preds = %entry, %cont
- %i.04 = phi i32 [ 0, %entry ], [ %2, %cont ]
- %idxprom = sext i32 %i.04 to i64
- %arrayidx = getelementptr inbounds i8, i8* %a, i64 %idxprom
- store i8 0, i8* %arrayidx, align 1
- %0 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %i.04, i32 1)
- %1 = extractvalue { i32, i1 } %0, 1
-; CHECK: for.body:
-; CHECK-NOT: @llvm.sadd.with.overflow
-; CHECK: br i1 false, label %trap, label %cont, !nosanitize !0
- br i1 %1, label %trap, label %cont, !nosanitize !{}
-
-trap: ; preds = %for.body
- tail call void @llvm.trap() #2, !nosanitize !{}
- unreachable, !nosanitize !{}
-
-cont: ; preds = %for.body
- %2 = extractvalue { i32, i1 } %0, 0
- %cmp = icmp slt i32 %2, 16
- br i1 %cmp, label %for.body, label %for.cond.cleanup
-}
-
-define void @f_uadd(i8* %a) {
-; CHECK-LABEL: @f_uadd(
-entry:
- br label %for.body
-
-for.cond.cleanup: ; preds = %cont
- ret void
-
-for.body: ; preds = %entry, %cont
- %i.04 = phi i32 [ 0, %entry ], [ %2, %cont ]
- %idxprom = sext i32 %i.04 to i64
- %arrayidx = getelementptr inbounds i8, i8* %a, i64 %idxprom
- store i8 0, i8* %arrayidx, align 1
- %0 = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %i.04, i32 1)
- %1 = extractvalue { i32, i1 } %0, 1
-; CHECK: for.body:
-; CHECK-NOT: @llvm.uadd.with.overflow
-; CHECK: br i1 false, label %trap, label %cont, !nosanitize !0
- br i1 %1, label %trap, label %cont, !nosanitize !{}
-
-trap: ; preds = %for.body
- tail call void @llvm.trap(), !nosanitize !{}
- unreachable, !nosanitize !{}
-
-cont: ; preds = %for.body
- %2 = extractvalue { i32, i1 } %0, 0
- %cmp = icmp slt i32 %2, 16
- br i1 %cmp, label %for.body, label %for.cond.cleanup
-}
-
-define void @f_ssub(i8* nocapture %a) {
-; CHECK-LABEL: @f_ssub(
-entry:
- br label %for.body
-
-for.cond.cleanup: ; preds = %cont
- ret void
-
-for.body: ; preds = %entry, %cont
- %i.04 = phi i32 [ 15, %entry ], [ %2, %cont ]
- %idxprom = sext i32 %i.04 to i64
- %arrayidx = getelementptr inbounds i8, i8* %a, i64 %idxprom
- store i8 0, i8* %arrayidx, align 1
- %0 = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %i.04, i32 1)
- %1 = extractvalue { i32, i1 } %0, 1
-; CHECK: for.body:
-; CHECK-NOT: @llvm.ssub.with.overflow.i32
-; CHECK: br i1 false, label %trap, label %cont, !nosanitize !0
- br i1 %1, label %trap, label %cont, !nosanitize !{}
-
-trap: ; preds = %for.body
- tail call void @llvm.trap(), !nosanitize !{}
- unreachable, !nosanitize !{}
-
-cont: ; preds = %for.body
- %2 = extractvalue { i32, i1 } %0, 0
- %cmp = icmp sgt i32 %2, -1
- br i1 %cmp, label %for.body, label %for.cond.cleanup
-}
-
-define void @f_usub(i8* nocapture %a) {
-; CHECK-LABEL: @f_usub(
-entry:
- br label %for.body
-
-for.cond.cleanup: ; preds = %cont
- ret void
-
-for.body: ; preds = %entry, %cont
- %i.04 = phi i32 [ 15, %entry ], [ %2, %cont ]
- %idxprom = sext i32 %i.04 to i64
- %arrayidx = getelementptr inbounds i8, i8* %a, i64 %idxprom
- store i8 0, i8* %arrayidx, align 1
- %0 = tail call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %i.04, i32 1)
- %1 = extractvalue { i32, i1 } %0, 1
-
-; It is theoretically possible to prove this, but SCEV cannot
-; represent non-unsigned-wrapping subtraction operations.
-
-; CHECK: for.body:
-; CHECK: [[COND:%[^ ]+]] = extractvalue { i32, i1 } %1, 1
-; CHECK-NEXT: br i1 [[COND]], label %trap, label %cont, !nosanitize !0
- br i1 %1, label %trap, label %cont, !nosanitize !{}
-
-trap: ; preds = %for.body
- tail call void @llvm.trap(), !nosanitize !{}
- unreachable, !nosanitize !{}
-
-cont: ; preds = %for.body
- %2 = extractvalue { i32, i1 } %0, 0
- %cmp = icmp sgt i32 %2, -1
- br i1 %cmp, label %for.body, label %for.cond.cleanup
-}
-
-declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
-declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
-declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
-declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
-declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
-declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
-
-declare void @llvm.trap() #2
Modified: llvm/trunk/test/Transforms/IndVarSimplify/overflowcheck.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/IndVarSimplify/overflowcheck.ll?rev=265950&r1=265949&r2=265950&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/IndVarSimplify/overflowcheck.ll (original)
+++ llvm/trunk/test/Transforms/IndVarSimplify/overflowcheck.ll Mon Apr 11 10:26:18 2016
@@ -10,7 +10,7 @@ target triple = "x86_64-apple-macosx"
; CHECK-LABEL: loop2:
; CHECK-NOT: extractvalue
; CHECK: add nuw
-; CHECK-NOT: @llvm.sadd.with.overflow
+; CHECK: @llvm.sadd.with.overflow
; CHECK-LABEL: loop3:
; CHECK-NOT: extractvalue
; CHECK: ret