[llvm] r350507 - [CallSite removal] Move the verifier to use `CallBase` instead of the
Chandler Carruth via llvm-commits
llvm-commits at lists.llvm.org
Sun Jan 6 23:02:34 PST 2019
Author: chandlerc
Date: Sun Jan 6 23:02:34 2019
New Revision: 350507
URL: http://llvm.org/viewvc/llvm-project?rev=350507&view=rev
Log:
[CallSite removal] Move the verifier to use `CallBase` instead of the
`CallSite` wrapper.
Mostly mechanical, but I've tried to tidy up code where it made sense to
do so.
Differential Revision: https://reviews.llvm.org/D56143
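For context, a minimal sketch of the translation pattern applied throughout the patch (this sketch is not taken from the diff itself, and checkIDArg is a hypothetical helper, not a Verifier method): a check written against the legacy CallSite wrapper versus the same check written directly against CallBase. Since CallBase is itself an Instruction, the separate getInstruction() step for diagnostics disappears, and getArgument(i) becomes getArgOperand(i).

// Sketch only, assuming LLVM headers of this era (CallSite.h still in tree).
#include "llvm/IR/CallSite.h"    // legacy wrapper being removed
#include "llvm/IR/InstrTypes.h"  // CallBase
#include "llvm/IR/Constants.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// Old style: the wrapper is passed by value, the underlying instruction
// is fetched separately for diagnostics, and args come from getArgument().
static bool checkIDArg(CallSite CS) {
  const Instruction *I = CS.getInstruction(); // only needed for reporting
  return I && isa<ConstantInt>(CS.getArgument(0));
}

// New style: CallBase is itself an Instruction, so the call can be passed
// to diagnostics directly, and arguments come from getArgOperand().
static bool checkIDArg(CallBase &Call) {
  return isa<ConstantInt>(Call.getArgOperand(0));
}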
Modified:
llvm/trunk/lib/IR/Verifier.cpp
Modified: llvm/trunk/lib/IR/Verifier.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/Verifier.cpp?rev=350507&r1=350506&r2=350507&view=diff
==============================================================================
--- llvm/trunk/lib/IR/Verifier.cpp (original)
+++ llvm/trunk/lib/IR/Verifier.cpp Sun Jan 6 23:02:34 2019
@@ -65,7 +65,6 @@
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
@@ -140,21 +139,20 @@ private:
}
void Write(const Value *V) {
- if (!V)
- return;
+ if (V)
+ Write(*V);
+ }
+
+ void Write(const Value &V) {
if (isa<Instruction>(V)) {
- V->print(*OS, MST);
+ V.print(*OS, MST);
*OS << '\n';
} else {
- V->printAsOperand(*OS, true, MST);
+ V.printAsOperand(*OS, true, MST);
*OS << '\n';
}
}
- void Write(ImmutableCallSite CS) {
- Write(CS.getInstruction());
- }
-
void Write(const Metadata *MD) {
if (!MD)
return;
@@ -448,6 +446,7 @@ private:
void visitBitCastInst(BitCastInst &I);
void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
void visitPHINode(PHINode &PN);
+ void visitCallBase(CallBase &Call);
void visitUnaryOperator(UnaryOperator &U);
void visitBinaryOperator(BinaryOperator &B);
void visitICmpInst(ICmpInst &IC);
@@ -471,7 +470,7 @@ private:
void visitSelectInst(SelectInst &SI);
void visitUserOp1(Instruction &I);
void visitUserOp2(Instruction &I) { visitUserOp1(I); }
- void visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS);
+ void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
@@ -491,8 +490,7 @@ private:
void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
void visitCleanupReturnInst(CleanupReturnInst &CRI);
- void verifyCallSite(CallSite CS);
- void verifySwiftErrorCallSite(CallSite CS, const Value *SwiftErrorVal);
+ void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
void verifySwiftErrorValue(const Value *SwiftErrorVal);
void verifyMustTailCall(CallInst &CI);
bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
@@ -507,7 +505,7 @@ private:
void visitConstantExprsRecursively(const Constant *EntryC);
void visitConstantExpr(const ConstantExpr *CE);
- void verifyStatepoint(ImmutableCallSite CS);
+ void verifyStatepoint(const CallBase &Call);
void verifyFrameRecoverIndices();
void verifySiblingFuncletUnwinds();
@@ -1893,134 +1891,136 @@ bool Verifier::verifyAttributeCount(Attr
}
/// Verify that statepoint intrinsic is well formed.
-void Verifier::verifyStatepoint(ImmutableCallSite CS) {
- assert(CS.getCalledFunction() &&
- CS.getCalledFunction()->getIntrinsicID() ==
- Intrinsic::experimental_gc_statepoint);
-
- const Instruction &CI = *CS.getInstruction();
+void Verifier::verifyStatepoint(const CallBase &Call) {
+ assert(Call.getCalledFunction() &&
+ Call.getCalledFunction()->getIntrinsicID() ==
+ Intrinsic::experimental_gc_statepoint);
- Assert(!CS.doesNotAccessMemory() && !CS.onlyReadsMemory() &&
- !CS.onlyAccessesArgMemory(),
+ Assert(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
+ !Call.onlyAccessesArgMemory(),
"gc.statepoint must read and write all memory to preserve "
"reordering restrictions required by safepoint semantics",
- &CI);
+ Call);
- const Value *IDV = CS.getArgument(0);
+ const Value *IDV = Call.getArgOperand(0);
Assert(isa<ConstantInt>(IDV), "gc.statepoint ID must be a constant integer",
- &CI);
+ Call);
- const Value *NumPatchBytesV = CS.getArgument(1);
+ const Value *NumPatchBytesV = Call.getArgOperand(1);
Assert(isa<ConstantInt>(NumPatchBytesV),
"gc.statepoint number of patchable bytes must be a constant integer",
- &CI);
+ Call);
const int64_t NumPatchBytes =
cast<ConstantInt>(NumPatchBytesV)->getSExtValue();
assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
- Assert(NumPatchBytes >= 0, "gc.statepoint number of patchable bytes must be "
- "positive",
- &CI);
+ Assert(NumPatchBytes >= 0,
+ "gc.statepoint number of patchable bytes must be "
+ "positive",
+ Call);
- const Value *Target = CS.getArgument(2);
+ const Value *Target = Call.getArgOperand(2);
auto *PT = dyn_cast<PointerType>(Target->getType());
Assert(PT && PT->getElementType()->isFunctionTy(),
- "gc.statepoint callee must be of function pointer type", &CI, Target);
+ "gc.statepoint callee must be of function pointer type", Call, Target);
FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
- const Value *NumCallArgsV = CS.getArgument(3);
+ const Value *NumCallArgsV = Call.getArgOperand(3);
Assert(isa<ConstantInt>(NumCallArgsV),
"gc.statepoint number of arguments to underlying call "
"must be constant integer",
- &CI);
+ Call);
const int NumCallArgs = cast<ConstantInt>(NumCallArgsV)->getZExtValue();
Assert(NumCallArgs >= 0,
"gc.statepoint number of arguments to underlying call "
"must be positive",
- &CI);
+ Call);
const int NumParams = (int)TargetFuncType->getNumParams();
if (TargetFuncType->isVarArg()) {
Assert(NumCallArgs >= NumParams,
- "gc.statepoint mismatch in number of vararg call args", &CI);
+ "gc.statepoint mismatch in number of vararg call args", Call);
// TODO: Remove this limitation
Assert(TargetFuncType->getReturnType()->isVoidTy(),
"gc.statepoint doesn't support wrapping non-void "
"vararg functions yet",
- &CI);
+ Call);
} else
Assert(NumCallArgs == NumParams,
- "gc.statepoint mismatch in number of call args", &CI);
+ "gc.statepoint mismatch in number of call args", Call);
- const Value *FlagsV = CS.getArgument(4);
+ const Value *FlagsV = Call.getArgOperand(4);
Assert(isa<ConstantInt>(FlagsV),
- "gc.statepoint flags must be constant integer", &CI);
+ "gc.statepoint flags must be constant integer", Call);
const uint64_t Flags = cast<ConstantInt>(FlagsV)->getZExtValue();
Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
- "unknown flag used in gc.statepoint flags argument", &CI);
+ "unknown flag used in gc.statepoint flags argument", Call);
// Verify that the types of the call parameter arguments match
// the type of the wrapped callee.
- AttributeList Attrs = CS.getAttributes();
+ AttributeList Attrs = Call.getAttributes();
for (int i = 0; i < NumParams; i++) {
Type *ParamType = TargetFuncType->getParamType(i);
- Type *ArgType = CS.getArgument(5 + i)->getType();
+ Type *ArgType = Call.getArgOperand(5 + i)->getType();
Assert(ArgType == ParamType,
"gc.statepoint call argument does not match wrapped "
"function type",
- &CI);
+ Call);
if (TargetFuncType->isVarArg()) {
AttributeSet ArgAttrs = Attrs.getParamAttributes(5 + i);
Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
- "Attribute 'sret' cannot be used for vararg call arguments!", &CI);
+ "Attribute 'sret' cannot be used for vararg call arguments!",
+ Call);
}
}
const int EndCallArgsInx = 4 + NumCallArgs;
- const Value *NumTransitionArgsV = CS.getArgument(EndCallArgsInx+1);
+ const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
Assert(isa<ConstantInt>(NumTransitionArgsV),
"gc.statepoint number of transition arguments "
"must be constant integer",
- &CI);
+ Call);
const int NumTransitionArgs =
cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
Assert(NumTransitionArgs >= 0,
- "gc.statepoint number of transition arguments must be positive", &CI);
+ "gc.statepoint number of transition arguments must be positive", Call);
const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
- const Value *NumDeoptArgsV = CS.getArgument(EndTransitionArgsInx+1);
+ const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
Assert(isa<ConstantInt>(NumDeoptArgsV),
"gc.statepoint number of deoptimization arguments "
"must be constant integer",
- &CI);
+ Call);
const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
- Assert(NumDeoptArgs >= 0, "gc.statepoint number of deoptimization arguments "
- "must be positive",
- &CI);
+ Assert(NumDeoptArgs >= 0,
+ "gc.statepoint number of deoptimization arguments "
+ "must be positive",
+ Call);
const int ExpectedNumArgs =
7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
- Assert(ExpectedNumArgs <= (int)CS.arg_size(),
- "gc.statepoint too few arguments according to length fields", &CI);
+ Assert(ExpectedNumArgs <= (int)Call.arg_size(),
+ "gc.statepoint too few arguments according to length fields", Call);
// Check that the only uses of this gc.statepoint are gc.result or
// gc.relocate calls which are tied to this statepoint and thus part
// of the same statepoint sequence
- for (const User *U : CI.users()) {
- const CallInst *Call = dyn_cast<const CallInst>(U);
- Assert(Call, "illegal use of statepoint token", &CI, U);
- if (!Call) continue;
- Assert(isa<GCRelocateInst>(Call) || isa<GCResultInst>(Call),
+ for (const User *U : Call.users()) {
+ const CallInst *UserCall = dyn_cast<const CallInst>(U);
+ Assert(UserCall, "illegal use of statepoint token", Call, U);
+ if (!UserCall)
+ continue;
+ Assert(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
"gc.result or gc.relocate are the only value uses "
"of a gc.statepoint",
- &CI, U);
- if (isa<GCResultInst>(Call)) {
- Assert(Call->getArgOperand(0) == &CI,
- "gc.result connected to wrong gc.statepoint", &CI, Call);
+ Call, U);
+ if (isa<GCResultInst>(UserCall)) {
+ Assert(UserCall->getArgOperand(0) == &Call,
+ "gc.result connected to wrong gc.statepoint", Call, UserCall);
} else if (isa<GCRelocateInst>(Call)) {
- Assert(Call->getArgOperand(0) == &CI,
- "gc.relocate connected to wrong gc.statepoint", &CI, Call);
+ Assert(UserCall->getArgOperand(0) == &Call,
+ "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
}
}
@@ -2745,77 +2745,79 @@ void Verifier::visitPHINode(PHINode &PN)
visitInstruction(PN);
}
-void Verifier::verifyCallSite(CallSite CS) {
- Instruction *I = CS.getInstruction();
-
- Assert(CS.getCalledValue()->getType()->isPointerTy(),
- "Called function must be a pointer!", I);
- PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType());
+void Verifier::visitCallBase(CallBase &Call) {
+ Assert(Call.getCalledValue()->getType()->isPointerTy(),
+ "Called function must be a pointer!", Call);
+ PointerType *FPTy = cast<PointerType>(Call.getCalledValue()->getType());
Assert(FPTy->getElementType()->isFunctionTy(),
- "Called function is not pointer to function type!", I);
+ "Called function is not pointer to function type!", Call);
- Assert(FPTy->getElementType() == CS.getFunctionType(),
- "Called function is not the same type as the call!", I);
+ Assert(FPTy->getElementType() == Call.getFunctionType(),
+ "Called function is not the same type as the call!", Call);
- FunctionType *FTy = CS.getFunctionType();
+ FunctionType *FTy = Call.getFunctionType();
// Verify that the correct number of arguments are being passed
if (FTy->isVarArg())
- Assert(CS.arg_size() >= FTy->getNumParams(),
- "Called function requires more parameters than were provided!", I);
+ Assert(Call.arg_size() >= FTy->getNumParams(),
+ "Called function requires more parameters than were provided!",
+ Call);
else
- Assert(CS.arg_size() == FTy->getNumParams(),
- "Incorrect number of arguments passed to called function!", I);
+ Assert(Call.arg_size() == FTy->getNumParams(),
+ "Incorrect number of arguments passed to called function!", Call);
// Verify that all arguments to the call match the function type.
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- Assert(CS.getArgument(i)->getType() == FTy->getParamType(i),
+ Assert(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
"Call parameter type does not match function signature!",
- CS.getArgument(i), FTy->getParamType(i), I);
+ Call.getArgOperand(i), FTy->getParamType(i), Call);
- AttributeList Attrs = CS.getAttributes();
+ AttributeList Attrs = Call.getAttributes();
- Assert(verifyAttributeCount(Attrs, CS.arg_size()),
- "Attribute after last parameter!", I);
+ Assert(verifyAttributeCount(Attrs, Call.arg_size()),
+ "Attribute after last parameter!", Call);
if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) {
// Don't allow speculatable on call sites, unless the underlying function
// declaration is also speculatable.
- Function *Callee
- = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
+ Function *Callee =
+ dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
Assert(Callee && Callee->isSpeculatable(),
- "speculatable attribute may not apply to call sites", I);
+ "speculatable attribute may not apply to call sites", Call);
}
// Verify call attributes.
- verifyFunctionAttrs(FTy, Attrs, I);
+ verifyFunctionAttrs(FTy, Attrs, &Call);
// Conservatively check the inalloca argument.
// We have a bug if we can find that there is an underlying alloca without
// inalloca.
- if (CS.hasInAllocaArgument()) {
- Value *InAllocaArg = CS.getArgument(FTy->getNumParams() - 1);
+ if (Call.hasInAllocaArgument()) {
+ Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
Assert(AI->isUsedWithInAlloca(),
- "inalloca argument for call has mismatched alloca", AI, I);
+ "inalloca argument for call has mismatched alloca", AI, Call);
}
// For each argument of the callsite, if it has the swifterror argument,
// make sure the underlying alloca/parameter it comes from has a swifterror as
// well.
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- if (CS.paramHasAttr(i, Attribute::SwiftError)) {
- Value *SwiftErrorArg = CS.getArgument(i);
+ if (Call.paramHasAttr(i, Attribute::SwiftError)) {
+ Value *SwiftErrorArg = Call.getArgOperand(i);
if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
Assert(AI->isSwiftError(),
- "swifterror argument for call has mismatched alloca", AI, I);
+ "swifterror argument for call has mismatched alloca", AI, Call);
continue;
}
auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
- Assert(ArgI, "swifterror argument should come from an alloca or parameter", SwiftErrorArg, I);
+ Assert(ArgI,
+ "swifterror argument should come from an alloca or parameter",
+ SwiftErrorArg, Call);
Assert(ArgI->hasSwiftErrorAttr(),
- "swifterror argument for call has mismatched parameter", ArgI, I);
+ "swifterror argument for call has mismatched parameter", ArgI,
+ Call);
}
if (FTy->isVarArg()) {
@@ -2831,95 +2833,97 @@ void Verifier::verifyCallSite(CallSite C
}
// Check attributes on the varargs part.
- for (unsigned Idx = FTy->getNumParams(); Idx < CS.arg_size(); ++Idx) {
- Type *Ty = CS.getArgument(Idx)->getType();
+ for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
+ Type *Ty = Call.getArgOperand(Idx)->getType();
AttributeSet ArgAttrs = Attrs.getParamAttributes(Idx);
- verifyParameterAttrs(ArgAttrs, Ty, I);
+ verifyParameterAttrs(ArgAttrs, Ty, &Call);
if (ArgAttrs.hasAttribute(Attribute::Nest)) {
- Assert(!SawNest, "More than one parameter has attribute nest!", I);
+ Assert(!SawNest, "More than one parameter has attribute nest!", Call);
SawNest = true;
}
if (ArgAttrs.hasAttribute(Attribute::Returned)) {
Assert(!SawReturned, "More than one parameter has attribute returned!",
- I);
+ Call);
Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
"Incompatible argument and return types for 'returned' "
"attribute",
- I);
+ Call);
SawReturned = true;
}
// Statepoint intrinsic is vararg but the wrapped function may be not.
// Allow sret here and check the wrapped function in verifyStatepoint.
- if (CS.getCalledFunction() == nullptr ||
- CS.getCalledFunction()->getIntrinsicID() !=
- Intrinsic::experimental_gc_statepoint)
+ if (!Call.getCalledFunction() ||
+ Call.getCalledFunction()->getIntrinsicID() !=
+ Intrinsic::experimental_gc_statepoint)
Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
- "Attribute 'sret' cannot be used for vararg call arguments!", I);
+ "Attribute 'sret' cannot be used for vararg call arguments!",
+ Call);
if (ArgAttrs.hasAttribute(Attribute::InAlloca))
- Assert(Idx == CS.arg_size() - 1, "inalloca isn't on the last argument!",
- I);
+ Assert(Idx == Call.arg_size() - 1,
+ "inalloca isn't on the last argument!", Call);
}
}
// Verify that there's no metadata unless it's a direct call to an intrinsic.
- if (CS.getCalledFunction() == nullptr ||
- !CS.getCalledFunction()->getName().startswith("llvm.")) {
+ if (!Call.getCalledFunction() ||
+ !Call.getCalledFunction()->getName().startswith("llvm.")) {
for (Type *ParamTy : FTy->params()) {
Assert(!ParamTy->isMetadataTy(),
- "Function has metadata parameter but isn't an intrinsic", I);
+ "Function has metadata parameter but isn't an intrinsic", Call);
Assert(!ParamTy->isTokenTy(),
- "Function has token parameter but isn't an intrinsic", I);
+ "Function has token parameter but isn't an intrinsic", Call);
}
}
// Verify that indirect calls don't return tokens.
- if (CS.getCalledFunction() == nullptr)
+ if (!Call.getCalledFunction())
Assert(!FTy->getReturnType()->isTokenTy(),
"Return type cannot be token for indirect call!");
- if (Function *F = CS.getCalledFunction())
+ if (Function *F = Call.getCalledFunction())
if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
- visitIntrinsicCallSite(ID, CS);
+ visitIntrinsicCall(ID, Call);
// Verify that a callsite has at most one "deopt", at most one "funclet" and
// at most one "gc-transition" operand bundle.
bool FoundDeoptBundle = false, FoundFuncletBundle = false,
FoundGCTransitionBundle = false;
- for (unsigned i = 0, e = CS.getNumOperandBundles(); i < e; ++i) {
- OperandBundleUse BU = CS.getOperandBundleAt(i);
+ for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
+ OperandBundleUse BU = Call.getOperandBundleAt(i);
uint32_t Tag = BU.getTagID();
if (Tag == LLVMContext::OB_deopt) {
- Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", I);
+ Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
FoundDeoptBundle = true;
} else if (Tag == LLVMContext::OB_gc_transition) {
Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
- I);
+ Call);
FoundGCTransitionBundle = true;
} else if (Tag == LLVMContext::OB_funclet) {
- Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", I);
+ Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
FoundFuncletBundle = true;
Assert(BU.Inputs.size() == 1,
- "Expected exactly one funclet bundle operand", I);
+ "Expected exactly one funclet bundle operand", Call);
Assert(isa<FuncletPadInst>(BU.Inputs.front()),
"Funclet bundle operands should correspond to a FuncletPadInst",
- I);
+ Call);
}
}
// Verify that each inlinable callsite of a debug-info-bearing function in a
// debug-info-bearing function has a debug location attached to it. Failure to
// do so causes assertion failures when the inliner sets up inline scope info.
- if (I->getFunction()->getSubprogram() && CS.getCalledFunction() &&
- CS.getCalledFunction()->getSubprogram())
- AssertDI(I->getDebugLoc(), "inlinable function call in a function with "
- "debug info must have a !dbg location",
- I);
+ if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
+ Call.getCalledFunction()->getSubprogram())
+ AssertDI(Call.getDebugLoc(),
+ "inlinable function call in a function with "
+ "debug info must have a !dbg location",
+ Call);
- visitInstruction(*I);
+ visitInstruction(Call);
}
/// Two types are "congruent" if they are identical, or if they are both pointer
@@ -3014,14 +3018,14 @@ void Verifier::verifyMustTailCall(CallIn
}
void Verifier::visitCallInst(CallInst &CI) {
- verifyCallSite(&CI);
+ visitCallBase(CI);
if (CI.isMustTailCall())
verifyMustTailCall(CI);
}
void Verifier::visitInvokeInst(InvokeInst &II) {
- verifyCallSite(&II);
+ visitCallBase(II);
// Verify that the first non-PHI instruction of the unwind destination is an
// exception handling instruction.
@@ -3330,16 +3334,15 @@ void Verifier::visitStoreInst(StoreInst
}
/// Check that SwiftErrorVal is used as a swifterror argument in CS.
-void Verifier::verifySwiftErrorCallSite(CallSite CS,
- const Value *SwiftErrorVal) {
+void Verifier::verifySwiftErrorCall(CallBase &Call,
+ const Value *SwiftErrorVal) {
unsigned Idx = 0;
- for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
- I != E; ++I, ++Idx) {
+ for (auto I = Call.arg_begin(), E = Call.arg_end(); I != E; ++I, ++Idx) {
if (*I == SwiftErrorVal) {
- Assert(CS.paramHasAttr(Idx, Attribute::SwiftError),
+ Assert(Call.paramHasAttr(Idx, Attribute::SwiftError),
"swifterror value when used in a callsite should be marked "
"with swifterror attribute",
- SwiftErrorVal, CS);
+ SwiftErrorVal, Call);
}
}
}
@@ -3358,10 +3361,8 @@ void Verifier::verifySwiftErrorValue(con
Assert(StoreI->getOperand(1) == SwiftErrorVal,
"swifterror value should be the second operand when used "
"by stores", SwiftErrorVal, U);
- if (auto CallI = dyn_cast<CallInst>(U))
- verifySwiftErrorCallSite(const_cast<CallInst*>(CallI), SwiftErrorVal);
- if (auto II = dyn_cast<InvokeInst>(U))
- verifySwiftErrorCallSite(const_cast<InvokeInst*>(II), SwiftErrorVal);
+ if (auto *Call = dyn_cast<CallBase>(U))
+ verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
}
}
@@ -4078,8 +4079,8 @@ void Verifier::visitInstruction(Instruct
}
/// Allow intrinsics to be verified in different ways.
-void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
- Function *IF = CS.getCalledFunction();
+void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
+ Function *IF = Call.getCalledFunction();
Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
IF);
@@ -4125,15 +4126,15 @@ void Verifier::visitIntrinsicCallSite(In
// If the intrinsic takes MDNode arguments, verify that they are either global
// or are local to *this* function.
- for (Value *V : CS.args())
+ for (Value *V : Call.args())
if (auto *MD = dyn_cast<MetadataAsValue>(V))
- visitMetadataAsValue(*MD, CS.getCaller());
+ visitMetadataAsValue(*MD, Call.getCaller());
switch (ID) {
default:
break;
case Intrinsic::coro_id: {
- auto *InfoArg = CS.getArgOperand(3)->stripPointerCasts();
+ auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
if (isa<ConstantPointerNull>(InfoArg))
break;
auto *GV = dyn_cast<GlobalVariable>(InfoArg);
@@ -4148,10 +4149,10 @@ void Verifier::visitIntrinsicCallSite(In
}
case Intrinsic::ctlz: // llvm.ctlz
case Intrinsic::cttz: // llvm.cttz
- Assert(isa<ConstantInt>(CS.getArgOperand(1)),
+ Assert(isa<ConstantInt>(Call.getArgOperand(1)),
"is_zero_undef argument of bit counting intrinsics must be a "
"constant int",
- CS);
+ Call);
break;
case Intrinsic::experimental_constrained_fadd:
case Intrinsic::experimental_constrained_fsub:
@@ -4177,59 +4178,58 @@ void Verifier::visitIntrinsicCallSite(In
case Intrinsic::experimental_constrained_floor:
case Intrinsic::experimental_constrained_round:
case Intrinsic::experimental_constrained_trunc:
- visitConstrainedFPIntrinsic(
- cast<ConstrainedFPIntrinsic>(*CS.getInstruction()));
+ visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
break;
case Intrinsic::dbg_declare: // llvm.dbg.declare
- Assert(isa<MetadataAsValue>(CS.getArgOperand(0)),
- "invalid llvm.dbg.declare intrinsic call 1", CS);
- visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(*CS.getInstruction()));
+ Assert(isa<MetadataAsValue>(Call.getArgOperand(0)),
+ "invalid llvm.dbg.declare intrinsic call 1", Call);
+ visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
break;
case Intrinsic::dbg_addr: // llvm.dbg.addr
- visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(*CS.getInstruction()));
+ visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(Call));
break;
case Intrinsic::dbg_value: // llvm.dbg.value
- visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(*CS.getInstruction()));
+ visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
break;
case Intrinsic::dbg_label: // llvm.dbg.label
- visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(*CS.getInstruction()));
+ visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
break;
case Intrinsic::memcpy:
case Intrinsic::memmove:
case Intrinsic::memset: {
- const auto *MI = cast<MemIntrinsic>(CS.getInstruction());
+ const auto *MI = cast<MemIntrinsic>(&Call);
auto IsValidAlignment = [&](unsigned Alignment) -> bool {
return Alignment == 0 || isPowerOf2_32(Alignment);
};
Assert(IsValidAlignment(MI->getDestAlignment()),
"alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
- CS);
+ Call);
if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
Assert(IsValidAlignment(MTI->getSourceAlignment()),
"alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
- CS);
+ Call);
}
- Assert(isa<ConstantInt>(CS.getArgOperand(3)),
+ Assert(isa<ConstantInt>(Call.getArgOperand(3)),
"isvolatile argument of memory intrinsics must be a constant int",
- CS);
+ Call);
break;
}
case Intrinsic::memcpy_element_unordered_atomic:
case Intrinsic::memmove_element_unordered_atomic:
case Intrinsic::memset_element_unordered_atomic: {
- const auto *AMI = cast<AtomicMemIntrinsic>(CS.getInstruction());
+ const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
ConstantInt *ElementSizeCI =
dyn_cast<ConstantInt>(AMI->getRawElementSizeInBytes());
Assert(ElementSizeCI,
"element size of the element-wise unordered atomic memory "
"intrinsic must be a constant int",
- CS);
+ Call);
const APInt &ElementSizeVal = ElementSizeCI->getValue();
Assert(ElementSizeVal.isPowerOf2(),
"element size of the element-wise atomic memory intrinsic "
"must be a power of 2",
- CS);
+ Call);
if (auto *LengthCI = dyn_cast<ConstantInt>(AMI->getLength())) {
uint64_t Length = LengthCI->getZExtValue();
@@ -4237,7 +4237,7 @@ void Verifier::visitIntrinsicCallSite(In
Assert((Length % ElementSize) == 0,
"constant length must be a multiple of the element size in the "
"element-wise atomic memory intrinsic",
- CS);
+ Call);
}
auto IsValidAlignment = [&](uint64_t Alignment) {
@@ -4245,11 +4245,11 @@ void Verifier::visitIntrinsicCallSite(In
};
uint64_t DstAlignment = AMI->getDestAlignment();
Assert(IsValidAlignment(DstAlignment),
- "incorrect alignment of the destination argument", CS);
+ "incorrect alignment of the destination argument", Call);
if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
uint64_t SrcAlignment = AMT->getSourceAlignment();
Assert(IsValidAlignment(SrcAlignment),
- "incorrect alignment of the source argument", CS);
+ "incorrect alignment of the source argument", Call);
}
break;
}
@@ -4258,76 +4258,76 @@ void Verifier::visitIntrinsicCallSite(In
case Intrinsic::gcread:
if (ID == Intrinsic::gcroot) {
AllocaInst *AI =
- dyn_cast<AllocaInst>(CS.getArgOperand(0)->stripPointerCasts());
- Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", CS);
- Assert(isa<Constant>(CS.getArgOperand(1)),
- "llvm.gcroot parameter #2 must be a constant.", CS);
+ dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
+ Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
+ Assert(isa<Constant>(Call.getArgOperand(1)),
+ "llvm.gcroot parameter #2 must be a constant.", Call);
if (!AI->getAllocatedType()->isPointerTy()) {
- Assert(!isa<ConstantPointerNull>(CS.getArgOperand(1)),
+ Assert(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
"llvm.gcroot parameter #1 must either be a pointer alloca, "
"or argument #2 must be a non-null constant.",
- CS);
+ Call);
}
}
- Assert(CS.getParent()->getParent()->hasGC(),
- "Enclosing function does not use GC.", CS);
+ Assert(Call.getParent()->getParent()->hasGC(),
+ "Enclosing function does not use GC.", Call);
break;
case Intrinsic::init_trampoline:
- Assert(isa<Function>(CS.getArgOperand(1)->stripPointerCasts()),
+ Assert(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
"llvm.init_trampoline parameter #2 must resolve to a function.",
- CS);
+ Call);
break;
case Intrinsic::prefetch:
- Assert(isa<ConstantInt>(CS.getArgOperand(1)) &&
- isa<ConstantInt>(CS.getArgOperand(2)) &&
- cast<ConstantInt>(CS.getArgOperand(1))->getZExtValue() < 2 &&
- cast<ConstantInt>(CS.getArgOperand(2))->getZExtValue() < 4,
- "invalid arguments to llvm.prefetch", CS);
+ Assert(isa<ConstantInt>(Call.getArgOperand(1)) &&
+ isa<ConstantInt>(Call.getArgOperand(2)) &&
+ cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 &&
+ cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
+ "invalid arguments to llvm.prefetch", Call);
break;
case Intrinsic::stackprotector:
- Assert(isa<AllocaInst>(CS.getArgOperand(1)->stripPointerCasts()),
- "llvm.stackprotector parameter #2 must resolve to an alloca.", CS);
+ Assert(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
+ "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
break;
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
- Assert(isa<ConstantInt>(CS.getArgOperand(0)),
+ Assert(isa<ConstantInt>(Call.getArgOperand(0)),
"size argument of memory use markers must be a constant integer",
- CS);
+ Call);
break;
case Intrinsic::invariant_end:
- Assert(isa<ConstantInt>(CS.getArgOperand(1)),
- "llvm.invariant.end parameter #2 must be a constant integer", CS);
+ Assert(isa<ConstantInt>(Call.getArgOperand(1)),
+ "llvm.invariant.end parameter #2 must be a constant integer", Call);
break;
case Intrinsic::localescape: {
- BasicBlock *BB = CS.getParent();
+ BasicBlock *BB = Call.getParent();
Assert(BB == &BB->getParent()->front(),
- "llvm.localescape used outside of entry block", CS);
+ "llvm.localescape used outside of entry block", Call);
Assert(!SawFrameEscape,
- "multiple calls to llvm.localescape in one function", CS);
- for (Value *Arg : CS.args()) {
+ "multiple calls to llvm.localescape in one function", Call);
+ for (Value *Arg : Call.args()) {
if (isa<ConstantPointerNull>(Arg))
continue; // Null values are allowed as placeholders.
auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
Assert(AI && AI->isStaticAlloca(),
- "llvm.localescape only accepts static allocas", CS);
+ "llvm.localescape only accepts static allocas", Call);
}
- FrameEscapeInfo[BB->getParent()].first = CS.getNumArgOperands();
+ FrameEscapeInfo[BB->getParent()].first = Call.getNumArgOperands();
SawFrameEscape = true;
break;
}
case Intrinsic::localrecover: {
- Value *FnArg = CS.getArgOperand(0)->stripPointerCasts();
+ Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
Function *Fn = dyn_cast<Function>(FnArg);
Assert(Fn && !Fn->isDeclaration(),
"llvm.localrecover first "
"argument must be function defined in this module",
- CS);
- auto *IdxArg = dyn_cast<ConstantInt>(CS.getArgOperand(2));
+ Call);
+ auto *IdxArg = dyn_cast<ConstantInt>(Call.getArgOperand(2));
Assert(IdxArg, "idx argument of llvm.localrecover must be a constant int",
- CS);
+ Call);
auto &Entry = FrameEscapeInfo[Fn];
Entry.second = unsigned(
std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
@@ -4335,45 +4335,46 @@ void Verifier::visitIntrinsicCallSite(In
}
case Intrinsic::experimental_gc_statepoint:
- Assert(!CS.isInlineAsm(),
- "gc.statepoint support for inline assembly unimplemented", CS);
- Assert(CS.getParent()->getParent()->hasGC(),
- "Enclosing function does not use GC.", CS);
+ if (auto *CI = dyn_cast<CallInst>(&Call))
+ Assert(!CI->isInlineAsm(),
+ "gc.statepoint support for inline assembly unimplemented", CI);
+ Assert(Call.getParent()->getParent()->hasGC(),
+ "Enclosing function does not use GC.", Call);
- verifyStatepoint(CS);
+ verifyStatepoint(Call);
break;
case Intrinsic::experimental_gc_result: {
- Assert(CS.getParent()->getParent()->hasGC(),
- "Enclosing function does not use GC.", CS);
+ Assert(Call.getParent()->getParent()->hasGC(),
+ "Enclosing function does not use GC.", Call);
// Are we tied to a statepoint properly?
- CallSite StatepointCS(CS.getArgOperand(0));
+ const auto *StatepointCall = dyn_cast<CallBase>(Call.getArgOperand(0));
const Function *StatepointFn =
- StatepointCS.getInstruction() ? StatepointCS.getCalledFunction() : nullptr;
+ StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
Assert(StatepointFn && StatepointFn->isDeclaration() &&
StatepointFn->getIntrinsicID() ==
Intrinsic::experimental_gc_statepoint,
- "gc.result operand #1 must be from a statepoint", CS,
- CS.getArgOperand(0));
+ "gc.result operand #1 must be from a statepoint", Call,
+ Call.getArgOperand(0));
// Assert that result type matches wrapped callee.
- const Value *Target = StatepointCS.getArgument(2);
+ const Value *Target = StatepointCall->getArgOperand(2);
auto *PT = cast<PointerType>(Target->getType());
auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
- Assert(CS.getType() == TargetFuncType->getReturnType(),
- "gc.result result type does not match wrapped callee", CS);
+ Assert(Call.getType() == TargetFuncType->getReturnType(),
+ "gc.result result type does not match wrapped callee", Call);
break;
}
case Intrinsic::experimental_gc_relocate: {
- Assert(CS.getNumArgOperands() == 3, "wrong number of arguments", CS);
+ Assert(Call.getNumArgOperands() == 3, "wrong number of arguments", Call);
- Assert(isa<PointerType>(CS.getType()->getScalarType()),
- "gc.relocate must return a pointer or a vector of pointers", CS);
+ Assert(isa<PointerType>(Call.getType()->getScalarType()),
+ "gc.relocate must return a pointer or a vector of pointers", Call);
// Check that this relocate is correctly tied to the statepoint
// This is case for relocate on the unwinding path of an invoke statepoint
if (LandingPadInst *LandingPad =
- dyn_cast<LandingPadInst>(CS.getArgOperand(0))) {
+ dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
const BasicBlock *InvokeBB =
LandingPad->getParent()->getUniquePredecessor();
@@ -4386,161 +4387,160 @@ void Verifier::visitIntrinsicCallSite(In
InvokeBB);
Assert(isStatepoint(InvokeBB->getTerminator()),
"gc relocate should be linked to a statepoint", InvokeBB);
- }
- else {
+ } else {
// In all other cases relocate should be tied to the statepoint directly.
// This covers relocates on a normal return path of invoke statepoint and
// relocates of a call statepoint.
- auto Token = CS.getArgOperand(0);
+ auto Token = Call.getArgOperand(0);
Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)),
- "gc relocate is incorrectly tied to the statepoint", CS, Token);
+ "gc relocate is incorrectly tied to the statepoint", Call, Token);
}
// Verify rest of the relocate arguments.
-
- ImmutableCallSite StatepointCS(
- cast<GCRelocateInst>(*CS.getInstruction()).getStatepoint());
+ const CallBase &StatepointCall =
+ *cast<CallBase>(cast<GCRelocateInst>(Call).getStatepoint());
// Both the base and derived must be piped through the safepoint.
- Value* Base = CS.getArgOperand(1);
+ Value *Base = Call.getArgOperand(1);
Assert(isa<ConstantInt>(Base),
- "gc.relocate operand #2 must be integer offset", CS);
+ "gc.relocate operand #2 must be integer offset", Call);
- Value* Derived = CS.getArgOperand(2);
+ Value *Derived = Call.getArgOperand(2);
Assert(isa<ConstantInt>(Derived),
- "gc.relocate operand #3 must be integer offset", CS);
+ "gc.relocate operand #3 must be integer offset", Call);
const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
// Check the bounds
- Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCS.arg_size(),
- "gc.relocate: statepoint base index out of bounds", CS);
- Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCS.arg_size(),
- "gc.relocate: statepoint derived index out of bounds", CS);
+ Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCall.arg_size(),
+ "gc.relocate: statepoint base index out of bounds", Call);
+ Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCall.arg_size(),
+ "gc.relocate: statepoint derived index out of bounds", Call);
// Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
// section of the statepoint's argument.
- Assert(StatepointCS.arg_size() > 0,
+ Assert(StatepointCall.arg_size() > 0,
"gc.statepoint: insufficient arguments");
- Assert(isa<ConstantInt>(StatepointCS.getArgument(3)),
+ Assert(isa<ConstantInt>(StatepointCall.getArgOperand(3)),
"gc.statement: number of call arguments must be constant integer");
const unsigned NumCallArgs =
- cast<ConstantInt>(StatepointCS.getArgument(3))->getZExtValue();
- Assert(StatepointCS.arg_size() > NumCallArgs + 5,
+ cast<ConstantInt>(StatepointCall.getArgOperand(3))->getZExtValue();
+ Assert(StatepointCall.arg_size() > NumCallArgs + 5,
"gc.statepoint: mismatch in number of call arguments");
- Assert(isa<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)),
+ Assert(isa<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5)),
"gc.statepoint: number of transition arguments must be "
"a constant integer");
const int NumTransitionArgs =
- cast<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5))
+ cast<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5))
->getZExtValue();
const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
- Assert(isa<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)),
+ Assert(isa<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart)),
"gc.statepoint: number of deoptimization arguments must be "
"a constant integer");
const int NumDeoptArgs =
- cast<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart))
+ cast<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart))
->getZExtValue();
const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
- const int GCParamArgsEnd = StatepointCS.arg_size();
+ const int GCParamArgsEnd = StatepointCall.arg_size();
Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
"gc.relocate: statepoint base index doesn't fall within the "
"'gc parameters' section of the statepoint call",
- CS);
+ Call);
Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
"gc.relocate: statepoint derived index doesn't fall within the "
"'gc parameters' section of the statepoint call",
- CS);
+ Call);
// Relocated value must be either a pointer type or vector-of-pointer type,
// but gc_relocate does not need to return the same pointer type as the
// relocated pointer. It can be casted to the correct type later if it's
// desired. However, they must have the same address space and 'vectorness'
- GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction());
+ GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
- "gc.relocate: relocated value must be a gc pointer", CS);
+ "gc.relocate: relocated value must be a gc pointer", Call);
- auto ResultType = CS.getType();
+ auto ResultType = Call.getType();
auto DerivedType = Relocate.getDerivedPtr()->getType();
Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
"gc.relocate: vector relocates to vector and pointer to pointer",
- CS);
+ Call);
Assert(
ResultType->getPointerAddressSpace() ==
DerivedType->getPointerAddressSpace(),
"gc.relocate: relocating a pointer shouldn't change its address space",
- CS);
+ Call);
break;
}
case Intrinsic::eh_exceptioncode:
case Intrinsic::eh_exceptionpointer: {
- Assert(isa<CatchPadInst>(CS.getArgOperand(0)),
- "eh.exceptionpointer argument must be a catchpad", CS);
+ Assert(isa<CatchPadInst>(Call.getArgOperand(0)),
+ "eh.exceptionpointer argument must be a catchpad", Call);
break;
}
case Intrinsic::masked_load: {
- Assert(CS.getType()->isVectorTy(), "masked_load: must return a vector", CS);
+ Assert(Call.getType()->isVectorTy(), "masked_load: must return a vector",
+ Call);
- Value *Ptr = CS.getArgOperand(0);
- //Value *Alignment = CS.getArgOperand(1);
- Value *Mask = CS.getArgOperand(2);
- Value *PassThru = CS.getArgOperand(3);
- Assert(Mask->getType()->isVectorTy(),
- "masked_load: mask must be vector", CS);
+ Value *Ptr = Call.getArgOperand(0);
+ // Value *Alignment = Call.getArgOperand(1);
+ Value *Mask = Call.getArgOperand(2);
+ Value *PassThru = Call.getArgOperand(3);
+ Assert(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
+ Call);
// DataTy is the overloaded type
Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
- Assert(DataTy == CS.getType(),
- "masked_load: return must match pointer type", CS);
+ Assert(DataTy == Call.getType(),
+ "masked_load: return must match pointer type", Call);
Assert(PassThru->getType() == DataTy,
- "masked_load: pass through and data type must match", CS);
+ "masked_load: pass through and data type must match", Call);
Assert(Mask->getType()->getVectorNumElements() ==
- DataTy->getVectorNumElements(),
- "masked_load: vector mask must be same length as data", CS);
+ DataTy->getVectorNumElements(),
+ "masked_load: vector mask must be same length as data", Call);
break;
}
case Intrinsic::masked_store: {
- Value *Val = CS.getArgOperand(0);
- Value *Ptr = CS.getArgOperand(1);
- //Value *Alignment = CS.getArgOperand(2);
- Value *Mask = CS.getArgOperand(3);
- Assert(Mask->getType()->isVectorTy(),
- "masked_store: mask must be vector", CS);
+ Value *Val = Call.getArgOperand(0);
+ Value *Ptr = Call.getArgOperand(1);
+ // Value *Alignment = Call.getArgOperand(2);
+ Value *Mask = Call.getArgOperand(3);
+ Assert(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
+ Call);
// DataTy is the overloaded type
Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
Assert(DataTy == Val->getType(),
- "masked_store: storee must match pointer type", CS);
+ "masked_store: storee must match pointer type", Call);
Assert(Mask->getType()->getVectorNumElements() ==
- DataTy->getVectorNumElements(),
- "masked_store: vector mask must be same length as data", CS);
+ DataTy->getVectorNumElements(),
+ "masked_store: vector mask must be same length as data", Call);
break;
}
case Intrinsic::experimental_guard: {
- Assert(CS.isCall(), "experimental_guard cannot be invoked", CS);
- Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
+ Assert(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
+ Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
"experimental_guard must have exactly one "
"\"deopt\" operand bundle");
break;
}
case Intrinsic::experimental_deoptimize: {
- Assert(CS.isCall(), "experimental_deoptimize cannot be invoked", CS);
- Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
+ Assert(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
+ Call);
+ Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
"experimental_deoptimize must have exactly one "
"\"deopt\" operand bundle");
- Assert(CS.getType() == CS.getInstruction()->getFunction()->getReturnType(),
+ Assert(Call.getType() == Call.getFunction()->getReturnType(),
"experimental_deoptimize return type must match caller return type");
- if (CS.isCall()) {
- auto *DeoptCI = CS.getInstruction();
- auto *RI = dyn_cast<ReturnInst>(DeoptCI->getNextNode());
+ if (isa<CallInst>(Call)) {
+ auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
Assert(RI,
"calls to experimental_deoptimize must be followed by a return");
- if (!CS.getType()->isVoidTy() && RI)
- Assert(RI->getReturnValue() == DeoptCI,
+ if (!Call.getType()->isVoidTy() && RI)
+ Assert(RI->getReturnValue() == &Call,
"calls to experimental_deoptimize must be followed by a return "
"of the value computed by experimental_deoptimize");
}
@@ -4551,8 +4551,8 @@ void Verifier::visitIntrinsicCallSite(In
case Intrinsic::uadd_sat:
case Intrinsic::ssub_sat:
case Intrinsic::usub_sat: {
- Value *Op1 = CS.getArgOperand(0);
- Value *Op2 = CS.getArgOperand(1);
+ Value *Op1 = Call.getArgOperand(0);
+ Value *Op2 = Call.getArgOperand(1);
Assert(Op1->getType()->isIntOrIntVectorTy(),
"first operand of [us][add|sub]_sat must be an int type or vector "
"of ints");
@@ -4562,8 +4562,8 @@ void Verifier::visitIntrinsicCallSite(In
break;
}
case Intrinsic::smul_fix: {
- Value *Op1 = CS.getArgOperand(0);
- Value *Op2 = CS.getArgOperand(1);
+ Value *Op1 = Call.getArgOperand(0);
+ Value *Op2 = Call.getArgOperand(1);
Assert(Op1->getType()->isIntOrIntVectorTy(),
"first operand of smul_fix must be an int type or vector "
"of ints");
@@ -4571,7 +4571,7 @@ void Verifier::visitIntrinsicCallSite(In
"second operand of smul_fix must be an int type or vector "
"of ints");
- auto *Op3 = dyn_cast<ConstantInt>(CS.getArgOperand(2));
+ auto *Op3 = dyn_cast<ConstantInt>(Call.getArgOperand(2));
Assert(Op3, "third argument of smul_fix must be a constant integer");
Assert(Op3->getType()->getBitWidth() <= 32,
"third argument of smul_fix must fit within 32 bits");