[llvm] d87b9b8 - Allow invokable sub-classes of IntrinsicInst

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 20 15:06:00 PDT 2021


Author: Philip Reames
Date: 2021-04-20T15:03:49-07:00
New Revision: d87b9b81ccb95217181ce75515c6c68bbb408ca4

URL: https://github.com/llvm/llvm-project/commit/d87b9b81ccb95217181ce75515c6c68bbb408ca4
DIFF: https://github.com/llvm/llvm-project/commit/d87b9b81ccb95217181ce75515c6c68bbb408ca4.diff

LOG: Allow invokable sub-classes of IntrinsicInst

It used to be that all of our intrinsics were call instructions, but over time, we've added more and more invokable intrinsics. According to the verifier, we're up to 8 right now. As IntrinsicInst is a sub-class of CallInst, this puts us in an awkward spot where the idiomatic means to check for an intrinsic has a false negative if the intrinsic is invoked.

This change switches IntrinsicInst from being a sub-class of CallInst to being a subclass of CallBase. This allows invoked intrinsics to be instances of IntrinsicInst, at the cost of requiring a few more casts to CallInst in places where the intrinsic really is known to be a call, not an invoke.

After this lands and has baked for a couple days, planned cleanups:
    Make GCStatepointInst an IntrinsicInst subclass.
    Merge intrinsic handling in InstCombine and use idiomatic visitIntrinsicInst entry point for InstVisitor.
    Do the same in SelectionDAG.
    Do the same in FastISel.

Differential Revision: https://reviews.llvm.org/D99976

Added: 
    

Modified: 
    llvm/include/llvm/IR/InstVisitor.h
    llvm/include/llvm/IR/IntrinsicInst.h
    llvm/lib/Analysis/TypeMetadataUtils.cpp
    llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
    llvm/lib/CodeGen/ShadowStackGCLowering.cpp
    llvm/lib/CodeGen/StackProtector.cpp
    llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
    llvm/lib/Target/AArch64/AArch64FastISel.cpp
    llvm/lib/Target/Mips/MipsFastISel.cpp
    llvm/lib/Target/X86/X86FastISel.cpp
    llvm/lib/Transforms/InstCombine/InstCombineInternal.h
    llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/IR/InstVisitor.h b/llvm/include/llvm/IR/InstVisitor.h
index 4dbdc66d13663..d42eceea135b3 100644
--- a/llvm/include/llvm/IR/InstVisitor.h
+++ b/llvm/include/llvm/IR/InstVisitor.h
@@ -154,8 +154,8 @@ class InstVisitor {
   // instruction to a special delegation helper.
 #define HANDLE_INST(NUM, OPCODE, CLASS) \
     RetTy visit##OPCODE(CLASS &I) { \
-      if (NUM == Instruction::Call) \
-        return delegateCallInst(I); \
+      if (NUM == Instruction::Call || NUM == Instruction::Invoke) \
+        return delegateCallBase(I); \
       else \
         DELEGATE(CLASS); \
     }
@@ -215,7 +215,12 @@ class InstVisitor {
   RetTy visitVAStartInst(VAStartInst &I)          { DELEGATE(IntrinsicInst); }
   RetTy visitVAEndInst(VAEndInst &I)              { DELEGATE(IntrinsicInst); }
   RetTy visitVACopyInst(VACopyInst &I)            { DELEGATE(IntrinsicInst); }
-  RetTy visitIntrinsicInst(IntrinsicInst &I)      { DELEGATE(CallInst); }
+  RetTy visitIntrinsicInst(IntrinsicInst &I)      {
+    if (isa<CallInst>(I))
+      return static_cast<SubClass*>(this)->visitCallInst(cast<CallInst>(I));
+    else
+      return static_cast<SubClass*>(this)->visitInvokeInst(cast<InvokeInst>(I));
+  }
   RetTy visitCallInst(CallInst &I)                { DELEGATE(CallBase); }
   RetTy visitInvokeInst(InvokeInst &I)            { DELEGATE(CallBase); }
   RetTy visitCallBrInst(CallBrInst &I)            { DELEGATE(CallBase); }
@@ -280,7 +285,7 @@ class InstVisitor {
 
 private:
   // Special helper function to delegate to CallInst subclass visitors.
-  RetTy delegateCallInst(CallInst &I) {
+  RetTy delegateCallBase(CallBase &I) {
     if (const Function *F = I.getCalledFunction()) {
       switch (F->getIntrinsicID()) {
       default:                     DELEGATE(IntrinsicInst);
@@ -296,13 +301,16 @@ class InstVisitor {
       case Intrinsic::not_intrinsic: break;
       }
     }
-    DELEGATE(CallInst);
+    if (isa<CallInst>(I))
+      return static_cast<SubClass*>(this)->visitCallInst(cast<CallInst>(I));
+    else
+      return static_cast<SubClass*>(this)->visitInvokeInst(cast<InvokeInst>(I));
   }
 
   // An overload that will never actually be called, it is used only from dead
   // code in the dispatching from opcodes to instruction subclasses.
-  RetTy delegateCallInst(Instruction &I) {
-    llvm_unreachable("delegateCallInst called for non-CallInst");
+  RetTy delegateCallBase(Instruction &I) {
+    llvm_unreachable("delegateCallBase called for non-CallBase");
   }
 };
 

diff  --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index e217f973b4b83..bcafda577e0a3 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -13,10 +13,10 @@
 //     if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(Inst))
 //        ... MCI->getDest() ... MCI->getSource() ...
 //
-// All intrinsic function calls are instances of the call instruction, so these
-// are all subclasses of the CallInst class.  Note that none of these classes
-// has state or virtual methods, which is an important part of this gross/neat
-// hack working.
+// All intrinsic function calls are instances of the call or invoke instruction,
+// so these are all subclasses of the CallBase class.  Note that none of these
+// classes has state or virtual methods, which is an important part of this
+// gross/neat hack working.
 //
 //===----------------------------------------------------------------------===//
 
@@ -42,7 +42,7 @@ namespace llvm {
 /// A wrapper class for inspecting calls to intrinsic functions.
 /// This allows the standard isa/dyncast/cast functionality to work with calls
 /// to intrinsic functions.
-class IntrinsicInst : public CallInst {
+class IntrinsicInst : public CallBase {
 public:
   IntrinsicInst() = delete;
   IntrinsicInst(const IntrinsicInst &) = delete;
@@ -107,13 +107,13 @@ class IntrinsicInst : public CallInst {
   }
 
   // Methods for support type inquiry through isa, cast, and dyn_cast:
-  static bool classof(const CallInst *I) {
+  static bool classof(const CallBase *I) {
     if (const Function *CF = I->getCalledFunction())
       return CF->isIntrinsic();
     return false;
   }
   static bool classof(const Value *V) {
-    return isa<CallInst>(V) && classof(cast<CallInst>(V));
+    return isa<CallBase>(V) && classof(cast<CallBase>(V));
   }
 };
 

diff  --git a/llvm/lib/Analysis/TypeMetadataUtils.cpp b/llvm/lib/Analysis/TypeMetadataUtils.cpp
index f015ba9a09cab..1c31d16d65f59 100644
--- a/llvm/lib/Analysis/TypeMetadataUtils.cpp
+++ b/llvm/lib/Analysis/TypeMetadataUtils.cpp
@@ -83,7 +83,7 @@ void llvm::findDevirtualizableCallsForTypeTest(
   // Find llvm.assume intrinsics for this llvm.type.test call.
   for (const Use &CIU : CI->uses())
     if (auto *Assume = dyn_cast<AssumeInst>(CIU.getUser()))
-      Assumes.push_back(Assume);
+      Assumes.push_back(cast<CallInst>(Assume));
 
   // If we found any, search for virtual calls based on %p and add them to
   // DevirtCalls.

diff  --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index faf3a848a69db..b9547bb7223c1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -1338,15 +1338,15 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
     return true;
   }
   case Intrinsic::experimental_stackmap:
-    return selectStackmap(II);
+    return selectStackmap(cast<CallInst>(II));
   case Intrinsic::experimental_patchpoint_void:
   case Intrinsic::experimental_patchpoint_i64:
-    return selectPatchpoint(II);
+    return selectPatchpoint(cast<CallInst>(II));
 
   case Intrinsic::xray_customevent:
-    return selectXRayCustomEvent(II);
+    return selectXRayCustomEvent(cast<CallInst>(II));
   case Intrinsic::xray_typedevent:
-    return selectXRayTypedEvent(II);
+    return selectXRayTypedEvent(cast<CallInst>(II));
   }
 
   return fastLowerIntrinsicCall(II);

diff  --git a/llvm/lib/CodeGen/ShadowStackGCLowering.cpp b/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
index 36752ef86526d..ae59b0bea9e85 100644
--- a/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
+++ b/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
@@ -244,7 +244,7 @@ void ShadowStackGCLowering::CollectRoots(Function &F) {
         if (Function *F = CI->getCalledFunction())
           if (F->getIntrinsicID() == Intrinsic::gcroot) {
             std::pair<CallInst *, AllocaInst *> Pair = std::make_pair(
-                CI,
+                cast<CallInst>(CI),
                 cast<AllocaInst>(CI->getArgOperand(0)->stripPointerCasts()));
             if (IsNullValue(CI->getArgOperand(1)))
               Roots.push_back(Pair);

diff  --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index ff6ff6dfe03c2..b0629b31899f4 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -255,7 +255,7 @@ static const CallInst *findStackProtectorIntrinsic(Function &F) {
     for (const Instruction &I : BB)
       if (const auto *II = dyn_cast<IntrinsicInst>(&I))
         if (II->getIntrinsicID() == Intrinsic::stackprotector)
-          return II;
+          return cast<CallInst>(II);
   return nullptr;
 }
 

diff  --git a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 62e1ea6e0f0af..0add020ce4963 100644
--- a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -1143,7 +1143,7 @@ void Interpreter::visitIntrinsicInst(IntrinsicInst &I) {
   bool atBegin(Parent->begin() == Me);
   if (!atBegin)
     --Me;
-  IL->LowerIntrinsicCall(&I);
+  IL->LowerIntrinsicCall(cast<CallInst>(&I));
 
   // Restore the CurInst pointer to the first instruction newly inserted, if
   // any.

diff  --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 95b5699552b0f..09fa49a912705 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -3482,7 +3482,8 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
       return false;
 
     const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
-    return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 1);
+    return lowerCallTo(cast<CallInst>(II), IntrMemName,
+                       II->getNumArgOperands() - 1);
   }
   case Intrinsic::memset: {
     const MemSetInst *MSI = cast<MemSetInst>(II);
@@ -3498,7 +3499,8 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
       // address spaces.
       return false;
 
-    return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
+    return lowerCallTo(cast<CallInst>(II), "memset",
+                       II->getNumArgOperands() - 1);
   }
   case Intrinsic::sin:
   case Intrinsic::cos:

diff  --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
index e963185eaeaa0..b7a8a06ccfc51 100644
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -1660,7 +1660,7 @@ bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (!MTI->getLength()->getType()->isIntegerTy(32))
       return false;
     const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
-    return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 1);
+    return lowerCallTo(cast<CallInst>(II), IntrMemName, II->getNumArgOperands() - 1);
   }
   case Intrinsic::memset: {
     const MemSetInst *MSI = cast<MemSetInst>(II);
@@ -1669,7 +1669,7 @@ bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
       return false;
     if (!MSI->getLength()->getType()->isIntegerTy(32))
       return false;
-    return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
+    return lowerCallTo(cast<CallInst>(II), "memset", II->getNumArgOperands() - 1);
   }
   }
   return false;

diff  --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index bd08af81e679d..e41f6ee4b6ec8 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -2738,7 +2738,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
       return false;
 
-    return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 1);
+    return lowerCallTo(cast<CallInst>(II), "memcpy", II->getNumArgOperands() - 1);
   }
   case Intrinsic::memset: {
     const MemSetInst *MSI = cast<MemSetInst>(II);
@@ -2753,7 +2753,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (MSI->getDestAddressSpace() > 255)
       return false;
 
-    return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
+    return lowerCallTo(cast<CallInst>(II), "memset", II->getNumArgOperands() - 1);
   }
   case Intrinsic::stackprotector: {
     // Emit code to store the stack guard onto the stack.

diff  --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 15152bb6f11aa..b8ba0f7c59b2d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -140,6 +140,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
   Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
   Instruction *foldItoFPtoI(CastInst &FI);
   Instruction *visitSelectInst(SelectInst &SI);
+  Instruction *visitCallBase(CallBase &Call);
   Instruction *visitCallInst(CallInst &CI);
   Instruction *visitInvokeInst(InvokeInst &II);
   Instruction *visitCallBrInst(CallBrInst &CBI);
@@ -222,7 +223,6 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
                              Instruction &CtxI, Value *&OperationResult,
                              Constant *&OverflowResult);
 
-  Instruction *visitCallBase(CallBase &Call);
   Instruction *tryOptimizeCall(CallInst *CI);
   bool transformConstExprCastCall(CallBase &Call);
   Instruction *transformCallThroughTrampoline(CallBase &Call,

diff  --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 3e4fae586aae2..caf6f29c02e1d 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -4142,7 +4142,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
   Value *VAArgTLSOriginCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
-  SmallVector<CallInst*, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
 
   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
 
@@ -4351,7 +4351,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
     // Instrument va_start.
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      CallInst *OrigInst = VAStartInstrumentationList[i];
+      auto *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
 
@@ -4405,7 +4405,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
   Value *VAArgTLSCopy = nullptr;
   Value *VAArgSize = nullptr;
 
-  SmallVector<CallInst*, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
 
   VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
                     MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
@@ -4494,7 +4494,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
     // Instrument va_start.
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      CallInst *OrigInst = VAStartInstrumentationList[i];
+      auto *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
@@ -4533,7 +4533,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
   Value *VAArgTLSCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
-  SmallVector<CallInst*, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
 
   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
 
@@ -4688,7 +4688,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
     // Instrument va_start, copy va_list shadow from the backup copy of
     // the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      CallInst *OrigInst = VAStartInstrumentationList[i];
+      auto *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
 
       Value *VAListTag = OrigInst->getArgOperand(0);
@@ -4783,7 +4783,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
   Value *VAArgTLSCopy = nullptr;
   Value *VAArgSize = nullptr;
 
-  SmallVector<CallInst*, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
 
   VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
                     MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
@@ -4932,7 +4932,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
     // Instrument va_start.
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      CallInst *OrigInst = VAStartInstrumentationList[i];
+      auto *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
@@ -4972,7 +4972,7 @@ struct VarArgSystemZHelper : public VarArgHelper {
   Value *VAArgTLSOriginCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
-  SmallVector<CallInst *, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst *, 16> VAStartInstrumentationList;
 
   enum class ArgKind {
     GeneralPurpose,
@@ -5255,7 +5255,7 @@ struct VarArgSystemZHelper : public VarArgHelper {
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t VaStartNo = 0, VaStartNum = VAStartInstrumentationList.size();
          VaStartNo < VaStartNum; VaStartNo++) {
-      CallInst *OrigInst = VAStartInstrumentationList[VaStartNo];
+      auto *OrigInst = VAStartInstrumentationList[VaStartNo];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
       copyRegSaveArea(IRB, VAListTag);


        


More information about the llvm-commits mailing list