[llvm] 4824d87 - Revert "Allow invokable sub-classes of IntrinsicInst"

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 20 15:38:48 PDT 2021


Author: Philip Reames
Date: 2021-04-20T15:38:38-07:00
New Revision: 4824d876f00a08e00350dab347c6953d552ec83b

URL: https://github.com/llvm/llvm-project/commit/4824d876f00a08e00350dab347c6953d552ec83b
DIFF: https://github.com/llvm/llvm-project/commit/4824d876f00a08e00350dab347c6953d552ec83b.diff

LOG: Revert "Allow invokable sub-classes of IntrinsicInst"

This reverts commit d87b9b81ccb95217181ce75515c6c68bbb408ca4.

Post-commit review raised concerns; reverting while discussion happens.

Added: 
    

Modified: 
    llvm/include/llvm/IR/InstVisitor.h
    llvm/include/llvm/IR/IntrinsicInst.h
    llvm/lib/Analysis/TypeMetadataUtils.cpp
    llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
    llvm/lib/CodeGen/ShadowStackGCLowering.cpp
    llvm/lib/CodeGen/StackProtector.cpp
    llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
    llvm/lib/Target/AArch64/AArch64FastISel.cpp
    llvm/lib/Target/Mips/MipsFastISel.cpp
    llvm/lib/Target/X86/X86FastISel.cpp
    llvm/lib/Transforms/InstCombine/InstCombineInternal.h
    llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/InstVisitor.h b/llvm/include/llvm/IR/InstVisitor.h
index d42eceea135b3..4dbdc66d13663 100644
--- a/llvm/include/llvm/IR/InstVisitor.h
+++ b/llvm/include/llvm/IR/InstVisitor.h
@@ -154,8 +154,8 @@ class InstVisitor {
   // instruction to a special delegation helper.
 #define HANDLE_INST(NUM, OPCODE, CLASS) \
     RetTy visit##OPCODE(CLASS &I) { \
-      if (NUM == Instruction::Call || NUM == Instruction::Invoke) \
-        return delegateCallBase(I); \
+      if (NUM == Instruction::Call) \
+        return delegateCallInst(I); \
       else \
         DELEGATE(CLASS); \
     }
@@ -215,12 +215,7 @@ class InstVisitor {
   RetTy visitVAStartInst(VAStartInst &I)          { DELEGATE(IntrinsicInst); }
   RetTy visitVAEndInst(VAEndInst &I)              { DELEGATE(IntrinsicInst); }
   RetTy visitVACopyInst(VACopyInst &I)            { DELEGATE(IntrinsicInst); }
-  RetTy visitIntrinsicInst(IntrinsicInst &I)      {
-    if (isa<CallInst>(I))
-      return static_cast<SubClass*>(this)->visitCallInst(cast<CallInst>(I));
-    else
-      return static_cast<SubClass*>(this)->visitInvokeInst(cast<InvokeInst>(I));
-  }
+  RetTy visitIntrinsicInst(IntrinsicInst &I)      { DELEGATE(CallInst); }
   RetTy visitCallInst(CallInst &I)                { DELEGATE(CallBase); }
   RetTy visitInvokeInst(InvokeInst &I)            { DELEGATE(CallBase); }
   RetTy visitCallBrInst(CallBrInst &I)            { DELEGATE(CallBase); }
@@ -285,7 +280,7 @@ class InstVisitor {
 
 private:
   // Special helper function to delegate to CallInst subclass visitors.
-  RetTy delegateCallBase(CallBase &I) {
+  RetTy delegateCallInst(CallInst &I) {
     if (const Function *F = I.getCalledFunction()) {
       switch (F->getIntrinsicID()) {
       default:                     DELEGATE(IntrinsicInst);
@@ -301,16 +296,13 @@ class InstVisitor {
       case Intrinsic::not_intrinsic: break;
       }
     }
-    if (isa<CallInst>(I))
-      return static_cast<SubClass*>(this)->visitCallInst(cast<CallInst>(I));
-    else
-      return static_cast<SubClass*>(this)->visitInvokeInst(cast<InvokeInst>(I));
+    DELEGATE(CallInst);
   }
 
   // An overload that will never actually be called, it is used only from dead
   // code in the dispatching from opcodes to instruction subclasses.
-  RetTy delegateCallBase(Instruction &I) {
-    llvm_unreachable("delegateCallBase called for non-CallBase");
+  RetTy delegateCallInst(Instruction &I) {
+    llvm_unreachable("delegateCallInst called for non-CallInst");
   }
 };
 

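For context on the visitor change above: with this revert, visitIntrinsicInst falls back to visitCallInst, which in turn falls back to visitCallBase. A minimal sketch of how that dispatch is typically consumed, assuming a made-up CountMemCpys visitor (illustrative only, not part of this commit):

    #include "llvm/IR/Function.h"
    #include "llvm/IR/InstVisitor.h"
    #include "llvm/IR/IntrinsicInst.h"
    using namespace llvm;

    // Counts memcpy intrinsics in a function. The base InstVisitor routes
    // each call through its intrinsic-aware delegate, so overriding
    // visitMemCpyInst is enough; everything else falls through to the
    // default no-op visitInstruction.
    struct CountMemCpys : InstVisitor<CountMemCpys> {
      unsigned Count = 0;
      void visitMemCpyInst(MemCpyInst &I) { ++Count; }
    };

    // Usage, given a Function &F:
    //   CountMemCpys V;
    //   V.visit(F);
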
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index bcafda577e0a3..e217f973b4b83 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -13,10 +13,10 @@
 //     if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(Inst))
 //        ... MCI->getDest() ... MCI->getSource() ...
 //
-// All intrinsic function calls are instances of the call or invoke instruction,
-// so these are all subclasses of the CallBase class.  Note that none of these
-// classes has state or virtual methods, which is an important part of this
-// gross/neat hack working.
+// All intrinsic function calls are instances of the call instruction, so these
+// are all subclasses of the CallInst class.  Note that none of these classes
+// has state or virtual methods, which is an important part of this gross/neat
+// hack working.
 //
 //===----------------------------------------------------------------------===//
 
@@ -42,7 +42,7 @@ namespace llvm {
 /// A wrapper class for inspecting calls to intrinsic functions.
 /// This allows the standard isa/dyncast/cast functionality to work with calls
 /// to intrinsic functions.
-class IntrinsicInst : public CallBase {
+class IntrinsicInst : public CallInst {
 public:
   IntrinsicInst() = delete;
   IntrinsicInst(const IntrinsicInst &) = delete;
@@ -107,13 +107,13 @@ class IntrinsicInst : public CallBase {
   }
 
   // Methods for support type inquiry through isa, cast, and dyn_cast:
-  static bool classof(const CallBase *I) {
+  static bool classof(const CallInst *I) {
     if (const Function *CF = I->getCalledFunction())
       return CF->isIntrinsic();
     return false;
   }
   static bool classof(const Value *V) {
-    return isa<CallBase>(V) && classof(cast<CallBase>(V));
+    return isa<CallInst>(V) && classof(cast<CallInst>(V));
   }
 };
 

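With IntrinsicInst deriving from CallInst again, the classof hooks above are what let isa/dyn_cast treat a plain call instruction as an IntrinsicInst, which works only because the class adds no state or virtual methods. A minimal usage sketch along the lines of the header comment (illustrative only; handleMemCpy is an invented name):

    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/IntrinsicInst.h"
    using namespace llvm;

    // dyn_cast succeeds purely via classof: the instruction must be a call
    // whose callee satisfies isIntrinsic(). No RTTI or extra fields involved.
    static void handleMemCpy(Instruction *Inst) {
      if (auto *MCI = dyn_cast<MemCpyInst>(Inst)) {
        // MemCpyInst layers further accessors on top of the same object.
        (void)MCI->getDest();
        (void)MCI->getSource();
        (void)MCI->getLength();
      }
    }
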
diff --git a/llvm/lib/Analysis/TypeMetadataUtils.cpp b/llvm/lib/Analysis/TypeMetadataUtils.cpp
index 1c31d16d65f59..f015ba9a09cab 100644
--- a/llvm/lib/Analysis/TypeMetadataUtils.cpp
+++ b/llvm/lib/Analysis/TypeMetadataUtils.cpp
@@ -83,7 +83,7 @@ void llvm::findDevirtualizableCallsForTypeTest(
   // Find llvm.assume intrinsics for this llvm.type.test call.
   for (const Use &CIU : CI->uses())
     if (auto *Assume = dyn_cast<AssumeInst>(CIU.getUser()))
-      Assumes.push_back(cast<CallInst>(Assume));
+      Assumes.push_back(Assume);
 
   // If we found any, search for virtual calls based on %p and add them to
   // DevirtCalls.

diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index b9547bb7223c1..faf3a848a69db 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -1338,15 +1338,15 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
     return true;
   }
   case Intrinsic::experimental_stackmap:
-    return selectStackmap(cast<CallInst>(II));
+    return selectStackmap(II);
   case Intrinsic::experimental_patchpoint_void:
   case Intrinsic::experimental_patchpoint_i64:
-    return selectPatchpoint(cast<CallInst>(II));
+    return selectPatchpoint(II);
 
   case Intrinsic::xray_customevent:
-    return selectXRayCustomEvent(cast<CallInst>(II));
+    return selectXRayCustomEvent(II);
   case Intrinsic::xray_typedevent:
-    return selectXRayTypedEvent(cast<CallInst>(II));
+    return selectXRayTypedEvent(II);
   }
 
   return fastLowerIntrinsicCall(II);

diff --git a/llvm/lib/CodeGen/ShadowStackGCLowering.cpp b/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
index ae59b0bea9e85..36752ef86526d 100644
--- a/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
+++ b/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
@@ -244,7 +244,7 @@ void ShadowStackGCLowering::CollectRoots(Function &F) {
         if (Function *F = CI->getCalledFunction())
           if (F->getIntrinsicID() == Intrinsic::gcroot) {
             std::pair<CallInst *, AllocaInst *> Pair = std::make_pair(
-                cast<CallInst>(CI),
+                CI,
                 cast<AllocaInst>(CI->getArgOperand(0)->stripPointerCasts()));
             if (IsNullValue(CI->getArgOperand(1)))
               Roots.push_back(Pair);

diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index b0629b31899f4..ff6ff6dfe03c2 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -255,7 +255,7 @@ static const CallInst *findStackProtectorIntrinsic(Function &F) {
     for (const Instruction &I : BB)
       if (const auto *II = dyn_cast<IntrinsicInst>(&I))
         if (II->getIntrinsicID() == Intrinsic::stackprotector)
-          return cast<CallInst>(II);
+          return II;
   return nullptr;
 }
 

diff --git a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 0add020ce4963..62e1ea6e0f0af 100644
--- a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -1143,7 +1143,7 @@ void Interpreter::visitIntrinsicInst(IntrinsicInst &I) {
   bool atBegin(Parent->begin() == Me);
   if (!atBegin)
     --Me;
-  IL->LowerIntrinsicCall(cast<CallInst>(&I));
+  IL->LowerIntrinsicCall(&I);
 
   // Restore the CurInst pointer to the first instruction newly inserted, if
   // any.

diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 09fa49a912705..95b5699552b0f 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -3482,8 +3482,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
       return false;
 
     const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
-    return lowerCallTo(cast<CallInst>(II), IntrMemName,
-                       II->getNumArgOperands() - 1);
+    return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 1);
   }
   case Intrinsic::memset: {
     const MemSetInst *MSI = cast<MemSetInst>(II);
@@ -3499,8 +3498,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
       // address spaces.
       return false;
 
-    return lowerCallTo(cast<CallInst>(II), "memset",
-                       II->getNumArgOperands() - 1);
+    return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
   }
   case Intrinsic::sin:
   case Intrinsic::cos:

diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
index b7a8a06ccfc51..e963185eaeaa0 100644
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -1660,7 +1660,7 @@ bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (!MTI->getLength()->getType()->isIntegerTy(32))
       return false;
     const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
-    return lowerCallTo(cast<CallInst>(II), IntrMemName, II->getNumArgOperands() - 1);
+    return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 1);
   }
   case Intrinsic::memset: {
     const MemSetInst *MSI = cast<MemSetInst>(II);
@@ -1669,7 +1669,7 @@ bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
       return false;
     if (!MSI->getLength()->getType()->isIntegerTy(32))
       return false;
-    return lowerCallTo(cast<CallInst>(II), "memset", II->getNumArgOperands() - 1);
+    return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
   }
   }
   return false;

diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index e41f6ee4b6ec8..bd08af81e679d 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -2738,7 +2738,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
       return false;
 
-    return lowerCallTo(cast<CallInst>(II), "memcpy", II->getNumArgOperands() - 1);
+    return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 1);
   }
   case Intrinsic::memset: {
     const MemSetInst *MSI = cast<MemSetInst>(II);
@@ -2753,7 +2753,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (MSI->getDestAddressSpace() > 255)
       return false;
 
-    return lowerCallTo(cast<CallInst>(II), "memset", II->getNumArgOperands() - 1);
+    return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
   }
   case Intrinsic::stackprotector: {
     // Emit code to store the stack guard onto the stack.

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index b8ba0f7c59b2d..15152bb6f11aa 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -140,7 +140,6 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
   Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
   Instruction *foldItoFPtoI(CastInst &FI);
   Instruction *visitSelectInst(SelectInst &SI);
-  Instruction *visitCallBase(CallBase &Call);
   Instruction *visitCallInst(CallInst &CI);
   Instruction *visitInvokeInst(InvokeInst &II);
   Instruction *visitCallBrInst(CallBrInst &CBI);
@@ -223,6 +222,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
                              Instruction &CtxI, Value *&OperationResult,
                              Constant *&OverflowResult);
 
+  Instruction *visitCallBase(CallBase &Call);
   Instruction *tryOptimizeCall(CallInst *CI);
   bool transformConstExprCastCall(CallBase &Call);
   Instruction *transformCallThroughTrampoline(CallBase &Call,

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index caf6f29c02e1d..3e4fae586aae2 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -4142,7 +4142,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
   Value *VAArgTLSOriginCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
-  SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
+  SmallVector<CallInst*, 16> VAStartInstrumentationList;
 
   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
 
@@ -4351,7 +4351,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
     // Instrument va_start.
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      auto *OrigInst = VAStartInstrumentationList[i];
+      CallInst *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
 
@@ -4405,7 +4405,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
   Value *VAArgTLSCopy = nullptr;
   Value *VAArgSize = nullptr;
 
-  SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
+  SmallVector<CallInst*, 16> VAStartInstrumentationList;
 
   VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
                     MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
@@ -4494,7 +4494,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
     // Instrument va_start.
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      auto *OrigInst = VAStartInstrumentationList[i];
+      CallInst *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
@@ -4533,7 +4533,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
   Value *VAArgTLSCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
-  SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
+  SmallVector<CallInst*, 16> VAStartInstrumentationList;
 
   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
 
@@ -4688,7 +4688,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
     // Instrument va_start, copy va_list shadow from the backup copy of
     // the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      auto *OrigInst = VAStartInstrumentationList[i];
+      CallInst *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
 
       Value *VAListTag = OrigInst->getArgOperand(0);
@@ -4783,7 +4783,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
   Value *VAArgTLSCopy = nullptr;
   Value *VAArgSize = nullptr;
 
-  SmallVector<IntrinsicInst*, 16> VAStartInstrumentationList;
+  SmallVector<CallInst*, 16> VAStartInstrumentationList;
 
   VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
                     MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
@@ -4932,7 +4932,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
     // Instrument va_start.
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      auto *OrigInst = VAStartInstrumentationList[i];
+      CallInst *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
@@ -4972,7 +4972,7 @@ struct VarArgSystemZHelper : public VarArgHelper {
   Value *VAArgTLSOriginCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
-  SmallVector<IntrinsicInst *, 16> VAStartInstrumentationList;
+  SmallVector<CallInst *, 16> VAStartInstrumentationList;
 
   enum class ArgKind {
     GeneralPurpose,
@@ -5255,7 +5255,7 @@ struct VarArgSystemZHelper : public VarArgHelper {
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t VaStartNo = 0, VaStartNum = VAStartInstrumentationList.size();
          VaStartNo < VaStartNum; VaStartNo++) {
-      auto *OrigInst = VAStartInstrumentationList[VaStartNo];
+      CallInst *OrigInst = VAStartInstrumentationList[VaStartNo];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
       copyRegSaveArea(IRB, VAListTag);


        


More information about the llvm-commits mailing list