[llvm] AMDGPU: don't call getSubtarget<GCNSubtarget> on R600 targets. (PR #162207)

James Y Knight via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 6 17:52:33 PDT 2025


https://github.com/jyknight created https://github.com/llvm/llvm-project/pull/162207

Unfortunately, `TargetMachine::getSubtarget<ST>` does an unchecked static_cast to `ST&`, which makes it easy to get wrong.

The modifications here were created by running check-llvm with an assert added to getSubtarget. However, that assert requires RTTI, which LLVM is built without, so I've reverted the assert before sending this fix upstream.

These errors have been present for some time, but were only detected after #162040 caused asan/msan to report an uninitialized memory read.
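
For context, here is a minimal sketch of what such a temporary check could look like. This is an illustration of the approach described above, not the exact assert from my local change, and it assumes an RTTI-enabled local build, since LLVM itself compiles with -fno-rtti:

```cpp
// Sketch only: a local, debug-only check inside the existing
// TargetMachine::getSubtarget template (llvm/include/llvm/Target/TargetMachine.h).
// Not committable as-is, because dynamic_cast requires RTTI and LLVM builds
// with -fno-rtti.
template <typename STC> const STC &getSubtarget(const Function &F) const {
  const TargetSubtargetInfo *STI = getSubtargetImpl(F);
  // Catch callers that request a subtarget type the current target
  // does not actually provide (e.g. GCNSubtarget on an R600 target).
  assert(dynamic_cast<const STC *>(STI) &&
         "getSubtarget<STC>() called with a mismatched subtarget type");
  return *static_cast<const STC *>(STI);
}
```

The committed fix instead guards the GCN-only call sites with `TM.getTargetTriple().isAMDGCN()`, and switches queries that only need common properties to `AMDGPUSubtarget::get(TM, F)`, which is valid for both GCN and R600 subtargets.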

From d6f729761c77b7fb380d4c23296f8f5c9cb6d984 Mon Sep 17 00:00:00 2001
From: James Y Knight <jyknight at google.com>
Date: Mon, 6 Oct 2025 20:43:27 -0400
Subject: [PATCH] AMDGPU: don't call getSubtarget<GCNSubtarget> on R600
 targets.

Unfortunately, `TargetMachine::getSubtarget<ST>` does an unchecked
static_cast to `ST&`, which makes it easy to get wrong.

The modifications here were created by running check-llvm with an
assert added to getSubtarget. However, that assert requires RTTI,
which LLVM is built without, so I've reverted the assert before
sending this fix upstream.
---
 llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp   | 26 ++++++++++++-------
 .../AMDGPU/AMDGPUImageIntrinsicOptimizer.cpp  |  2 +-
 2 files changed, 17 insertions(+), 11 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
index cb49936871e74..f6186f1fae777 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
@@ -162,12 +162,16 @@ class AMDGPUInformationCache : public InformationCache {
 
   /// Check if the subtarget has aperture regs.
   bool hasApertureRegs(Function &F) {
+    if (!TM.getTargetTriple().isAMDGCN())
+      return false;
     const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
     return ST.hasApertureRegs();
   }
 
   /// Check if the subtarget supports GetDoorbellID.
   bool supportsGetDoorbellID(Function &F) {
+    if (!TM.getTargetTriple().isAMDGCN())
+      return false;
     const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
     return ST.supportsGetDoorbellID();
   }
@@ -182,18 +186,18 @@ class AMDGPUInformationCache : public InformationCache {
 
   std::pair<unsigned, unsigned>
   getDefaultFlatWorkGroupSize(const Function &F) const {
-    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
+    const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
     return ST.getDefaultFlatWorkGroupSize(F.getCallingConv());
   }
 
   std::pair<unsigned, unsigned>
   getMaximumFlatWorkGroupRange(const Function &F) {
-    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
+    const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
     return {ST.getMinFlatWorkGroupSize(), ST.getMaxFlatWorkGroupSize()};
   }
 
   SmallVector<unsigned> getMaxNumWorkGroups(const Function &F) {
-    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
+    const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
     return ST.getMaxNumWorkGroups(F);
   }
 
@@ -206,7 +210,7 @@ class AMDGPUInformationCache : public InformationCache {
   std::pair<unsigned, unsigned>
   getWavesPerEU(const Function &F,
                 std::pair<unsigned, unsigned> FlatWorkGroupSize) {
-    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
+    const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
     return ST.getWavesPerEU(FlatWorkGroupSize, getLDSSize(F), F);
   }
 
@@ -217,7 +221,7 @@ class AMDGPUInformationCache : public InformationCache {
     if (!Val)
       return std::nullopt;
     if (!Val->second) {
-      const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
+      const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
       Val->second = ST.getMaxWavesPerEU();
     }
     return std::make_pair(Val->first, *(Val->second));
@@ -227,13 +231,13 @@ class AMDGPUInformationCache : public InformationCache {
   getEffectiveWavesPerEU(const Function &F,
                          std::pair<unsigned, unsigned> WavesPerEU,
                          std::pair<unsigned, unsigned> FlatWorkGroupSize) {
-    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
+    const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
     return ST.getEffectiveWavesPerEU(WavesPerEU, FlatWorkGroupSize,
                                      getLDSSize(F));
   }
 
   unsigned getMaxWavesPerEU(const Function &F) {
-    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
+    const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
     return ST.getMaxWavesPerEU();
   }
 
@@ -1511,9 +1515,11 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM,
       A.getOrCreateAAFor<AAAMDWavesPerEU>(IRPosition::function(*F));
     }
 
-    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(*F);
-    if (!F->isDeclaration() && ST.hasClusters())
-      A.getOrCreateAAFor<AAAMDGPUClusterDims>(IRPosition::function(*F));
+    if (TM.getTargetTriple().isAMDGCN()) {
+      const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(*F);
+      if (!F->isDeclaration() && ST.hasClusters())
+        A.getOrCreateAAFor<AAAMDGPUClusterDims>(IRPosition::function(*F));
+    }
 
     for (auto &I : instructions(F)) {
       Value *Ptr = nullptr;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUImageIntrinsicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUImageIntrinsicOptimizer.cpp
index 639089c75a33e..155c7dad904c5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUImageIntrinsicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUImageIntrinsicOptimizer.cpp
@@ -281,7 +281,7 @@ bool optimizeSection(ArrayRef<SmallVector<IntrinsicInst *, 4>> MergeableInsts) {
 }
 
 static bool imageIntrinsicOptimizerImpl(Function &F, const TargetMachine *TM) {
-  if (!TM)
+  if (!TM || !TM->getTargetTriple().isAMDGCN())
     return false;
 
   // This optimization only applies to GFX11 and beyond.


