[llvm] d3c3c6b - AMDGPU: Fix treating divergent loads as uniform (#168785)

via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 20 08:10:27 PST 2025


Author: Matt Arsenault
Date: 2025-11-20T11:10:24-05:00
New Revision: d3c3c6bab5df051d9db12ea96add2211df9d81be

URL: https://github.com/llvm/llvm-project/commit/d3c3c6bab5df051d9db12ea96add2211df9d81be
DIFF: https://github.com/llvm/llvm-project/commit/d3c3c6bab5df051d9db12ea96add2211df9d81be.diff

LOG: AMDGPU: Fix treating divergent loads as uniform (#168785)

Avoids the regression which caused the revert 6d5f87fc42.

This is a hack on a hack. We currently have isUniformMMO,
which improperly treats unknown source value as known uniform.
This is a hack from before we had divergence information in the
DAG, and should be removed. This is the minimum change to avoid
the regression; removing the aggressive handling of the unknown
case (or dropping isUniformMMO entirely) are more involved fixes.

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
    llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
    llvm/test/CodeGen/AMDGPU/load-select-ptr.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 41986fef213f4..6a0a9e3d3e5ac 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -4420,10 +4420,18 @@ bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode * N) const {
 
 bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode *N) const {
   const auto *Ld = cast<LoadSDNode>(N);
-
   const MachineMemOperand *MMO = Ld->getMemOperand();
-  if (N->isDivergent() && !AMDGPU::isUniformMMO(MMO))
-    return false;
+
+  if (Ld->isDivergent()) {
+    // FIXME: We ought to be able to take the direct isDivergent result. We
+    // cannot rely on the MMO for a uniformity check, and should stop using
+    // it. This is a hack for 2 ways that the IR divergence analysis is superior
+    // to the DAG divergence: Recognizing shift-of-workitem-id as always
+    // uniform, and isSingleLaneExecution. These should be handled in the DAG
+    // version, and then this can be dropped.
+    if (!MMO->getValue() || !AMDGPU::isUniformMMO(MMO))
+      return false;
+  }
 
   return MMO->getSize().hasValue() &&
          Ld->getAlign() >=

diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
index 5085e86d71c97..7caafa16f9043 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
@@ -28,6 +28,7 @@ Intrinsic::ID AMDGPU::getIntrinsicID(const MachineInstr &I) {
 
 // TODO: Should largely merge with AMDGPUTTIImpl::isSourceOfDivergence.
 bool AMDGPU::isUniformMMO(const MachineMemOperand *MMO) {
+  // FIXME: null value should be treated as unknown, not as uniform.
   const Value *Ptr = MMO->getValue();
   // UndefValue means this is a load of a kernel input.  These are uniform.
   // Sometimes LDS instructions have constant pointers.

diff  --git a/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll b/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll
index d9ad9590d9762..61ab21e34e059 100644
--- a/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll
@@ -139,3 +139,87 @@ define amdgpu_kernel void @select_ptr_crash_i64_local_offsets(i32 %tmp, ptr addr
   store i64 %tmp5, ptr addrspace(1) %ptr2, align 8
   ret void
 }
+
+; The resultant load cannot be treated as uniform
+define amdgpu_kernel void @sample_test(ptr addrspace(1) %dest, ptr addrspace(1) %sourceA, ptr addrspace(1) %sourceB, i1 %tobool.not.i) #0 {
+; GCN-LABEL: sample_test:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN-NEXT:    s_load_dword s2, s[4:5], 0x18
+; GCN-NEXT:    v_mov_b32_e32 v3, s1
+; GCN-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_bitcmp1_b32 s2, 0
+; GCN-NEXT:    s_load_dwordx2 s[2:3], s[6:7], 0x0
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v4, s3
+; GCN-NEXT:    v_mov_b32_e32 v5, s2
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v4, v1, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
+; GCN-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN-NEXT:    s_endpgm
+entry:
+  %0 = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %conv2.i.i.i1 = zext i32 %0 to i64
+  %arrayidx.i = getelementptr i64, ptr addrspace(1) %sourceA, i64 %conv2.i.i.i1
+  %dest.gep = getelementptr i64, ptr addrspace(1) %dest, i64 %conv2.i.i.i1
+  %ld0 = load i64, ptr addrspace(1) %arrayidx.i, align 8, !amdgpu.noclobber !0
+  %ld1 = load i64, ptr addrspace(1) %sourceB, align 8
+  %cond.i = select i1 %tobool.not.i, i64 %ld0, i64 %ld1
+  store i64 %cond.i, ptr addrspace(1) %dest.gep, align 8
+  ret void
+}
+
+; The resultant load cannot be treated as uniform
+define amdgpu_kernel void @constant_is_not_uniform(ptr addrspace(1) %dest, ptr addrspace(4) %sourceA, ptr addrspace(4) %sourceB, i1 %tobool.not.i) #0 {
+; GCN-LABEL: constant_is_not_uniform:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN-NEXT:    s_load_dword s2, s[4:5], 0x18
+; GCN-NEXT:    v_mov_b32_e32 v3, s1
+; GCN-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_bitcmp1_b32 s2, 0
+; GCN-NEXT:    s_load_dwordx2 s[2:3], s[6:7], 0x0
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v4, s3
+; GCN-NEXT:    v_mov_b32_e32 v5, s2
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v4, v1, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
+; GCN-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN-NEXT:    s_endpgm
+entry:
+  %0 = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %conv2.i.i.i1 = zext i32 %0 to i64
+  %arrayidx.i = getelementptr i64, ptr addrspace(4) %sourceA, i64 %conv2.i.i.i1
+  %dest.gep = getelementptr i64, ptr addrspace(1) %dest, i64 %conv2.i.i.i1
+  %ld0 = load i64, ptr addrspace(4) %arrayidx.i, align 8
+  %ld1 = load i64, ptr addrspace(4) %sourceB, align 8
+  %cond.i = select i1 %tobool.not.i, i64 %ld0, i64 %ld1
+  store i64 %cond.i, ptr addrspace(1) %dest.gep, align 8
+  ret void
+}
+
+attributes #0 = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" }
+
+!0 = !{}


        


More information about the llvm-commits mailing list