[llvm] e9a2aa6 - [amdgpu][lds] Use a consistent order of fields in generated structs

Jon Chesterfield via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 9 07:57:56 PST 2022


Author: Jon Chesterfield
Date: 2022-11-09T15:57:41Z
New Revision: e9a2aa6355968863e95667f73ba95a785ea9d8a8

URL: https://github.com/llvm/llvm-project/commit/e9a2aa6355968863e95667f73ba95a785ea9d8a8
DIFF: https://github.com/llvm/llvm-project/commit/e9a2aa6355968863e95667f73ba95a785ea9d8a8.diff

LOG: [amdgpu][lds] Use a consistent order of fields in generated structs

Avoids spurious and confusing test failures on changing implementation.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D136598

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
    llvm/test/CodeGen/AMDGPU/lower-module-lds-constantexpr.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index de56ab7d305a5..be5ac67f9e37d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -390,10 +390,22 @@ class AMDGPULowerModuleLDS : public ModulePass {
 
     SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
     LayoutFields.reserve(LDSVarsToTransform.size());
-    for (GlobalVariable *GV : LDSVarsToTransform) {
-      OptimizedStructLayoutField F(GV, DL.getTypeAllocSize(GV->getValueType()),
-                                   AMDGPU::getAlign(DL, GV));
-      LayoutFields.emplace_back(F);
+    {
+      // The order of fields in this struct depends on the order of
+      // variables in the argument which varies when changing how they
+      // are identified, leading to spurious test breakage.
+      std::vector<GlobalVariable *> Sorted(LDSVarsToTransform.begin(),
+                                           LDSVarsToTransform.end());
+      llvm::sort(Sorted.begin(), Sorted.end(),
+                 [](const GlobalVariable *lhs, const GlobalVariable *rhs) {
+                   return lhs->getName() < rhs->getName();
+                 });
+      for (GlobalVariable *GV : Sorted) {
+        OptimizedStructLayoutField F(GV,
+                                     DL.getTypeAllocSize(GV->getValueType()),
+                                     AMDGPU::getAlign(DL, GV));
+        LayoutFields.emplace_back(F);
+      }
     }
 
     performOptimizedStructLayout(LayoutFields);

diff  --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-constantexpr.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-constantexpr.ll
index cffaf0d9295df..ebe9ccaa8d39a 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-module-lds-constantexpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-constantexpr.ll
@@ -3,18 +3,18 @@
 
 ; CHECK: %llvm.amdgcn.module.lds.t = type { float, float }
 
-@func = addrspace(3) global float undef, align 4
+@a_func = addrspace(3) global float undef, align 4
 
 ; CHECK: %llvm.amdgcn.kernel.timestwo.lds.t = type { float }
 
 @kern = addrspace(3) global float undef, align 4
 
-; @func is only used from a non-kernel function so is rewritten
-; CHECK-NOT: @func
-; @both is used from a non-kernel function so is rewritten
-; CHECK-NOT: @both
-; sorted both < func, so @both at null and @func at 4
-@both = addrspace(3) global float undef, align 4
+; @a_func is only used from a non-kernel function so is rewritten
+; CHECK-NOT: @a_func
+; @b_both is used from a non-kernel function so is rewritten
+; CHECK-NOT: @b_both
+; sorted both < func, so @b_both at null and @a_func at 4
+@b_both = addrspace(3) global float undef, align 4
 
 ; CHECK: @llvm.amdgcn.module.lds = internal addrspace(3) global %llvm.amdgcn.module.lds.t undef, align 4
 ; CHECK: @llvm.amdgcn.kernel.timestwo.lds = internal addrspace(3) global %llvm.amdgcn.kernel.timestwo.lds.t undef, align 4
@@ -32,7 +32,7 @@
 ; CHECK:       ret i32 %8
 define i32 @get_func() local_unnamed_addr #0 {
 entry:
-  %0 = load i32, i32* inttoptr (i64 add (i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @func to i32 addrspace(3)*) to i32*) to i64), i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @func to i32 addrspace(3)*) to i32*) to i64)) to i32*), align 4
+  %0 = load i32, i32* inttoptr (i64 add (i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @a_func to i32 addrspace(3)*) to i32*) to i64), i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @a_func to i32 addrspace(3)*) to i32*) to i64)) to i32*), align 4
   ret i32 %0
 }
 
@@ -49,7 +49,7 @@ entry:
 ; CHECK:      ret void
 define void @set_func(i32 %x) local_unnamed_addr #1 {
 entry:
-  store i32 %x, i32* inttoptr (i64 add (i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @both to i32 addrspace(3)*) to i32*) to i64), i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @both to i32 addrspace(3)*) to i32*) to i64)) to i32*), align 4
+  store i32 %x, i32* inttoptr (i64 add (i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @b_both to i32 addrspace(3)*) to i32*) to i64), i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @b_both to i32 addrspace(3)*) to i32*) to i64)) to i32*), align 4
   ret void
 }
 
@@ -78,9 +78,9 @@ entry:
 ; CHECK:      store i32 %mul, i32* %16, align 4
 ; CHECK:      ret void
 define amdgpu_kernel void @timestwo() {
-  %ld = load i32, i32* inttoptr (i64 add (i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @both to i32 addrspace(3)*) to i32*) to i64), i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @kern to i32 addrspace(3)*) to i32*) to i64)) to i32*), align 4
+  %ld = load i32, i32* inttoptr (i64 add (i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @b_both to i32 addrspace(3)*) to i32*) to i64), i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @kern to i32 addrspace(3)*) to i32*) to i64)) to i32*), align 4
   %mul = mul i32 %ld, 2
-  store i32 %mul, i32* inttoptr (i64 add (i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @kern to i32 addrspace(3)*) to i32*) to i64), i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @both to i32 addrspace(3)*) to i32*) to i64)) to i32*), align 4
+  store i32 %mul, i32* inttoptr (i64 add (i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @kern to i32 addrspace(3)*) to i32*) to i64), i64 ptrtoint (i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @b_both to i32 addrspace(3)*) to i32*) to i64)) to i32*), align 4
   ret void
 }
 


        


More information about the llvm-commits mailing list