[llvm] AMDGPU: Generalize and normalize some tests to avoid future churn (PR #170508)

Nicolai Hähnle via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 3 08:45:15 PST 2025


https://github.com/nhaehnle created https://github.com/llvm/llvm-project/pull/170508

None

From 90f9f54fe66bfe7de5c3a5e1118b34da9a4fafd2 Mon Sep 17 00:00:00 2001
From: Nicolai Hähnle <nicolai.haehnle at amd.com>
Date: Thu, 6 Nov 2025 17:04:30 -0800
Subject: [PATCH] AMDGPU: Generalize and normalize some tests to avoid future
 churn

commit-id:c5c3b1e8
---
 llvm/test/CodeGen/AMDGPU/vector-alloca-atomic.ll | 16 ++++++++--------
 .../test/CodeGen/AMDGPU/vector-alloca-bitcast.ll |  6 ++++--
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/vector-alloca-atomic.ll b/llvm/test/CodeGen/AMDGPU/vector-alloca-atomic.ll
index 8e4cc2b0236c0..a7090960518af 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-alloca-atomic.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-alloca-atomic.ll
@@ -1,11 +1,11 @@
-; RUN: opt -S -mtriple=amdgcn-- -data-layout=A5 -passes='amdgpu-promote-alloca,sroa,instcombine' < %s | FileCheck -check-prefix=OPT %s
+; RUN: opt -S -mtriple=amdgcn-- -passes='amdgpu-promote-alloca,sroa,instcombine' < %s | FileCheck -check-prefix=OPT %s
 
 ; Show that what the alloca promotion pass will do for non-atomic load/store.
 
 ; OPT-LABEL: @vector_alloca_not_atomic(
 ;
-; OPT: extractelement <3 x i32> <i32 0, i32 1, i32 2>, i64 %index
-define amdgpu_kernel void @vector_alloca_not_atomic(ptr addrspace(1) %out, i64 %index) {
+; OPT: extractelement <3 x i32> <i32 0, i32 1, i32 2>, i32 %index
+define amdgpu_kernel void @vector_alloca_not_atomic(ptr addrspace(1) %out, i32 %index) {
 entry:
   %alloca = alloca [3 x i32], addrspace(5)
   %a1 = getelementptr [3 x i32], ptr addrspace(5) %alloca, i32 0, i32 1
@@ -13,7 +13,7 @@ entry:
   store i32 0, ptr addrspace(5) %alloca
   store i32 1, ptr addrspace(5) %a1
   store i32 2, ptr addrspace(5) %a2
-  %tmp = getelementptr [3 x i32], ptr addrspace(5) %alloca, i64 0, i64 %index
+  %tmp = getelementptr [3 x i32], ptr addrspace(5) %alloca, i32 0, i32 %index
   %data = load i32, ptr addrspace(5) %tmp
   store i32 %data, ptr addrspace(1) %out
   ret void
@@ -26,7 +26,7 @@ entry:
 ; OPT: store i32 1, ptr addrspace(5)
 ; OPT: store i32 2, ptr addrspace(5)
 ; OPT: load atomic i32, ptr addrspace(5)
-define amdgpu_kernel void @vector_alloca_atomic_read(ptr addrspace(1) %out, i64 %index) {
+define amdgpu_kernel void @vector_alloca_atomic_read(ptr addrspace(1) %out, i32 %index) {
 entry:
   %alloca = alloca [3 x i32], addrspace(5)
   %a1 = getelementptr [3 x i32], ptr addrspace(5) %alloca, i32 0, i32 1
@@ -34,7 +34,7 @@ entry:
   store i32 0, ptr addrspace(5) %alloca
   store i32 1, ptr addrspace(5) %a1
   store i32 2, ptr addrspace(5) %a2
-  %tmp = getelementptr [3 x i32], ptr addrspace(5) %alloca, i64 0, i64 %index
+  %tmp = getelementptr [3 x i32], ptr addrspace(5) %alloca, i32 0, i32 %index
   %data = load atomic i32, ptr addrspace(5) %tmp acquire, align 4
   store i32 %data, ptr addrspace(1) %out
   ret void
@@ -47,7 +47,7 @@ entry:
 ; OPT: store atomic i32 1, ptr addrspace(5)
 ; OPT: store atomic i32 2, ptr addrspace(5)
 ; OPT: load i32, ptr addrspace(5)
-define amdgpu_kernel void @vector_alloca_atomic_write(ptr addrspace(1) %out, i64 %index) {
+define amdgpu_kernel void @vector_alloca_atomic_write(ptr addrspace(1) %out, i32 %index) {
 entry:
   %alloca = alloca [3 x i32], addrspace(5)
   %a1 = getelementptr [3 x i32], ptr addrspace(5) %alloca, i32 0, i32 1
@@ -55,7 +55,7 @@ entry:
   store atomic i32 0, ptr addrspace(5) %alloca release, align 4
   store atomic i32 1, ptr addrspace(5) %a1 release, align 4
   store atomic i32 2, ptr addrspace(5) %a2  release, align 4
-  %tmp = getelementptr [3 x i32], ptr addrspace(5) %alloca, i64 0, i64 %index
+  %tmp = getelementptr [3 x i32], ptr addrspace(5) %alloca, i32 0, i32 %index
   %data = load i32, ptr addrspace(5) %tmp
   store i32 %data, ptr addrspace(1) %out
   ret void
diff --git a/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll b/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll
index 9c05f4d16cb4e..4a29f7e53e93a 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll
@@ -72,7 +72,8 @@ entry:
 ; OPT-NOT:   alloca
 ; OPT: bb2:
 ; OPT:  %promotealloca = phi <6 x float> [ zeroinitializer, %bb ], [ %0, %bb2 ]
-; OPT:  %0 = insertelement <6 x float> %promotealloca, float %tmp71, i32 %tmp10
+; OPT: [[TMP:%tmp7.*]] = load float, ptr addrspace(1) %tmp5, align 4
+; OPT:  %0 = insertelement <6 x float> %promotealloca, float [[TMP]], i32 %tmp10
 ; OPT: .preheader:
 ; OPT:  %bc = bitcast <6 x float> %0 to <6 x i32>
 ; OPT:  %1 = extractelement <6 x i32> %bc, i32 %tmp20
@@ -132,7 +133,8 @@ bb15:                                             ; preds = %.preheader
 ; OPT-NOT:   alloca
 ; OPT: bb2:
 ; OPT:  %promotealloca = phi <6 x double> [ zeroinitializer, %bb ], [ %0, %bb2 ]
-; OPT:  %0 = insertelement <6 x double> %promotealloca, double %tmp71, i32 %tmp10
+; OPT:  [[TMP:%tmp7.*]] = load double, ptr addrspace(1) %tmp5, align 8
+; OPT:  %0 = insertelement <6 x double> %promotealloca, double [[TMP]], i32 %tmp10
 ; OPT: .preheader:
 ; OPT:  %bc = bitcast <6 x double> %0 to <6 x i64>
 ; OPT:  %1 = extractelement <6 x i64> %bc, i32 %tmp20
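
A note for readers less familiar with the FileCheck syntax introduced above: the new [[TMP:%tmp7.*]] patterns are FileCheck variable captures, not literal IR. The first occurrence binds whatever text matches the regex %tmp7.* to the variable TMP, and the later [[TMP]] reference requires that same string to appear again, so the check no longer hard-codes the exact value name the passes happen to produce. A minimal sketch of the same idea (hypothetical check lines and value names, not taken from this patch):

  ; OPT: [[VAL:%[A-Za-z0-9._]+]] = load float, ptr addrspace(1) %p, align 4
  ; OPT: insertelement <6 x float> %vec, float [[VAL]], i32 %idx

Here [[VAL:...]] captures the SSA name produced by the load, and [[VAL]] asserts that the insertelement consumes that same value, which is the kind of renaming-tolerant check that avoids churn when value numbering shifts.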


