[llvm] 8f071fe - AMDGPU: Use named values in a test

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 28 07:35:35 PST 2022


Author: Matt Arsenault
Date: 2022-11-28T10:35:29-05:00
New Revision: 8f071fecfe0bbf1ddfc3e2ca024ad6fa3aa024f0

URL: https://github.com/llvm/llvm-project/commit/8f071fecfe0bbf1ddfc3e2ca024ad6fa3aa024f0
DIFF: https://github.com/llvm/llvm-project/commit/8f071fecfe0bbf1ddfc3e2ca024ad6fa3aa024f0.diff

LOG: AMDGPU: Use named values in a test

As always, these were an obstacle to test updates.

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/fence-barrier.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/fence-barrier.ll b/llvm/test/CodeGen/AMDGPU/fence-barrier.ll
index 8f5a06d01fa2..106c88be38ae 100644
--- a/llvm/test/CodeGen/AMDGPU/fence-barrier.ll
+++ b/llvm/test/CodeGen/AMDGPU/fence-barrier.ll
@@ -10,46 +10,48 @@ declare void @llvm.amdgcn.s.barrier()
 @test_local.temp = internal addrspace(3) global [1 x i32] undef, align 4
 @test_global_local.temp = internal addrspace(3) global [1 x i32] undef, align 4
 
+
 ; GCN-LABEL: {{^}}test_local
 ; GCN: v_mov_b32_e32 v[[VAL:[0-9]+]], 0x777
 ; GCN: ds_write_b32 v{{[0-9]+}}, v[[VAL]]
 ; GCN: s_waitcnt lgkmcnt(0){{$}}
 ; GCN-NEXT: s_barrier
 ; GCN: flat_store_dword
-define amdgpu_kernel void @test_local(i32 addrspace(1)*) {
-  %2 = alloca i32 addrspace(1)*, align 4, addrspace(5)
-  store i32 addrspace(1)* %0, i32 addrspace(1)* addrspace(5)* %2, align 4
-  %3 = call i32 @llvm.amdgcn.workitem.id.x()
-  %4 = zext i32 %3 to i64
-  %5 = icmp eq i64 %4, 0
-  br i1 %5, label %6, label %7
+define amdgpu_kernel void @test_local(i32 addrspace(1)* %arg) {
+bb:
+  %i = alloca i32 addrspace(1)*, align 4, addrspace(5)
+  store i32 addrspace(1)* %arg, i32 addrspace(1)* addrspace(5)* %i, align 4
+  %i1 = call i32 @llvm.amdgcn.workitem.id.x()
+  %i2 = zext i32 %i1 to i64
+  %i3 = icmp eq i64 %i2, 0
+  br i1 %i3, label %bb4, label %bb5
 
-; <label>:6:                                      ; preds = %1
+bb4:                                              ; preds = %bb
   store i32 1911, i32 addrspace(3)* getelementptr inbounds ([1 x i32], [1 x i32] addrspace(3)* @test_local.temp, i64 0, i64 0), align 4
-  br label %7
+  br label %bb5
 
-; <label>:7:                                      ; preds = %6, %1
+bb5:                                              ; preds = %bb4, %bb
   fence syncscope("workgroup") release
   call void @llvm.amdgcn.s.barrier()
   fence syncscope("workgroup") acquire
-  %8 = load i32, i32 addrspace(3)* getelementptr inbounds ([1 x i32], [1 x i32] addrspace(3)* @test_local.temp, i64 0, i64 0), align 4
-  %9 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %2, align 4
-  %10 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-  %11 = call i32 @llvm.amdgcn.workitem.id.x()
-  %12 = call i32 @llvm.amdgcn.workgroup.id.x()
-  %13 = getelementptr inbounds i8, i8 addrspace(4)* %10, i64 4
-  %14 = bitcast i8 addrspace(4)* %13 to i16 addrspace(4)*
-  %15 = load i16, i16 addrspace(4)* %14, align 4
-  %16 = zext i16 %15 to i32
-  %17 = mul i32 %12, %16
-  %18 = add i32 %17, %11
-  %19 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %20 = zext i32 %18 to i64
-  %21 = bitcast i8 addrspace(4)* %19 to i64 addrspace(4)*
-  %22 = load i64, i64 addrspace(4)* %21, align 8
-  %23 = add i64 %22, %20
-  %24 = getelementptr inbounds i32, i32 addrspace(1)* %9, i64 %23
-  store i32 %8, i32 addrspace(1)* %24, align 4
+  %i6 = load i32, i32 addrspace(3)* getelementptr inbounds ([1 x i32], [1 x i32] addrspace(3)* @test_local.temp, i64 0, i64 0), align 4
+  %i7 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %i, align 4
+  %i8 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+  %i9 = call i32 @llvm.amdgcn.workitem.id.x()
+  %i10 = call i32 @llvm.amdgcn.workgroup.id.x()
+  %i11 = getelementptr inbounds i8, i8 addrspace(4)* %i8, i64 4
+  %i12 = bitcast i8 addrspace(4)* %i11 to i16 addrspace(4)*
+  %i13 = load i16, i16 addrspace(4)* %i12, align 4
+  %i14 = zext i16 %i13 to i32
+  %i15 = mul i32 %i10, %i14
+  %i16 = add i32 %i15, %i9
+  %i17 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+  %i18 = zext i32 %i16 to i64
+  %i19 = bitcast i8 addrspace(4)* %i17 to i64 addrspace(4)*
+  %i20 = load i64, i64 addrspace(4)* %i19, align 8
+  %i21 = add i64 %i20, %i18
+  %i22 = getelementptr inbounds i32, i32 addrspace(1)* %i7, i64 %i21
+  store i32 %i6, i32 addrspace(1)* %i22, align 4
   ret void
 }
 
@@ -58,82 +60,83 @@ define amdgpu_kernel void @test_local(i32 addrspace(1)*) {
 ; GCN: flat_store_dword
 ; GCN: s_waitcnt vmcnt(0) lgkmcnt(0){{$}}
 ; GCN-NEXT: s_barrier
-define amdgpu_kernel void @test_global(i32 addrspace(1)*) {
-  %2 = alloca i32 addrspace(1)*, align 4, addrspace(5)
-  %3 = alloca i32, align 4, addrspace(5)
-  store i32 addrspace(1)* %0, i32 addrspace(1)* addrspace(5)* %2, align 4
-  store i32 0, i32 addrspace(5)* %3, align 4
-  br label %4
+define amdgpu_kernel void @test_global(i32 addrspace(1)* %arg) {
+bb:
+  %i = alloca i32 addrspace(1)*, align 4, addrspace(5)
+  %i1 = alloca i32, align 4, addrspace(5)
+  store i32 addrspace(1)* %arg, i32 addrspace(1)* addrspace(5)* %i, align 4
+  store i32 0, i32 addrspace(5)* %i1, align 4
+  br label %bb2
 
-; <label>:4:                                      ; preds = %58, %1
-  %5 = load i32, i32 addrspace(5)* %3, align 4
-  %6 = sext i32 %5 to i64
-  %7 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-  %8 = call i32 @llvm.amdgcn.workitem.id.x()
-  %9 = call i32 @llvm.amdgcn.workgroup.id.x()
-  %10 = getelementptr inbounds i8, i8 addrspace(4)* %7, i64 4
-  %11 = bitcast i8 addrspace(4)* %10 to i16 addrspace(4)*
-  %12 = load i16, i16 addrspace(4)* %11, align 4
-  %13 = zext i16 %12 to i32
-  %14 = mul i32 %9, %13
-  %15 = add i32 %14, %8
-  %16 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %17 = zext i32 %15 to i64
-  %18 = bitcast i8 addrspace(4)* %16 to i64 addrspace(4)*
-  %19 = load i64, i64 addrspace(4)* %18, align 8
-  %20 = add i64 %19, %17
-  %21 = icmp ult i64 %6, %20
-  br i1 %21, label %22, label %61
+bb2:                                              ; preds = %bb56, %bb
+  %i3 = load i32, i32 addrspace(5)* %i1, align 4
+  %i4 = sext i32 %i3 to i64
+  %i5 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+  %i6 = call i32 @llvm.amdgcn.workitem.id.x()
+  %i7 = call i32 @llvm.amdgcn.workgroup.id.x()
+  %i8 = getelementptr inbounds i8, i8 addrspace(4)* %i5, i64 4
+  %i9 = bitcast i8 addrspace(4)* %i8 to i16 addrspace(4)*
+  %i10 = load i16, i16 addrspace(4)* %i9, align 4
+  %i11 = zext i16 %i10 to i32
+  %i12 = mul i32 %i7, %i11
+  %i13 = add i32 %i12, %i6
+  %i14 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+  %i15 = zext i32 %i13 to i64
+  %i16 = bitcast i8 addrspace(4)* %i14 to i64 addrspace(4)*
+  %i17 = load i64, i64 addrspace(4)* %i16, align 8
+  %i18 = add i64 %i17, %i15
+  %i19 = icmp ult i64 %i4, %i18
+  br i1 %i19, label %bb20, label %bb59
 
-; <label>:22:                                     ; preds = %4
-  %23 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-  %24 = call i32 @llvm.amdgcn.workitem.id.x()
-  %25 = call i32 @llvm.amdgcn.workgroup.id.x()
-  %26 = getelementptr inbounds i8, i8 addrspace(4)* %23, i64 4
-  %27 = bitcast i8 addrspace(4)* %26 to i16 addrspace(4)*
-  %28 = load i16, i16 addrspace(4)* %27, align 4
-  %29 = zext i16 %28 to i32
-  %30 = mul i32 %25, %29
-  %31 = add i32 %30, %24
-  %32 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %33 = zext i32 %31 to i64
-  %34 = bitcast i8 addrspace(4)* %32 to i64 addrspace(4)*
-  %35 = load i64, i64 addrspace(4)* %34, align 8
-  %36 = add i64 %35, %33
-  %37 = add i64 %36, 2184
-  %38 = trunc i64 %37 to i32
-  %39 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %2, align 4
-  %40 = load i32, i32 addrspace(5)* %3, align 4
-  %41 = sext i32 %40 to i64
-  %42 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-  %43 = call i32 @llvm.amdgcn.workitem.id.x()
-  %44 = call i32 @llvm.amdgcn.workgroup.id.x()
-  %45 = getelementptr inbounds i8, i8 addrspace(4)* %42, i64 4
-  %46 = bitcast i8 addrspace(4)* %45 to i16 addrspace(4)*
-  %47 = load i16, i16 addrspace(4)* %46, align 4
-  %48 = zext i16 %47 to i32
-  %49 = mul i32 %44, %48
-  %50 = add i32 %49, %43
-  %51 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %52 = zext i32 %50 to i64
-  %53 = bitcast i8 addrspace(4)* %51 to i64 addrspace(4)*
-  %54 = load i64, i64 addrspace(4)* %53, align 8
-  %55 = add i64 %54, %52
-  %56 = add i64 %41, %55
-  %57 = getelementptr inbounds i32, i32 addrspace(1)* %39, i64 %56
-  store i32 %38, i32 addrspace(1)* %57, align 4
+bb20:                                             ; preds = %bb2
+  %i21 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+  %i22 = call i32 @llvm.amdgcn.workitem.id.x()
+  %i23 = call i32 @llvm.amdgcn.workgroup.id.x()
+  %i24 = getelementptr inbounds i8, i8 addrspace(4)* %i21, i64 4
+  %i25 = bitcast i8 addrspace(4)* %i24 to i16 addrspace(4)*
+  %i26 = load i16, i16 addrspace(4)* %i25, align 4
+  %i27 = zext i16 %i26 to i32
+  %i28 = mul i32 %i23, %i27
+  %i29 = add i32 %i28, %i22
+  %i30 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+  %i31 = zext i32 %i29 to i64
+  %i32 = bitcast i8 addrspace(4)* %i30 to i64 addrspace(4)*
+  %i33 = load i64, i64 addrspace(4)* %i32, align 8
+  %i34 = add i64 %i33, %i31
+  %i35 = add i64 %i34, 2184
+  %i36 = trunc i64 %i35 to i32
+  %i37 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %i, align 4
+  %i38 = load i32, i32 addrspace(5)* %i1, align 4
+  %i39 = sext i32 %i38 to i64
+  %i40 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+  %i41 = call i32 @llvm.amdgcn.workitem.id.x()
+  %i42 = call i32 @llvm.amdgcn.workgroup.id.x()
+  %i43 = getelementptr inbounds i8, i8 addrspace(4)* %i40, i64 4
+  %i44 = bitcast i8 addrspace(4)* %i43 to i16 addrspace(4)*
+  %i45 = load i16, i16 addrspace(4)* %i44, align 4
+  %i46 = zext i16 %i45 to i32
+  %i47 = mul i32 %i42, %i46
+  %i48 = add i32 %i47, %i41
+  %i49 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+  %i50 = zext i32 %i48 to i64
+  %i51 = bitcast i8 addrspace(4)* %i49 to i64 addrspace(4)*
+  %i52 = load i64, i64 addrspace(4)* %i51, align 8
+  %i53 = add i64 %i52, %i50
+  %i54 = add i64 %i39, %i53
+  %i55 = getelementptr inbounds i32, i32 addrspace(1)* %i37, i64 %i54
+  store i32 %i36, i32 addrspace(1)* %i55, align 4
   fence syncscope("workgroup") release
   call void @llvm.amdgcn.s.barrier()
   fence syncscope("workgroup") acquire
-  br label %58
+  br label %bb56
 
-; <label>:58:                                     ; preds = %22
-  %59 = load i32, i32 addrspace(5)* %3, align 4
-  %60 = add nsw i32 %59, 1
-  store i32 %60, i32 addrspace(5)* %3, align 4
-  br label %4
+bb56:                                             ; preds = %bb20
+  %i57 = load i32, i32 addrspace(5)* %i1, align 4
+  %i58 = add nsw i32 %i57, 1
+  store i32 %i58, i32 addrspace(5)* %i1, align 4
+  br label %bb2
 
-; <label>:61:                                     ; preds = %4
+bb59:                                             ; preds = %bb2
   ret void
 }
 
@@ -143,56 +146,57 @@ define amdgpu_kernel void @test_global(i32 addrspace(1)*) {
 ; GCN: s_waitcnt vmcnt(0) lgkmcnt(0){{$}}
 ; GCN-NEXT: s_barrier
 ; GCN: flat_store_dword
-define amdgpu_kernel void @test_global_local(i32 addrspace(1)*) {
-  %2 = alloca i32 addrspace(1)*, align 4, addrspace(5)
-  store i32 addrspace(1)* %0, i32 addrspace(1)* addrspace(5)* %2, align 4
-  %3 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %2, align 4
-  %4 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-  %5 = call i32 @llvm.amdgcn.workitem.id.x()
-  %6 = call i32 @llvm.amdgcn.workgroup.id.x()
-  %7 = getelementptr inbounds i8, i8 addrspace(4)* %4, i64 4
-  %8 = bitcast i8 addrspace(4)* %7 to i16 addrspace(4)*
-  %9 = load i16, i16 addrspace(4)* %8, align 4
-  %10 = zext i16 %9 to i32
-  %11 = mul i32 %6, %10
-  %12 = add i32 %11, %5
-  %13 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %14 = zext i32 %12 to i64
-  %15 = bitcast i8 addrspace(4)* %13 to i64 addrspace(4)*
-  %16 = load i64, i64 addrspace(4)* %15, align 8
-  %17 = add i64 %16, %14
-  %18 = getelementptr inbounds i32, i32 addrspace(1)* %3, i64 %17
-  store i32 1, i32 addrspace(1)* %18, align 4
-  %19 = call i32 @llvm.amdgcn.workitem.id.x()
-  %20 = zext i32 %19 to i64
-  %21 = icmp eq i64 %20, 0
-  br i1 %21, label %22, label %23
+define amdgpu_kernel void @test_global_local(i32 addrspace(1)* %arg) {
+bb:
+  %i = alloca i32 addrspace(1)*, align 4, addrspace(5)
+  store i32 addrspace(1)* %arg, i32 addrspace(1)* addrspace(5)* %i, align 4
+  %i1 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %i, align 4
+  %i2 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+  %i3 = call i32 @llvm.amdgcn.workitem.id.x()
+  %i4 = call i32 @llvm.amdgcn.workgroup.id.x()
+  %i5 = getelementptr inbounds i8, i8 addrspace(4)* %i2, i64 4
+  %i6 = bitcast i8 addrspace(4)* %i5 to i16 addrspace(4)*
+  %i7 = load i16, i16 addrspace(4)* %i6, align 4
+  %i8 = zext i16 %i7 to i32
+  %i9 = mul i32 %i4, %i8
+  %i10 = add i32 %i9, %i3
+  %i11 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+  %i12 = zext i32 %i10 to i64
+  %i13 = bitcast i8 addrspace(4)* %i11 to i64 addrspace(4)*
+  %i14 = load i64, i64 addrspace(4)* %i13, align 8
+  %i15 = add i64 %i14, %i12
+  %i16 = getelementptr inbounds i32, i32 addrspace(1)* %i1, i64 %i15
+  store i32 1, i32 addrspace(1)* %i16, align 4
+  %i17 = call i32 @llvm.amdgcn.workitem.id.x()
+  %i18 = zext i32 %i17 to i64
+  %i19 = icmp eq i64 %i18, 0
+  br i1 %i19, label %bb20, label %bb21
 
-; <label>:22:                                     ; preds = %1
+bb20:                                             ; preds = %bb
   store i32 2457, i32 addrspace(3)* getelementptr inbounds ([1 x i32], [1 x i32] addrspace(3)* @test_global_local.temp, i64 0, i64 0), align 4
-  br label %23
+  br label %bb21
 
-; <label>:23:                                     ; preds = %22, %1
+bb21:                                             ; preds = %bb20, %bb
   fence syncscope("workgroup") release
   call void @llvm.amdgcn.s.barrier()
   fence syncscope("workgroup") acquire
-  %24 = load i32, i32 addrspace(3)* getelementptr inbounds ([1 x i32], [1 x i32] addrspace(3)* @test_global_local.temp, i64 0, i64 0), align 4
-  %25 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %2, align 4
-  %26 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-  %27 = call i32 @llvm.amdgcn.workitem.id.x()
-  %28 = call i32 @llvm.amdgcn.workgroup.id.x()
-  %29 = getelementptr inbounds i8, i8 addrspace(4)* %26, i64 4
-  %30 = bitcast i8 addrspace(4)* %29 to i16 addrspace(4)*
-  %31 = load i16, i16 addrspace(4)* %30, align 4
-  %32 = zext i16 %31 to i32
-  %33 = mul i32 %28, %32
-  %34 = add i32 %33, %27
-  %35 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %36 = zext i32 %34 to i64
-  %37 = bitcast i8 addrspace(4)* %35 to i64 addrspace(4)*
-  %38 = load i64, i64 addrspace(4)* %37, align 8
-  %39 = add i64 %38, %36
-  %40 = getelementptr inbounds i32, i32 addrspace(1)* %25, i64 %39
-  store i32 %24, i32 addrspace(1)* %40, align 4
+  %i22 = load i32, i32 addrspace(3)* getelementptr inbounds ([1 x i32], [1 x i32] addrspace(3)* @test_global_local.temp, i64 0, i64 0), align 4
+  %i23 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %i, align 4
+  %i24 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+  %i25 = call i32 @llvm.amdgcn.workitem.id.x()
+  %i26 = call i32 @llvm.amdgcn.workgroup.id.x()
+  %i27 = getelementptr inbounds i8, i8 addrspace(4)* %i24, i64 4
+  %i28 = bitcast i8 addrspace(4)* %i27 to i16 addrspace(4)*
+  %i29 = load i16, i16 addrspace(4)* %i28, align 4
+  %i30 = zext i16 %i29 to i32
+  %i31 = mul i32 %i26, %i30
+  %i32 = add i32 %i31, %i25
+  %i33 = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+  %i34 = zext i32 %i32 to i64
+  %i35 = bitcast i8 addrspace(4)* %i33 to i64 addrspace(4)*
+  %i36 = load i64, i64 addrspace(4)* %i35, align 8
+  %i37 = add i64 %i36, %i34
+  %i38 = getelementptr inbounds i32, i32 addrspace(1)* %i23, i64 %i37
+  store i32 %i22, i32 addrspace(1)* %i38, align 4
   ret void
 }


        


More information about the llvm-commits mailing list