[llvm] a49b5ca - [InferAddressSpaces] Generate test checks (NFC)

Nikita Popov via llvm-commits <llvm-commits at lists.llvm.org>
Wed May 29 06:27:56 PDT 2024


Author: Nikita Popov
Date: 2024-05-29T15:26:59+02:00
New Revision: a49b5cad99ff84c2c9c55db1d5d9d4bfe1411777

URL: https://github.com/llvm/llvm-project/commit/a49b5cad99ff84c2c9c55db1d5d9d4bfe1411777
DIFF: https://github.com/llvm/llvm-project/commit/a49b5cad99ff84c2c9c55db1d5d9d4bfe1411777.diff

LOG: [InferAddressSpaces] Generate test checks (NFC)
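Note: per the "NOTE: Assertions have been autogenerated" header added to each test, the new FileCheck lines come from utils/update_test_checks.py (UTC_ARGS: --version 5). A typical regeneration command is sketched below; the --opt-binary path is an assumption and depends on the local build directory:

    # Regenerate CHECK lines for the two modified tests (build path is illustrative).
    llvm/utils/update_test_checks.py --opt-binary=build/bin/opt \
        llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll \
        llvm/test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll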

Added: 
    

Modified: 
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll
    llvm/test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll
index 72109d0cff437..4290e4f705887 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll
@@ -1,34 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=infer-address-spaces %s | FileCheck %s
 ; Ports of most of test/CodeGen/NVPTX/access-non-generic.ll
 
 @scalar = internal addrspace(3) global float 0.0, align 4
 @array = internal addrspace(3) global [10 x float] zeroinitializer, align 4
 
-; CHECK-LABEL: @load_store_lds_f32(
-; CHECK: %tmp = load float, ptr addrspace(3) @scalar, align 4
-; CHECK: call void @use(float %tmp)
-; CHECK: store float %v, ptr addrspace(3) @scalar, align 4
-; CHECK: call void @llvm.amdgcn.s.barrier()
-; CHECK: %tmp2 = load float, ptr addrspace(3) @scalar, align 4
-; CHECK: call void @use(float %tmp2)
-; CHECK: store float %v, ptr addrspace(3) @scalar, align 4
-; CHECK: call void @llvm.amdgcn.s.barrier()
-; CHECK: %tmp3 = load float, ptr addrspace(3) getelementptr inbounds ([10 x float], ptr addrspace(3) @array, i32 0, i32 5), align 4
-; CHECK: call void @use(float %tmp3)
-; CHECK: store float %v, ptr addrspace(3) getelementptr inbounds ([10 x float], ptr addrspace(3) @array, i32 0, i32 5), align 4
-; CHECK: call void @llvm.amdgcn.s.barrier()
-; CHECK: %tmp4 = getelementptr inbounds [10 x float], ptr addrspace(3) @array, i32 0, i32 5
-; CHECK: %tmp5 = load float, ptr addrspace(3) %tmp4, align 4
-; CHECK: call void @use(float %tmp5)
-; CHECK: store float %v, ptr addrspace(3) %tmp4, align 4
-; CHECK: call void @llvm.amdgcn.s.barrier()
-; CHECK: %tmp7 = getelementptr inbounds [10 x float], ptr addrspace(3) @array, i32 0, i32 %i
-; CHECK: %tmp8 = load float, ptr addrspace(3) %tmp7, align 4
-; CHECK: call void @use(float %tmp8)
-; CHECK: store float %v, ptr addrspace(3) %tmp7, align 4
-; CHECK: call void @llvm.amdgcn.s.barrier()
-; CHECK: ret void
 define amdgpu_kernel void @load_store_lds_f32(i32 %i, float %v) #0 {
+; CHECK-LABEL: define amdgpu_kernel void @load_store_lds_f32(
+; CHECK-SAME: i32 [[I:%.*]], float [[V:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP:%.*]] = load float, ptr addrspace(3) @scalar, align 4
+; CHECK-NEXT:    call void @use(float [[TMP]])
+; CHECK-NEXT:    store float [[V]], ptr addrspace(3) @scalar, align 4
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP2:%.*]] = load float, ptr addrspace(3) @scalar, align 4
+; CHECK-NEXT:    call void @use(float [[TMP2]])
+; CHECK-NEXT:    store float [[V]], ptr addrspace(3) @scalar, align 4
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr addrspace(3) getelementptr inbounds ([10 x float], ptr addrspace(3) @array, i32 0, i32 5), align 4
+; CHECK-NEXT:    call void @use(float [[TMP3]])
+; CHECK-NEXT:    store float [[V]], ptr addrspace(3) getelementptr inbounds ([10 x float], ptr addrspace(3) @array, i32 0, i32 5), align 4
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [10 x float], ptr addrspace(3) @array, i32 0, i32 5
+; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr addrspace(3) [[TMP4]], align 4
+; CHECK-NEXT:    call void @use(float [[TMP5]])
+; CHECK-NEXT:    store float [[V]], ptr addrspace(3) [[TMP4]], align 4
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [10 x float], ptr addrspace(3) @array, i32 0, i32 [[I]]
+; CHECK-NEXT:    [[TMP8:%.*]] = load float, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    call void @use(float [[TMP8]])
+; CHECK-NEXT:    store float [[V]], ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    ret void
+;
 bb:
   %tmp = load float, ptr addrspacecast (ptr addrspace(3) @scalar to ptr), align 4
   call void @use(float %tmp)
@@ -57,20 +61,27 @@ bb:
   ret void
 }
 
-; CHECK-LABEL: @constexpr_load_int_from_float_lds(
-; CHECK: %tmp = load i32, ptr addrspace(3) @scalar, align 4
 define i32 @constexpr_load_int_from_float_lds() #0 {
+; CHECK-LABEL: define i32 @constexpr_load_int_from_float_lds(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP:%.*]] = load i32, ptr addrspace(3) @scalar, align 4
+; CHECK-NEXT:    ret i32 [[TMP]]
+;
 bb:
   %tmp = load i32, ptr addrspacecast (ptr addrspace(3) @scalar to ptr), align 4
   ret i32 %tmp
 }
 
-; CHECK-LABEL: @load_int_from_global_float(
-; CHECK: %tmp1 = getelementptr float, ptr addrspace(1) %input, i32 %i
-; CHECK: %tmp2 = getelementptr float, ptr addrspace(1) %tmp1, i32 %j
-; CHECK: %tmp4 = load i32, ptr addrspace(1) %tmp2
-; CHECK: ret i32 %tmp4
 define i32 @load_int_from_global_float(ptr addrspace(1) %input, i32 %i, i32 %j) #0 {
+; CHECK-LABEL: define i32 @load_int_from_global_float(
+; CHECK-SAME: ptr addrspace(1) [[INPUT:%.*]], i32 [[I:%.*]], i32 [[J:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr float, ptr addrspace(1) [[INPUT]], i32 [[I]]
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr float, ptr addrspace(1) [[TMP1]], i32 [[J]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    ret i32 [[TMP4]]
+;
 bb:
   %tmp = addrspacecast ptr addrspace(1) %input to ptr
   %tmp1 = getelementptr float, ptr %tmp, i32 %i
@@ -79,20 +90,26 @@ bb:
   ret i32 %tmp4
 }
 
-; CHECK-LABEL: @nested_const_expr(
-; CHECK: store i32 1, ptr addrspace(3) getelementptr inbounds ([10 x float], ptr addrspace(3) @array, i64 0, i64 1), align 4
 define amdgpu_kernel void @nested_const_expr() #0 {
+; CHECK-LABEL: define amdgpu_kernel void @nested_const_expr(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT:    store i32 1, ptr addrspace(3) getelementptr inbounds ([10 x float], ptr addrspace(3) @array, i64 0, i64 1), align 4
+; CHECK-NEXT:    ret void
+;
   store i32 1, ptr bitcast (ptr getelementptr ([10 x float], ptr addrspacecast (ptr addrspace(3) @array to ptr), i64 0, i64 1) to ptr), align 4
 
   ret void
 }
 
-; CHECK-LABEL: @rauw(
-; CHECK: %addr = getelementptr float, ptr addrspace(1) %input, i64 10
-; CHECK-NEXT: %v = load float, ptr addrspace(1) %addr
-; CHECK-NEXT: store float %v, ptr addrspace(1) %addr
-; CHECK-NEXT: ret void
 define amdgpu_kernel void @rauw(ptr addrspace(1) %input) #0 {
+; CHECK-LABEL: define amdgpu_kernel void @rauw(
+; CHECK-SAME: ptr addrspace(1) [[INPUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[ADDR:%.*]] = getelementptr float, ptr addrspace(1) [[INPUT]], i64 10
+; CHECK-NEXT:    [[V:%.*]] = load float, ptr addrspace(1) [[ADDR]], align 4
+; CHECK-NEXT:    store float [[V]], ptr addrspace(1) [[ADDR]], align 4
+; CHECK-NEXT:    ret void
+;
 bb:
   %generic_input = addrspacecast ptr addrspace(1) %input to ptr
   %addr = getelementptr float, ptr %generic_input, i64 10
@@ -102,20 +119,22 @@ bb:
 }
 
 ; FIXME: Should be able to eliminate the cast inside the loop
-; CHECK-LABEL: @loop(
-
-; CHECK: %end = getelementptr float, ptr addrspace(3) @array, i64 10
-; CHECK: br label %loop
-
-; CHECK: loop:                                             ; preds = %loop, %entry
-; CHECK: %i = phi ptr addrspace(3) [ @array, %entry ], [ %i2, %loop ]
-; CHECK: %v = load float, ptr addrspace(3) %i
-; CHECK: call void @use(float %v)
-; CHECK: %i2 = getelementptr float, ptr addrspace(3) %i, i64 1
-; CHECK: %exit_cond = icmp eq ptr addrspace(3) %i2, %end
-
-; CHECK: br i1 %exit_cond, label %exit, label %loop
 define amdgpu_kernel void @loop() #0 {
+; CHECK-LABEL: define amdgpu_kernel void @loop(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[END:%.*]] = getelementptr float, ptr addrspace(3) @array, i64 10
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[I:%.*]] = phi ptr addrspace(3) [ @array, %[[ENTRY]] ], [ [[I2:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[V:%.*]] = load float, ptr addrspace(3) [[I]], align 4
+; CHECK-NEXT:    call void @use(float [[V]])
+; CHECK-NEXT:    [[I2]] = getelementptr float, ptr addrspace(3) [[I]], i64 1
+; CHECK-NEXT:    [[EXIT_COND:%.*]] = icmp eq ptr addrspace(3) [[I2]], [[END]]
+; CHECK-NEXT:    br i1 [[EXIT_COND]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
 entry:
   %p = addrspacecast ptr addrspace(3) @array to ptr
   %end = getelementptr float, ptr %p, i64 10
@@ -135,19 +154,23 @@ exit:                                             ; preds = %loop
 
 @generic_end = external addrspace(1) global ptr
 
-; CHECK-LABEL: @loop_with_generic_bound(
-; CHECK: %end = load ptr, ptr addrspace(1) @generic_end
-; CHECK: br label %loop
-
-; CHECK: loop:
-; CHECK: %i = phi ptr addrspace(3) [ @array, %entry ], [ %i2, %loop ]
-; CHECK: %v = load float, ptr addrspace(3) %i
-; CHECK: call void @use(float %v)
-; CHECK: %i2 = getelementptr float, ptr addrspace(3) %i, i64 1
-; CHECK: %0 = addrspacecast ptr addrspace(3) %i2 to ptr
-; CHECK: %exit_cond = icmp eq ptr %0, %end
-; CHECK: br i1 %exit_cond, label %exit, label %loop
 define amdgpu_kernel void @loop_with_generic_bound() #0 {
+; CHECK-LABEL: define amdgpu_kernel void @loop_with_generic_bound(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[END:%.*]] = load ptr, ptr addrspace(1) @generic_end, align 8
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[I:%.*]] = phi ptr addrspace(3) [ @array, %[[ENTRY]] ], [ [[I2:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[V:%.*]] = load float, ptr addrspace(3) [[I]], align 4
+; CHECK-NEXT:    call void @use(float [[V]])
+; CHECK-NEXT:    [[I2]] = getelementptr float, ptr addrspace(3) [[I]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast ptr addrspace(3) [[I2]] to ptr
+; CHECK-NEXT:    [[EXIT_COND:%.*]] = icmp eq ptr [[TMP0]], [[END]]
+; CHECK-NEXT:    br i1 [[EXIT_COND]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
 entry:
   %p = addrspacecast ptr addrspace(3) @array to ptr
   %end = load ptr, ptr addrspace(1) @generic_end
@@ -165,11 +188,14 @@ exit:                                             ; preds = %loop
   ret void
 }
 
-; CHECK-LABEL: @select_bug(
-; CHECK: %sel = select i1 icmp ne (ptr inttoptr (i64 4873 to ptr), ptr null), i64 73, i64 93
-; CHECK: %add.ptr157 = getelementptr inbounds i64, ptr undef, i64 %sel
-; CHECK: %cmp169 = icmp uge ptr undef, %add.ptr157
 define void @select_bug() #0 {
+; CHECK-LABEL: define void @select_bug(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 icmp ne (ptr inttoptr (i64 4873 to ptr), ptr null), i64 73, i64 93
+; CHECK-NEXT:    [[ADD_PTR157:%.*]] = getelementptr inbounds i64, ptr undef, i64 [[SEL]]
+; CHECK-NEXT:    [[CMP169:%.*]] = icmp uge ptr undef, [[ADD_PTR157]]
+; CHECK-NEXT:    unreachable
+;
   %sel = select i1 icmp ne (ptr inttoptr (i64 4873 to ptr), ptr null), i64 73, i64 93
   %add.ptr157 = getelementptr inbounds i64, ptr undef, i64 %sel
   %cmp169 = icmp uge ptr undef, %add.ptr157

diff --git a/llvm/test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll b/llvm/test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll
index e6b517a73fa46..23c5f99e5d086 100644
--- a/llvm/test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; RUN: opt -S -mtriple=nvptx64-nvidia-cuda -passes=infer-address-spaces %s | FileCheck %s
 
 target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
@@ -6,18 +7,23 @@ target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
 
 @var1 = local_unnamed_addr addrspace(3) externally_initialized global %struct.bar undef, align 8
 
-; CHECK-LABEL: @bug31948(
-; CHECK: %tmp = load ptr, ptr addrspace(3) getelementptr inbounds (%struct.bar, ptr addrspace(3) @var1, i64 0, i32 1), align 8
-; CHECK: %tmp1 = load float, ptr %tmp, align 4
-; CHECK: store float %conv1, ptr %tmp, align 4
-; CHECK: store i32 32, ptr addrspace(3) getelementptr inbounds (%struct.bar, ptr addrspace(3) @var1, i64 0, i32 1), align 4
 define void @bug31948(float %a, ptr nocapture readnone %x, ptr nocapture readnone %y) local_unnamed_addr #0 {
+; CHECK-LABEL: define void @bug31948(
+; CHECK-SAME: float [[A:%.*]], ptr nocapture readnone [[X:%.*]], ptr nocapture readnone [[Y:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP:%.*]] = load ptr, ptr addrspace(3) getelementptr inbounds ([[STRUCT_BAR:%.*]], ptr addrspace(3) @var1, i64 0, i32 1), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[TMP]], align 4
+; CHECK-NEXT:    [[CONV1:%.*]] = fadd float [[TMP1]], 1.000000e+00
+; CHECK-NEXT:    store float [[CONV1]], ptr [[TMP]], align 4
+; CHECK-NEXT:    store i32 32, ptr addrspace(3) getelementptr inbounds ([[STRUCT_BAR]], ptr addrspace(3) @var1, i64 0, i32 1), align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %tmp = load ptr, ptr getelementptr (%struct.bar, ptr addrspacecast (ptr addrspace(3) @var1 to ptr), i64 0, i32 1), align 8
   %tmp1 = load float, ptr %tmp, align 4
   %conv1 = fadd float %tmp1, 1.000000e+00
   store float %conv1, ptr %tmp, align 4
-  store i32 32, ptr bitcast (ptr getelementptr (%struct.bar, ptr addrspacecast (ptr addrspace(3) @var1 to ptr), i64 0, i32 1) to ptr), align 4
+  store i32 32, ptr getelementptr (%struct.bar, ptr addrspacecast (ptr addrspace(3) @var1 to ptr), i64 0, i32 1), align 4
   ret void
 }
 
