[llvm] eb4037f - [AMDGPU] Fix regenerated test checks (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 10 02:57:02 PST 2022


Author: Nikita Popov
Date: 2022-03-10T11:56:17+01:00
New Revision: eb4037ff42e1d98901f0bbfea38aca09d1f88fe8

URL: https://github.com/llvm/llvm-project/commit/eb4037ff42e1d98901f0bbfea38aca09d1f88fe8
DIFF: https://github.com/llvm/llvm-project/commit/eb4037ff42e1d98901f0bbfea38aca09d1f88fe8.diff

LOG: [AMDGPU] Fix regenerated test checks (NFC)

I used the wrong build to generate the checks, sorry :(
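Checks like these are typically regenerated with llvm/utils/update_test_checks.py pointed at the intended opt binary; a sketch of the invocation (the build path below is a placeholder, not taken from this commit):

    # build path is a placeholder; point --opt-binary at the intended build's opt
    llvm/utils/update_test_checks.py \
        --opt-binary=<path-to-build>/bin/opt \
        llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-icmp.ll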

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-icmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-icmp.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-icmp.ll
index b1f3402c69f16..24aa35ccf9892 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-icmp.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-icmp.ll
@@ -7,10 +7,25 @@
 
 define amdgpu_kernel void @lds_promoted_alloca_icmp_same_derived_pointer(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 ; CHECK-LABEL: @lds_promoted_alloca_icmp_same_derived_pointer(
-; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca [16 x i32], align 4
-; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr inbounds [16 x i32], [16 x i32]* [[ALLOCA]], i32 0, i32 [[A:%.*]]
-; CHECK-NEXT:    [[PTR1:%.*]] = getelementptr inbounds [16 x i32], [16 x i32]* [[ALLOCA]], i32 0, i32 [[B:%.*]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32* [[PTR0]], [[PTR1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call noalias nonnull dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8 addrspace(4)* [[TMP1]] to i32 addrspace(4)*
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32 addrspace(4)* [[TMP2]], i64 1
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32 addrspace(4)* [[TMP3]], align 4, !invariant.load !0
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32 addrspace(4)* [[TMP2]], i64 2
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32 addrspace(4)* [[TMP5]], align 4, !range [[RNG1:![0-9]+]], !invariant.load !0
+; CHECK-NEXT:    [[TMP7:%.*]] = lshr i32 [[TMP4]], 16
+; CHECK-NEXT:    [[TMP8:%.*]] = call i32 @llvm.amdgcn.workitem.id.x(), !range [[RNG2:![0-9]+]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call i32 @llvm.amdgcn.workitem.id.y(), !range [[RNG2]]
+; CHECK-NEXT:    [[TMP10:%.*]] = call i32 @llvm.amdgcn.workitem.id.z(), !range [[RNG2]]
+; CHECK-NEXT:    [[TMP11:%.*]] = mul nuw nsw i32 [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    [[TMP12:%.*]] = mul i32 [[TMP11]], [[TMP8]]
+; CHECK-NEXT:    [[TMP13:%.*]] = mul nuw nsw i32 [[TMP9]], [[TMP6]]
+; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[TMP12]], [[TMP13]]
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP14]], [[TMP10]]
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [256 x [16 x i32]], [256 x [16 x i32]] addrspace(3)* @lds_promoted_alloca_icmp_same_derived_pointer.alloca, i32 0, i32 [[TMP15]]
+; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr inbounds [16 x i32], [16 x i32] addrspace(3)* [[TMP16]], i32 0, i32 [[A:%.*]]
+; CHECK-NEXT:    [[PTR1:%.*]] = getelementptr inbounds [16 x i32], [16 x i32] addrspace(3)* [[TMP16]], i32 0, i32 [[B:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 addrspace(3)* [[PTR0]], [[PTR1]]
 ; CHECK-NEXT:    [[ZEXT:%.*]] = zext i1 [[CMP]] to i32
 ; CHECK-NEXT:    store volatile i32 [[ZEXT]], i32 addrspace(1)* [[OUT:%.*]], align 4
 ; CHECK-NEXT:    ret void
@@ -35,9 +50,24 @@ define amdgpu_kernel void @lds_promoted_alloca_icmp_same_derived_pointer(i32 add
 
 define amdgpu_kernel void @lds_promoted_alloca_icmp_null_rhs(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 ; CHECK-LABEL: @lds_promoted_alloca_icmp_null_rhs(
-; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca [16 x i32], align 4
-; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr inbounds [16 x i32], [16 x i32]* [[ALLOCA]], i32 0, i32 [[A:%.*]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32* [[PTR0]], null
+; CHECK-NEXT:    [[TMP1:%.*]] = call noalias nonnull dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8 addrspace(4)* [[TMP1]] to i32 addrspace(4)*
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32 addrspace(4)* [[TMP2]], i64 1
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32 addrspace(4)* [[TMP3]], align 4, !invariant.load !0
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32 addrspace(4)* [[TMP2]], i64 2
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32 addrspace(4)* [[TMP5]], align 4, !range [[RNG1]], !invariant.load !0
+; CHECK-NEXT:    [[TMP7:%.*]] = lshr i32 [[TMP4]], 16
+; CHECK-NEXT:    [[TMP8:%.*]] = call i32 @llvm.amdgcn.workitem.id.x(), !range [[RNG2]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call i32 @llvm.amdgcn.workitem.id.y(), !range [[RNG2]]
+; CHECK-NEXT:    [[TMP10:%.*]] = call i32 @llvm.amdgcn.workitem.id.z(), !range [[RNG2]]
+; CHECK-NEXT:    [[TMP11:%.*]] = mul nuw nsw i32 [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    [[TMP12:%.*]] = mul i32 [[TMP11]], [[TMP8]]
+; CHECK-NEXT:    [[TMP13:%.*]] = mul nuw nsw i32 [[TMP9]], [[TMP6]]
+; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[TMP12]], [[TMP13]]
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP14]], [[TMP10]]
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [256 x [16 x i32]], [256 x [16 x i32]] addrspace(3)* @lds_promoted_alloca_icmp_null_rhs.alloca, i32 0, i32 [[TMP15]]
+; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr inbounds [16 x i32], [16 x i32] addrspace(3)* [[TMP16]], i32 0, i32 [[A:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 addrspace(3)* [[PTR0]], null
 ; CHECK-NEXT:    [[ZEXT:%.*]] = zext i1 [[CMP]] to i32
 ; CHECK-NEXT:    store volatile i32 [[ZEXT]], i32 addrspace(1)* [[OUT:%.*]], align 4
 ; CHECK-NEXT:    ret void
@@ -60,9 +90,24 @@ define amdgpu_kernel void @lds_promoted_alloca_icmp_null_rhs(i32 addrspace(1)* %
 
 define amdgpu_kernel void @lds_promoted_alloca_icmp_null_lhs(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 ; CHECK-LABEL: @lds_promoted_alloca_icmp_null_lhs(
-; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca [16 x i32], align 4
-; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr inbounds [16 x i32], [16 x i32]* [[ALLOCA]], i32 0, i32 [[A:%.*]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32* null, [[PTR0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call noalias nonnull dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8 addrspace(4)* [[TMP1]] to i32 addrspace(4)*
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32 addrspace(4)* [[TMP2]], i64 1
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32 addrspace(4)* [[TMP3]], align 4, !invariant.load !0
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32 addrspace(4)* [[TMP2]], i64 2
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32 addrspace(4)* [[TMP5]], align 4, !range [[RNG1]], !invariant.load !0
+; CHECK-NEXT:    [[TMP7:%.*]] = lshr i32 [[TMP4]], 16
+; CHECK-NEXT:    [[TMP8:%.*]] = call i32 @llvm.amdgcn.workitem.id.x(), !range [[RNG2]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call i32 @llvm.amdgcn.workitem.id.y(), !range [[RNG2]]
+; CHECK-NEXT:    [[TMP10:%.*]] = call i32 @llvm.amdgcn.workitem.id.z(), !range [[RNG2]]
+; CHECK-NEXT:    [[TMP11:%.*]] = mul nuw nsw i32 [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    [[TMP12:%.*]] = mul i32 [[TMP11]], [[TMP8]]
+; CHECK-NEXT:    [[TMP13:%.*]] = mul nuw nsw i32 [[TMP9]], [[TMP6]]
+; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[TMP12]], [[TMP13]]
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP14]], [[TMP10]]
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [256 x [16 x i32]], [256 x [16 x i32]] addrspace(3)* @lds_promoted_alloca_icmp_null_lhs.alloca, i32 0, i32 [[TMP15]]
+; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr inbounds [16 x i32], [16 x i32] addrspace(3)* [[TMP16]], i32 0, i32 [[A:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 addrspace(3)* null, [[PTR0]]
 ; CHECK-NEXT:    [[ZEXT:%.*]] = zext i1 [[CMP]] to i32
 ; CHECK-NEXT:    store volatile i32 [[ZEXT]], i32 addrspace(1)* [[OUT:%.*]], align 4
 ; CHECK-NEXT:    ret void

More information about the llvm-commits mailing list