[llvm] b215adf - [SLP][AMDGPU] Regenerate packed-math tests and remove unused check prefix

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 6 09:41:00 PST 2020


Author: Simon Pilgrim
Date: 2020-11-06T17:27:13Z
New Revision: b215adf4edca766c1f06727f1c782a9e8d4a3c2e

URL: https://github.com/llvm/llvm-project/commit/b215adf4edca766c1f06727f1c782a9e8d4a3c2e
DIFF: https://github.com/llvm/llvm-project/commit/b215adf4edca766c1f06727f1c782a9e8d4a3c2e.diff

LOG: [SLP][AMDGPU] Regenerate packed-math tests and remove unused check prefix

Added: 
    

Modified: 
    llvm/test/Transforms/SLPVectorizer/AMDGPU/packed-math.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/packed-math.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/packed-math.ll
index 55905a4c444c..67c7692376cf 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/packed-math.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/packed-math.ll
@@ -1,16 +1,21 @@
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -slp-vectorizer -dce < %s | FileCheck -check-prefixes=GCN,GFX9,GFX89 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -slp-vectorizer -dce < %s | FileCheck -check-prefixes=GCN,VI,GFX89 %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -slp-vectorizer -dce < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -slp-vectorizer -dce < %s | FileCheck -check-prefixes=GCN,VI %s
 
 ; FIXME: Should still like to vectorize the memory operations for VI
 
 ; Simple 3-pair chain with loads and stores
-; GCN-LABEL: @test1_as_3_3_3_v2f16(
-; GFX89: load <2 x half>, <2 x half> addrspace(3)*
-; GFX89: load <2 x half>, <2 x half> addrspace(3)*
-; GFX89: fmul <2 x half>
-; GFX89: store <2 x half> %{{.*}}, <2 x half> addrspace(3)* %
-; GFX89: ret
 define amdgpu_kernel void @test1_as_3_3_3_v2f16(half addrspace(3)* %a, half addrspace(3)* %b, half addrspace(3)* %c) {
+; GCN-LABEL: @test1_as_3_3_3_v2f16(
+; GCN-NEXT:    [[TMP1:%.*]] = bitcast half addrspace(3)* [[A:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    [[TMP2:%.*]] = load <2 x half>, <2 x half> addrspace(3)* [[TMP1]], align 2
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast half addrspace(3)* [[B:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    [[TMP4:%.*]] = load <2 x half>, <2 x half> addrspace(3)* [[TMP3]], align 2
+; GCN-NEXT:    [[TMP5:%.*]] = fmul <2 x half> [[TMP2]], [[TMP4]]
+; GCN-NEXT:    [[TMP6:%.*]] = bitcast half addrspace(3)* [[C:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    store <2 x half> [[TMP5]], <2 x half> addrspace(3)* [[TMP6]], align 2
+; GCN-NEXT:    ret void
+;
   %i0 = load half, half addrspace(3)* %a, align 2
   %i1 = load half, half addrspace(3)* %b, align 2
   %mul = fmul half %i0, %i1
@@ -25,13 +30,17 @@ define amdgpu_kernel void @test1_as_3_3_3_v2f16(half addrspace(3)* %a, half addr
   ret void
 }
 
-; GCN-LABEL: @test1_as_3_0_0(
-; GFX89: load <2 x half>, <2 x half> addrspace(3)*
-; GFX89: load <2 x half>, <2 x half>*
-; GFX89: fmul <2 x half>
-; GFX89: store <2 x half> %{{.*}}, <2 x half>* %
-; GFX89: ret
 define amdgpu_kernel void @test1_as_3_0_0(half addrspace(3)* %a, half* %b, half* %c) {
+; GCN-LABEL: @test1_as_3_0_0(
+; GCN-NEXT:    [[TMP1:%.*]] = bitcast half addrspace(3)* [[A:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    [[TMP2:%.*]] = load <2 x half>, <2 x half> addrspace(3)* [[TMP1]], align 2
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast half* [[B:%.*]] to <2 x half>*
+; GCN-NEXT:    [[TMP4:%.*]] = load <2 x half>, <2 x half>* [[TMP3]], align 2
+; GCN-NEXT:    [[TMP5:%.*]] = fmul <2 x half> [[TMP2]], [[TMP4]]
+; GCN-NEXT:    [[TMP6:%.*]] = bitcast half* [[C:%.*]] to <2 x half>*
+; GCN-NEXT:    store <2 x half> [[TMP5]], <2 x half>* [[TMP6]], align 2
+; GCN-NEXT:    ret void
+;
   %i0 = load half, half addrspace(3)* %a, align 2
   %i1 = load half, half* %b, align 2
   %mul = fmul half %i0, %i1
@@ -46,13 +55,17 @@ define amdgpu_kernel void @test1_as_3_0_0(half addrspace(3)* %a, half* %b, half*
   ret void
 }
 
-; GCN-LABEL: @test1_as_0_0_3_v2f16(
-; GFX89: load <2 x half>, <2 x half>*
-; GFX89: load <2 x half>, <2 x half>*
-; GFX89: fmul <2 x half>
-; GFX89: store <2 x half> %{{.*}}, <2 x half> addrspace(3)* %
-; GFX89: ret
 define amdgpu_kernel void @test1_as_0_0_3_v2f16(half* %a, half* %b, half addrspace(3)* %c) {
+; GCN-LABEL: @test1_as_0_0_3_v2f16(
+; GCN-NEXT:    [[TMP1:%.*]] = bitcast half* [[A:%.*]] to <2 x half>*
+; GCN-NEXT:    [[TMP2:%.*]] = load <2 x half>, <2 x half>* [[TMP1]], align 2
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast half* [[B:%.*]] to <2 x half>*
+; GCN-NEXT:    [[TMP4:%.*]] = load <2 x half>, <2 x half>* [[TMP3]], align 2
+; GCN-NEXT:    [[TMP5:%.*]] = fmul <2 x half> [[TMP2]], [[TMP4]]
+; GCN-NEXT:    [[TMP6:%.*]] = bitcast half addrspace(3)* [[C:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    store <2 x half> [[TMP5]], <2 x half> addrspace(3)* [[TMP6]], align 2
+; GCN-NEXT:    ret void
+;
   %i0 = load half, half* %a, align 2
   %i1 = load half, half* %b, align 2
   %mul = fmul half %i0, %i1
@@ -67,13 +80,19 @@ define amdgpu_kernel void @test1_as_0_0_3_v2f16(half* %a, half* %b, half addrspa
   ret void
 }
 
-; GCN-LABEL: @test1_fma_v2f16(
-; GFX9: load <2 x half>
-; GFX9: load <2 x half>
-; GFX9: load <2 x half>
-; GFX9: call <2 x half> @llvm.fma.v2f16(
-; GFX9: store <2 x half>
 define amdgpu_kernel void @test1_fma_v2f16(half addrspace(3)* %a, half addrspace(3)* %b, half addrspace(3)* %c, half addrspace(3)* %d) {
+; GCN-LABEL: @test1_fma_v2f16(
+; GCN-NEXT:    [[TMP1:%.*]] = bitcast half addrspace(3)* [[A:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    [[TMP2:%.*]] = load <2 x half>, <2 x half> addrspace(3)* [[TMP1]], align 2
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast half addrspace(3)* [[B:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    [[TMP4:%.*]] = load <2 x half>, <2 x half> addrspace(3)* [[TMP3]], align 2
+; GCN-NEXT:    [[TMP5:%.*]] = bitcast half addrspace(3)* [[C:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    [[TMP6:%.*]] = load <2 x half>, <2 x half> addrspace(3)* [[TMP5]], align 2
+; GCN-NEXT:    [[TMP7:%.*]] = call <2 x half> @llvm.fma.v2f16(<2 x half> [[TMP2]], <2 x half> [[TMP4]], <2 x half> [[TMP6]])
+; GCN-NEXT:    [[TMP8:%.*]] = bitcast half addrspace(3)* [[D:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    store <2 x half> [[TMP7]], <2 x half> addrspace(3)* [[TMP8]], align 2
+; GCN-NEXT:    ret void
+;
   %i0 = load half, half addrspace(3)* %a, align 2
   %i1 = load half, half addrspace(3)* %b, align 2
   %i2 = load half, half addrspace(3)* %c, align 2
@@ -91,11 +110,17 @@ define amdgpu_kernel void @test1_fma_v2f16(half addrspace(3)* %a, half addrspace
   ret void
 }
 
-; GCN-LABEL: @mul_scalar_v2f16(
-; GFX9: load <2 x half>
-; GFX9: fmul <2 x half>
-; GFX9: store <2 x half>
 define amdgpu_kernel void @mul_scalar_v2f16(half addrspace(3)* %a, half %scalar, half addrspace(3)* %c) {
+; GCN-LABEL: @mul_scalar_v2f16(
+; GCN-NEXT:    [[TMP1:%.*]] = bitcast half addrspace(3)* [[A:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    [[TMP2:%.*]] = load <2 x half>, <2 x half> addrspace(3)* [[TMP1]], align 2
+; GCN-NEXT:    [[TMP3:%.*]] = insertelement <2 x half> undef, half [[SCALAR:%.*]], i32 0
+; GCN-NEXT:    [[TMP4:%.*]] = insertelement <2 x half> [[TMP3]], half [[SCALAR]], i32 1
+; GCN-NEXT:    [[TMP5:%.*]] = fmul <2 x half> [[TMP2]], [[TMP4]]
+; GCN-NEXT:    [[TMP6:%.*]] = bitcast half addrspace(3)* [[C:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    store <2 x half> [[TMP5]], <2 x half> addrspace(3)* [[TMP6]], align 2
+; GCN-NEXT:    ret void
+;
   %i0 = load half, half addrspace(3)* %a, align 2
   %mul = fmul half %i0, %scalar
   %arrayidx3 = getelementptr inbounds half, half addrspace(3)* %a, i64 1
@@ -107,11 +132,15 @@ define amdgpu_kernel void @mul_scalar_v2f16(half addrspace(3)* %a, half %scalar,
   ret void
 }
 
-; GCN-LABEL: @fabs_v2f16
-; GFX9: load <2 x half>
-; GFX9: call <2 x half> @llvm.fabs.v2f16(
-; GFX9: store <2 x half>
 define amdgpu_kernel void @fabs_v2f16(half addrspace(3)* %a, half addrspace(3)* %c) {
+; GCN-LABEL: @fabs_v2f16(
+; GCN-NEXT:    [[TMP1:%.*]] = bitcast half addrspace(3)* [[A:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    [[TMP2:%.*]] = load <2 x half>, <2 x half> addrspace(3)* [[TMP1]], align 2
+; GCN-NEXT:    [[TMP3:%.*]] = call <2 x half> @llvm.fabs.v2f16(<2 x half> [[TMP2]])
+; GCN-NEXT:    [[TMP4:%.*]] = bitcast half addrspace(3)* [[C:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    store <2 x half> [[TMP3]], <2 x half> addrspace(3)* [[TMP4]], align 2
+; GCN-NEXT:    ret void
+;
   %i0 = load half, half addrspace(3)* %a, align 2
   %fabs0 = call half @llvm.fabs.f16(half %i0)
   %arrayidx3 = getelementptr inbounds half, half addrspace(3)* %a, i64 1
@@ -123,12 +152,20 @@ define amdgpu_kernel void @fabs_v2f16(half addrspace(3)* %a, half addrspace(3)*
   ret void
 }
 
-; GCN-LABEL: @test1_fabs_fma_v2f16(
-; GFX9: load <2 x half>
-; GFX9: call <2 x half> @llvm.fabs.v2f16(
-; GFX9: call <2 x half> @llvm.fma.v2f16(
-; GFX9: store <2 x half>
 define amdgpu_kernel void @test1_fabs_fma_v2f16(half addrspace(3)* %a, half addrspace(3)* %b, half addrspace(3)* %c, half addrspace(3)* %d) {
+; GCN-LABEL: @test1_fabs_fma_v2f16(
+; GCN-NEXT:    [[TMP1:%.*]] = bitcast half addrspace(3)* [[A:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    [[TMP2:%.*]] = load <2 x half>, <2 x half> addrspace(3)* [[TMP1]], align 2
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast half addrspace(3)* [[B:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    [[TMP4:%.*]] = load <2 x half>, <2 x half> addrspace(3)* [[TMP3]], align 2
+; GCN-NEXT:    [[TMP5:%.*]] = bitcast half addrspace(3)* [[C:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    [[TMP6:%.*]] = load <2 x half>, <2 x half> addrspace(3)* [[TMP5]], align 2
+; GCN-NEXT:    [[TMP7:%.*]] = call <2 x half> @llvm.fabs.v2f16(<2 x half> [[TMP2]])
+; GCN-NEXT:    [[TMP8:%.*]] = call <2 x half> @llvm.fma.v2f16(<2 x half> [[TMP7]], <2 x half> [[TMP4]], <2 x half> [[TMP6]])
+; GCN-NEXT:    [[TMP9:%.*]] = bitcast half addrspace(3)* [[D:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    store <2 x half> [[TMP8]], <2 x half> addrspace(3)* [[TMP9]], align 2
+; GCN-NEXT:    ret void
+;
   %i0 = load half, half addrspace(3)* %a, align 2
   %i1 = load half, half addrspace(3)* %b, align 2
   %i2 = load half, half addrspace(3)* %c, align 2
@@ -150,16 +187,23 @@ define amdgpu_kernel void @test1_fabs_fma_v2f16(half addrspace(3)* %a, half addr
   ret void
 }
 
-; FIXME: Should do vector load and extract component for fabs
-; GCN-LABEL: @test1_fabs_scalar_fma_v2f16(
-; GFX9: load half
-; GFX9: call half @llvm.fabs.f16(
-; GFX9: load <2 x half>
-; GFX9: load half
-; GFX9: load <2 x half>
-; GFX9: call <2 x half> @llvm.fma.v2f16(
-; GFX9: store <2 x half>
 define amdgpu_kernel void @test1_fabs_scalar_fma_v2f16(half addrspace(3)* %a, half addrspace(3)* %b, half addrspace(3)* %c, half addrspace(3)* %d) {
+; GCN-LABEL: @test1_fabs_scalar_fma_v2f16(
+; GCN-NEXT:    [[I1:%.*]] = load half, half addrspace(3)* [[B:%.*]], align 2
+; GCN-NEXT:    [[I1_FABS:%.*]] = call half @llvm.fabs.f16(half [[I1]])
+; GCN-NEXT:    [[TMP1:%.*]] = bitcast half addrspace(3)* [[A:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    [[TMP2:%.*]] = load <2 x half>, <2 x half> addrspace(3)* [[TMP1]], align 2
+; GCN-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds half, half addrspace(3)* [[B]], i64 1
+; GCN-NEXT:    [[I4:%.*]] = load half, half addrspace(3)* [[ARRAYIDX4]], align 2
+; GCN-NEXT:    [[TMP3:%.*]] = bitcast half addrspace(3)* [[C:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    [[TMP4:%.*]] = load <2 x half>, <2 x half> addrspace(3)* [[TMP3]], align 2
+; GCN-NEXT:    [[TMP5:%.*]] = insertelement <2 x half> undef, half [[I1_FABS]], i32 0
+; GCN-NEXT:    [[TMP6:%.*]] = insertelement <2 x half> [[TMP5]], half [[I4]], i32 1
+; GCN-NEXT:    [[TMP7:%.*]] = call <2 x half> @llvm.fma.v2f16(<2 x half> [[TMP2]], <2 x half> [[TMP6]], <2 x half> [[TMP4]])
+; GCN-NEXT:    [[TMP8:%.*]] = bitcast half addrspace(3)* [[D:%.*]] to <2 x half> addrspace(3)*
+; GCN-NEXT:    store <2 x half> [[TMP7]], <2 x half> addrspace(3)* [[TMP8]], align 2
+; GCN-NEXT:    ret void
+;
   %i0 = load half, half addrspace(3)* %a, align 2
   %i1 = load half, half addrspace(3)* %b, align 2
   %i2 = load half, half addrspace(3)* %c, align 2
@@ -179,11 +223,26 @@ define amdgpu_kernel void @test1_fabs_scalar_fma_v2f16(half addrspace(3)* %a, ha
   ret void
 }
 
-; GCN-LABEL: @canonicalize_v2f16
-; GFX9: load <2 x half>
-; GFX9: call <2 x half> @llvm.canonicalize.v2f16(
-; GFX9: store <2 x half>
 define amdgpu_kernel void @canonicalize_v2f16(half addrspace(3)* %a, half addrspace(3)* %c) {
+; GFX9-LABEL: @canonicalize_v2f16(
+; GFX9-NEXT:    [[TMP1:%.*]] = bitcast half addrspace(3)* [[A:%.*]] to <2 x half> addrspace(3)*
+; GFX9-NEXT:    [[TMP2:%.*]] = load <2 x half>, <2 x half> addrspace(3)* [[TMP1]], align 2
+; GFX9-NEXT:    [[TMP3:%.*]] = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> [[TMP2]])
+; GFX9-NEXT:    [[TMP4:%.*]] = bitcast half addrspace(3)* [[C:%.*]] to <2 x half> addrspace(3)*
+; GFX9-NEXT:    store <2 x half> [[TMP3]], <2 x half> addrspace(3)* [[TMP4]], align 2
+; GFX9-NEXT:    ret void
+;
+; VI-LABEL: @canonicalize_v2f16(
+; VI-NEXT:    [[I0:%.*]] = load half, half addrspace(3)* [[A:%.*]], align 2
+; VI-NEXT:    [[CANONICALIZE0:%.*]] = call half @llvm.canonicalize.f16(half [[I0]])
+; VI-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds half, half addrspace(3)* [[A]], i64 1
+; VI-NEXT:    [[I3:%.*]] = load half, half addrspace(3)* [[ARRAYIDX3]], align 2
+; VI-NEXT:    [[CANONICALIZE1:%.*]] = call half @llvm.canonicalize.f16(half [[I3]])
+; VI-NEXT:    store half [[CANONICALIZE0]], half addrspace(3)* [[C:%.*]], align 2
+; VI-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds half, half addrspace(3)* [[C]], i64 1
+; VI-NEXT:    store half [[CANONICALIZE1]], half addrspace(3)* [[ARRAYIDX5]], align 2
+; VI-NEXT:    ret void
+;
   %i0 = load half, half addrspace(3)* %a, align 2
   %canonicalize0 = call half @llvm.canonicalize.f16(half %i0)
   %arrayidx3 = getelementptr inbounds half, half addrspace(3)* %a, i64 1


        


More information about the llvm-commits mailing list