[llvm] r254060 - [X86][AVX] Regenerate Splat OptSize tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 25 01:06:18 PST 2015


Author: rksimon
Date: Wed Nov 25 03:06:17 2015
New Revision: 254060

URL: http://llvm.org/viewvc/llvm-project?rev=254060&view=rev
Log:
[X86][AVX] Regenerate Splat OptSize tests

Tidied up the triple and regenerated the tests using update_llc_test_checks.py
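
For anyone reproducing the regeneration locally, the invocation is a sketch along these lines, assuming a built llc is on PATH (the script's flag for pointing at a specific llc binary has varied across revisions):

    python utils/update_llc_test_checks.py test/CodeGen/X86/splat-for-size.ll

The script runs each RUN line's llc command itself and rewrites the CHECK lines in place, which is why the assertions in the new version of the test are exhaustive per-prefix listings rather than the hand-written spot checks they replace.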

Modified:
    llvm/trunk/test/CodeGen/X86/splat-for-size.ll

Modified: llvm/trunk/test/CodeGen/X86/splat-for-size.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/splat-for-size.ll?rev=254060&r1=254059&r2=254060&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/splat-for-size.ll (original)
+++ llvm/trunk/test/CodeGen/X86/splat-for-size.ll Wed Nov 25 03:06:17 2015
@@ -1,141 +1,191 @@
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=avx < %s | FileCheck %s -check-prefix=CHECK --check-prefix=AVX
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=avx2 < %s | FileCheck %s -check-prefix=CHECK --check-prefix=AVX2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2
 
-; Check constant loads of every 128-bit and 256-bit vector type 
+; Check constant loads of every 128-bit and 256-bit vector type
 ; for size optimization using splat ops available with AVX and AVX2.
 
 ; There is no AVX broadcast from double to 128-bit vector because movddup has been around since SSE3 (grrr).
 define <2 x double> @splat_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: splat_v2f64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
   %add = fadd <2 x double> %x, <double 1.0, double 1.0>
   ret <2 x double> %add
-; CHECK-LABEL: splat_v2f64
-; CHECK: vmovddup
-; CHECK: vaddpd 
-; CHECK-NEXT: retq
 }
 
 define <4 x double> @splat_v4f64(<4 x double> %x) #1 {
+; CHECK-LABEL: splat_v4f64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vbroadcastsd {{.*}}(%rip), %ymm1
+; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %add = fadd <4 x double> %x, <double 1.0, double 1.0, double 1.0, double 1.0>
   ret <4 x double> %add
-; CHECK-LABEL: splat_v4f64
-; CHECK: vbroadcastsd 
-; CHECK-NEXT: vaddpd
-; CHECK-NEXT: retq
 }
 
 define <4 x float> @splat_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: splat_v4f32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
+; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
   %add = fadd <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
   ret <4 x float> %add
-; CHECK-LABEL: splat_v4f32
-; CHECK: vbroadcastss 
-; CHECK-NEXT: vaddps
-; CHECK-NEXT: retq
 }
 
 define <8 x float> @splat_v8f32(<8 x float> %x) #1 {
+; CHECK-LABEL: splat_v8f32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vbroadcastss {{.*}}(%rip), %ymm1
+; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %add = fadd <8 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>
   ret <8 x float> %add
-; CHECK-LABEL: splat_v8f32
-; CHECK: vbroadcastss 
-; CHECK-NEXT: vaddps
-; CHECK-NEXT: retq
 }
 
 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
 define <2 x i64> @splat_v2i64(<2 x i64> %x) #1 {
+; CHECK-LABEL: splat_v2i64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; CHECK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
   %add = add <2 x i64> %x, <i64 1, i64 1>
   ret <2 x i64> %add
-; CHECK-LABEL: splat_v2i64
-; CHECK: vmovddup 
-; CHECK: vpaddq
-; CHECK-NEXT: retq
 }
 
 ; AVX can't do 256-bit integer ops, so we split this into two 128-bit vectors,
 ; and then we fake it: use vmovddup to splat 64-bit value.
 define <4 x i64> @splat_v4i64(<4 x i64> %x) #0 {
+; AVX-LABEL: splat_v4i64:
+; AVX:       # BB#0:
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: splat_v4i64:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm1
+; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
   %add = add <4 x i64> %x, <i64 1, i64 1, i64 1, i64 1>
   ret <4 x i64> %add
-; CHECK-LABEL: splat_v4i64
-; AVX: vmovddup
-; AVX: vpaddq 
-; AVX: vpaddq 
-; AVX2: vpbroadcastq 
-; AVX2: vpaddq 
-; CHECK: retq
 }
 
 ; AVX can't do integer splats, so fake it: use vbroadcastss to splat 32-bit value.
 define <4 x i32> @splat_v4i32(<4 x i32> %x) #1 {
+; AVX-LABEL: splat_v4i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: splat_v4i32:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
   %add = add <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %add
-; CHECK-LABEL: splat_v4i32
-; AVX: vbroadcastss
-; AVX2: vpbroadcastd 
-; CHECK-NEXT: vpaddd 
-; CHECK-NEXT: retq
 }
 
 ; AVX can't do integer splats, so fake it: use vbroadcastss to splat 32-bit value.
 define <8 x i32> @splat_v8i32(<8 x i32> %x) #0 {
+; AVX-LABEL: splat_v8i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
+; AVX-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: splat_v8i32:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
   %add = add <8 x i32> %x, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   ret <8 x i32> %add
-; CHECK-LABEL: splat_v8i32
-; AVX: vbroadcastss
-; AVX: vpaddd 
-; AVX: vpaddd 
-; AVX2: vpbroadcastd 
-; AVX2: vpaddd 
-; CHECK: retq
 }
 
 ; AVX can't do integer splats, and there's no broadcast fakery for 16-bit. Could use pshuflw, etc?
 define <8 x i16> @splat_v8i16(<8 x i16> %x) #1 {
+; AVX-LABEL: splat_v8i16:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: splat_v8i16:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpbroadcastw {{.*}}(%rip), %xmm1
+; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
   %add = add <8 x i16> %x, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ret <8 x i16> %add
-; CHECK-LABEL: splat_v8i16
-; AVX-NOT: broadcast
-; AVX2: vpbroadcastw 
-; CHECK: vpaddw 
-; CHECK-NEXT: retq
 }
 
 ; AVX can't do integer splats, and there's no broadcast fakery for 16-bit. Could use pshuflw, etc?
 define <16 x i16> @splat_v16i16(<16 x i16> %x) #0 {
+; AVX-LABEL: splat_v16i16:
+; AVX:       # BB#0:
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1]
+; AVX-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: splat_v16i16:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpbroadcastw {{.*}}(%rip), %ymm1
+; AVX2-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
   %add = add <16 x i16> %x, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ret <16 x i16> %add
-; CHECK-LABEL: splat_v16i16
-; AVX-NOT: broadcast
-; AVX: vpaddw 
-; AVX: vpaddw 
-; AVX2: vpbroadcastw 
-; AVX2: vpaddw 
-; CHECK: retq
 }
 
 ; AVX can't do integer splats, and there's no broadcast fakery for 8-bit. Could use pshufb, etc?
 define <16 x i8> @splat_v16i8(<16 x i8> %x) #1 {
+; AVX-LABEL: splat_v16i8:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: splat_v16i8:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpbroadcastb {{.*}}(%rip), %xmm1
+; AVX2-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
   %add = add <16 x i8> %x, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   ret <16 x i8> %add
-; CHECK-LABEL: splat_v16i8
-; AVX-NOT: broadcast
-; AVX2: vpbroadcastb 
-; CHECK: vpaddb 
-; CHECK-NEXT: retq
 }
 
 ; AVX can't do integer splats, and there's no broadcast fakery for 8-bit. Could use pshufb, etc?
 define <32 x i8> @splat_v32i8(<32 x i8> %x) #0 {
+; AVX-LABEL: splat_v32i8:
+; AVX:       # BB#0:
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: splat_v32i8:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpbroadcastb {{.*}}(%rip), %ymm1
+; AVX2-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
   %add = add <32 x i8> %x, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   ret <32 x i8> %add
-; CHECK-LABEL: splat_v32i8
-; AVX-NOT: broadcast
-; AVX: vpaddb 
-; AVX: vpaddb 
-; AVX2: vpbroadcastb 
-; AVX2: vpaddb 
-; CHECK: retq
 }
 
 ; PR23259: Verify that ISel doesn't crash with a 'fatal error in backend'
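
The attribute groups #0 and #1 referenced by the functions above are defined at the bottom of the file and are not shown in this hunk; in this test they conventionally map to optsize and minsize. As a minimal standalone sketch under that assumption (a hypothetical file, not part of this commit), the size attribute is what flips the splatted constant from a full constant-pool load into a broadcast:

    ; With optsize, AVX-targeted llc prefers a 4-byte broadcast load
    ; (vbroadcastss) over a 16-byte constant-pool vector.
    define <4 x float> @splat_small(<4 x float> %x) optsize {
      %add = fadd <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
      ret <4 x float> %add
    }

    ; Without a size attribute, a plain folded load of the whole
    ; constant vector (vaddps with a memory operand) is expected.
    define <4 x float> @splat_normal(<4 x float> %x) {
      %add = fadd <4 x float> %x, <float 1.0, float 1.0, float 1.0, float 1.0>
      ret <4 x float> %add
    }

Feeding that file to llc -mtriple=x86_64-unknown-unknown -mattr=+avx should show the two lowerings side by side.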
