[llvm] r298370 - [X86][AVX] Tests showing missing SHUFPD + ZERO lowering
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 21 06:30:40 PDT 2017
Author: rksimon
Date: Tue Mar 21 08:30:40 2017
New Revision: 298370
URL: http://llvm.org/viewvc/llvm-project?rev=298370&view=rev
Log:
[X86][AVX] Tests showing missing SHUFPD + ZERO lowering
This lowers to SHUFPD when the second shuffle input is a plain zeroinitializer, but not when it is a build vector that demanded-elements optimization has reduced to a single zero element.
Modified:
llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll?rev=298370&r1=298369&r2=298370&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll Tue Mar 21 08:30:40 2017
@@ -544,6 +544,34 @@ define <4 x double> @shuffle_v4f64_1z3z(
ret <4 x double> %shuffle
}
+define <4 x double> @shuffle_v4f64_1z2z(<4 x double> %a, <4 x double> %b) {
+; AVX1-LABEL: shuffle_v4f64_1z2z:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4f64_1z2z:
+; AVX2: # BB#0:
+; AVX2-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,2,0]
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v4f64_1z2z:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
+; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,2,0]
+; AVX512VL-NEXT: retq
+ %1 = shufflevector <4 x double> %a, <4 x double> <double 0.000000e+00, double undef, double undef, double undef>, <4 x i32> <i32 1, i32 4, i32 2, i32 4>
+ ret <4 x double> %1
+}
+
define <4 x i64> @shuffle_v4i64_0000(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0000:
; AVX1: # BB#0:
@@ -1536,3 +1564,56 @@ define <4 x i64> @shuffle_v4i64_1230(<4
%shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
ret <4 x i64> %shuffle
}
+
+define <4 x i64> @shuffle_v4i64_z0z3(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: shuffle_v4i64_z0z3:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,2,3]
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i64_z0z3:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,3]
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v4i64_z0z3:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,3]
+; AVX512VL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX512VL-NEXT: retq
+ %1 = shufflevector <4 x i64> %a, <4 x i64> <i64 0, i64 undef, i64 undef, i64 undef>, <4 x i32> <i32 4, i32 0, i32 4, i32 3>
+ ret <4 x i64> %1
+}
+
+define <4 x i64> @shuffle_v4i64_1z2z(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: shuffle_v4i64_1z2z:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i64_1z2z:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,0,2,0]
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v4i64_1z2z:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
+; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,0,2,0]
+; AVX512VL-NEXT: retq
+ %1 = shufflevector <4 x i64> %a, <4 x i64> <i64 0, i64 undef, i64 undef, i64 undef>, <4 x i32> <i32 1, i32 4, i32 2, i32 4>
+ ret <4 x i64> %1
+}
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll?rev=298370&r1=298369&r2=298370&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll Tue Mar 21 08:30:40 2017
@@ -976,6 +976,24 @@ define <8 x double> @shuffle_v8f64_f5112
ret <8 x double> %shuffle
}
+define <8 x double> @shuffle_v8f64_1z2z5z6z(<8 x double> %a, <8 x double> %b) {
+; AVX512F-LABEL: shuffle_v8f64_1z2z5z6z:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512F-NEXT: vmovapd {{.*#+}} zmm2 = [1,8,2,8,5,8,6,8]
+; AVX512F-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: shuffle_v8f64_1z2z5z6z:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512F-32-NEXT: vmovapd {{.*#+}} zmm2 = [1,0,8,0,2,0,8,0,5,0,8,0,6,0,8,0]
+; AVX512F-32-NEXT: vpermt2pd %zmm1, %zmm2, %zmm0
+; AVX512F-32-NEXT: retl
+ %shuffle = shufflevector <8 x double> %a, <8 x double> <double 0.000000e+00, double undef, double undef, double undef, double undef, double undef, double undef, double undef>, <8 x i32> <i32 1, i32 8, i32 2, i32 8, i32 5, i32 8, i32 6, i32 8>
+ ret <8 x double> %shuffle
+}
+
define <8 x i64> @shuffle_v8i64_00000000(<8 x i64> %a, <8 x i64> %b) {
;
; AVX512F-LABEL: shuffle_v8i64_00000000:
More information about the llvm-commits
mailing list