[llvm] r325349 - [X86][SSE] Allow float domain crossing if we are merging 2 or more shuffles and the root started as a float domain shuffle
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 16 06:57:25 PST 2018
Author: rksimon
Date: Fri Feb 16 06:57:25 2018
New Revision: 325349
URL: http://llvm.org/viewvc/llvm-project?rev=325349&view=rev
Log:
[X86][SSE] Allow float domain crossing if we are merging 2 or more shuffles and the root started as a float domain shuffle
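The effect is visible in the vector-trunc.ll diff below: a logical shift
right by 32 followed by a truncate previously lowered to psrlq+shufps
pairs, and now folds to a single shufps that selects the odd 32-bit lanes.
A sketch of the kind of IR involved (reconstructed here from the
trunc8i64_8i32_lshr test name and the shift amounts in the diff; the test
body itself is not shown in this mail):

; Each i64 lane is shifted right by 32 and truncated to i32, i.e. we keep
; the high 32-bit half of every i64 element. The backend can now select
; those halves directly with shufps lane indices [1,3] instead of first
; emitting psrlq $32 shifts.
define <8 x i32> @trunc8i64_8i32_lshr(<8 x i64> %a) {
entry:
  %shr = lshr <8 x i64> %a, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
  %trunc = trunc <8 x i64> %shr to <8 x i32>
  ret <8 x i32> %trunc
}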
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll
llvm/trunk/test/CodeGen/X86/combine-sra.ll
llvm/trunk/test/CodeGen/X86/oddshuffles.ll
llvm/trunk/test/CodeGen/X86/pmul.ll
llvm/trunk/test/CodeGen/X86/vector-trunc.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=325349&r1=325348&r2=325349&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Fri Feb 16 06:57:25 2018
@@ -28747,6 +28747,7 @@ static SDValue combineX86ShuffleChain(Ar
unsigned NumRootElts = RootVT.getVectorNumElements();
unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
+ (RootVT.isFloatingPoint() && Depth >= 2) ||
(RootVT.is256BitVector() && !Subtarget.hasAVX2());
// Don't combine if we are a AVX512/EVEX target and the mask element size
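To spell out the new predicate (my reading, not commentary from the
commit): FloatDomain was already forced when either source operand type is
floating point, or on 256-bit vectors without AVX2, where integer shuffles
are unavailable. The added (RootVT.isFloatingPoint() && Depth >= 2) term
also keeps a chain in the float domain once at least two shuffles have
been merged and the root shuffle itself produced a floating-point type,
presumably because saving a shuffle instruction outweighs a possible
domain-crossing penalty.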
Modified: llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll?rev=325349&r1=325348&r2=325349&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll (original)
+++ llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll Fri Feb 16 06:57:25 2018
@@ -686,7 +686,7 @@ define <2 x i64> @_clearupper2xi64b(<2 x
define <4 x i64> @_clearupper4xi64b(<4 x i64>) nounwind {
; SSE2-LABEL: _clearupper4xi64b:
; SSE2: # %bb.0:
-; SSE2-NEXT: movaps {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
+; SSE2-NEXT: movaps {{.*#+}} xmm2
; SSE2-NEXT: andps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/combine-sra.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-sra.ll?rev=325349&r1=325348&r2=325349&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-sra.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-sra.ll Fri Feb 16 06:57:25 2018
@@ -197,9 +197,7 @@ define <4 x i32> @combine_vec_ashr_trunc
define <4 x i32> @combine_vec_ashr_trunc_lshr(<4 x i64> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_lshr:
; SSE: # %bb.0:
-; SSE-NEXT: psrlq $32, %xmm1
-; SSE-NEXT: psrlq $32, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: psrad $2, %xmm1
Modified: llvm/trunk/test/CodeGen/X86/oddshuffles.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/oddshuffles.ll?rev=325349&r1=325348&r2=325349&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/oddshuffles.ll (original)
+++ llvm/trunk/test/CodeGen/X86/oddshuffles.ll Fri Feb 16 06:57:25 2018
@@ -614,18 +614,17 @@ define void @v12i32(<8 x i32> %a, <8 x i
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,2]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,1,0,1]
-; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[3,0]
+; SSE2-NEXT: movaps %xmm2, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0],xmm3[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0,2]
-; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: movaps %xmm2, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,0],xmm1[1,0]
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm1[2,2]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm4[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0,2]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,3,2,2]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,2],xmm0[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT: movaps %xmm0, 32(%rdi)
; SSE2-NEXT: movaps %xmm4, 16(%rdi)
@@ -1562,39 +1561,37 @@ define void @interleave_24i32_in(<24 x i
; SSE2-NEXT: movdqu 16(%rsi), %xmm2
; SSE2-NEXT: movdqu (%rdx), %xmm6
; SSE2-NEXT: movdqu 16(%rdx), %xmm1
-; SSE2-NEXT: movdqu (%rcx), %xmm7
-; SSE2-NEXT: movdqu 16(%rcx), %xmm4
+; SSE2-NEXT: movups (%rcx), %xmm7
+; SSE2-NEXT: movups 16(%rcx), %xmm4
; SSE2-NEXT: movdqa %xmm5, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm7[0,1,0,1]
-; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm0[3,0]
+; SSE2-NEXT: movaps %xmm7, %xmm3
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm0[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0,2]
-; SSE2-NEXT: movdqa %xmm7, %xmm3
+; SSE2-NEXT: movaps %xmm7, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm6[1,0]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm6[2,2]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; SSE2-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm5[2],xmm7[3],xmm5[3]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm5[0,2]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[0,3,2,2]
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,2,3,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm5[3,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,2],xmm5[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm6[0,2]
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,2,2]
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,1,0,1]
-; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm6[3,0]
+; SSE2-NEXT: movaps %xmm4, %xmm7
+; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,0],xmm6[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm7[0,2]
-; SSE2-NEXT: movdqa %xmm4, %xmm7
+; SSE2-NEXT: movaps %xmm4, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm1[1,0]
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm1[2,2]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE2-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm7[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm2[0,2]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,3,2,2]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[3,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,2],xmm2[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[0,2]
; SSE2-NEXT: movups %xmm2, 80(%rdi)
; SSE2-NEXT: movups %xmm7, 64(%rdi)
Modified: llvm/trunk/test/CodeGen/X86/pmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pmul.ll?rev=325349&r1=325348&r2=325349&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pmul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pmul.ll Fri Feb 16 06:57:25 2018
@@ -1261,11 +1261,9 @@ define <4 x i32> @mul_v4i64_zero_lower(<
; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; SSE2-NEXT: psrlq $32, %xmm2
; SSE2-NEXT: pmuludq %xmm0, %xmm2
-; SSE2-NEXT: psllq $32, %xmm2
; SSE2-NEXT: psrlq $32, %xmm1
; SSE2-NEXT: pmuludq %xmm1, %xmm3
-; SSE2-NEXT: psllq $32, %xmm3
-; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm2[1,3]
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm2[0,2]
; SSE2-NEXT: movaps %xmm3, %xmm0
; SSE2-NEXT: retq
;
@@ -1276,11 +1274,9 @@ define <4 x i32> @mul_v4i64_zero_lower(<
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: psrlq $32, %xmm1
; SSE41-NEXT: pmuludq %xmm1, %xmm0
-; SSE41-NEXT: psllq $32, %xmm0
; SSE41-NEXT: psrlq $32, %xmm2
; SSE41-NEXT: pmuludq %xmm3, %xmm2
-; SSE41-NEXT: psllq $32, %xmm2
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i64_zero_lower:
Modified: llvm/trunk/test/CodeGen/X86/vector-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc.ll?rev=325349&r1=325348&r2=325349&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc.ll Fri Feb 16 06:57:25 2018
@@ -101,25 +101,17 @@ entry:
define <8 x i32> @trunc8i64_8i32_lshr(<8 x i64> %a) {
; SSE-LABEL: trunc8i64_8i32_lshr:
; SSE: # %bb.0: # %entry
-; SSE-NEXT: psrlq $32, %xmm3
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
-; SSE-NEXT: psrlq $32, %xmm1
-; SSE-NEXT: psrlq $32, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc8i64_8i32_lshr:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;