[llvm] r284498 - [X86][SSE] Added vector ashr combine tests
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 18 10:31:08 PDT 2016
Author: rksimon
Date: Tue Oct 18 12:31:07 2016
New Revision: 284498
URL: http://llvm.org/viewvc/llvm-project?rev=284498&view=rev
Log:
[X86][SSE] Added vector ashr combine tests
This doesn't cover all of the combines in DAGCombiner::visitSRA yet, but it identifies several cases where we fail to combine vector (or non-splatted vector) shifts.
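
As an illustration (not part of the patch; the function name is made up), the non-uniform case exercised by combine_vec_ashr_ashr1 below should ideally fold the two constant shifts into a single ashr by <i32 4, i32 6, i32 8, i32 10>, but both the SSE and AVX2 codegen currently still emit two separate shift sequences:

  define <4 x i32> @ashr_ashr_nonuniform(<4 x i32> %x) {
    %1 = ashr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
    %2 = ashr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
    ; expected after the combine: %2 = ashr <4 x i32> %x, <i32 4, i32 6, i32 8, i32 10>
    ret <4 x i32> %2
  }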
Added:
llvm/trunk/test/CodeGen/X86/combine-sra.ll
Added: llvm/trunk/test/CodeGen/X86/combine-sra.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-sra.ll?rev=284498&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-sra.ll (added)
+++ llvm/trunk/test/CodeGen/X86/combine-sra.ll Tue Oct 18 12:31:07 2016
@@ -0,0 +1,299 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+
+; fold (sra 0, x) -> 0
+define <4 x i32> @combine_vec_ashr_zero(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_ashr_zero:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_ashr_zero:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = ashr <4 x i32> zeroinitializer, %x
+ ret <4 x i32> %1
+}
+
+; fold (sra -1, x) -> -1
+define <4 x i32> @combine_vec_ashr_allones(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_ashr_allones:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_ashr_allones:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = ashr <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x
+ ret <4 x i32> %1
+}
+
+; fold (sra x, c >= size(x)) -> undef
+define <4 x i32> @combine_vec_ashr_outofrange0(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_ashr_outofrange0:
+; SSE: # BB#0:
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_ashr_outofrange0:
+; AVX: # BB#0:
+; AVX-NEXT: retq
+ %1 = ashr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
+ ret <4 x i32> %1
+}
+
+define <4 x i32> @combine_vec_ashr_outofrange1(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_ashr_outofrange1:
+; SSE: # BB#0:
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_ashr_outofrange1:
+; AVX: # BB#0:
+; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = ashr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
+ ret <4 x i32> %1
+}
+
+; fold (sra x, 0) -> x
+define <4 x i32> @combine_vec_ashr_by_zero(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_ashr_by_zero:
+; SSE: # BB#0:
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_ashr_by_zero:
+; AVX: # BB#0:
+; AVX-NEXT: retq
+ %1 = ashr <4 x i32> %x, zeroinitializer
+ ret <4 x i32> %1
+}
+
+; fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
+define <4 x i32> @combine_vec_ashr_ashr0(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_ashr_ashr0:
+; SSE: # BB#0:
+; SSE-NEXT: psrad $6, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_ashr_ashr0:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrad $6, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = ashr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
+ %2 = ashr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
+ ret <4 x i32> %2
+}
+
+define <4 x i32> @combine_vec_ashr_ashr1(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_ashr_ashr1:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrad $2, %xmm1
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT: psrad $3, %xmm0
+; SSE-NEXT: psrad $1, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: psrad $7, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrad $5, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: psrad $6, %xmm0
+; SSE-NEXT: psrad $4, %xmm1
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_ashr_ashr1:
+; AVX: # BB#0:
+; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = ashr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
+ %2 = ashr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i32> %2
+}
+
+define <4 x i32> @combine_vec_ashr_ashr2(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_ashr_ashr2:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrad $20, %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrad $18, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrad $19, %xmm1
+; SSE-NEXT: psrad $17, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrad $28, %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrad $26, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrad $27, %xmm1
+; SSE-NEXT: psrad $25, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_ashr_ashr2:
+; AVX: # BB#0:
+; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = ashr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
+ %2 = ashr <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
+ ret <4 x i32> %2
+}
+
+; fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
+define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
+; SSE-LABEL: combine_vec_ashr_trunc_and:
+; SSE: # BB#0:
+; SSE-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE-NEXT: pand {{.*}}(%rip), %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrad %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrlq $32, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: psrad %xmm2, %xmm4
+; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
+; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrad %xmm1, %xmm2
+; SSE-NEXT: psrad %xmm3, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_ashr_trunc_and:
+; AVX: # BB#0:
+; AVX-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX-NEXT: vpsravd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ %3 = ashr <4 x i32> %x, %2
+ ret <4 x i32> %3
+}
+
+; fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
+; if c1 is equal to the number of bits the trunc removes
+define <4 x i32> @combine_vec_ashr_trunc_lshr(<4 x i64> %x) {
+; SSE-LABEL: combine_vec_ashr_trunc_lshr:
+; SSE: # BB#0:
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: psrlq $32, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: psrad $2, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: psrad $3, %xmm2
+; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_ashr_trunc_lshr:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
+; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %1 = lshr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ %3 = ashr <4 x i32> %2, <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %3
+}
+
+; fold (sra (trunc (sra x, c1)), c2) -> (trunc (sra x, c1 + c2))
+; if c1 is equal to the number of bits the trunc removes
+define <4 x i32> @combine_vec_ashr_trunc_ashr(<4 x i64> %x) {
+; SSE-LABEL: combine_vec_ashr_trunc_ashr:
+; SSE: # BB#0:
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,0,2]
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: psrad $2, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: psrad $3, %xmm1
+; SSE-NEXT: psrad $1, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_ashr_trunc_ashr:
+; AVX: # BB#0:
+; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
+; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %1 = ashr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ %3 = ashr <4 x i32> %2, <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %3
+}
+
+; If the sign bit is known to be zero, switch this to a SRL.
+define <4 x i32> @combine_vec_ashr_positive(<4 x i32> %x, <4 x i32> %y) {
+; SSE-LABEL: combine_vec_ashr_positive:
+; SSE: # BB#0:
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrld %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrlq $32, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: psrld %xmm2, %xmm4
+; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
+; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrld %xmm1, %xmm2
+; SSE-NEXT: psrld %xmm3, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_vec_ashr_positive:
+; AVX: # BB#0:
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = and <4 x i32> %x, <i32 15, i32 255, i32 4095, i32 65535>
+ %2 = ashr <4 x i32> %1, %y
+ ret <4 x i32> %2
+}
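+
+; The CHECK lines above were autogenerated by utils/update_llc_test_checks.py,
+; so once the missing visitSRA vector combines land the assertions can be
+; regenerated rather than edited by hand. A typical invocation, run from the
+; llvm source directory (the build path is an assumption about your setup):
+;
+;   utils/update_llc_test_checks.py --llc-binary <build>/bin/llc test/CodeGen/X86/combine-sra.ll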