[llvm] r308724 - [X86][SSE] Add extra (sra (sra x, c1), c2) -> (sra x, (add c1, c2)) test case

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 21 03:22:49 PDT 2017


Author: rksimon
Date: Fri Jul 21 03:22:49 2017
New Revision: 308724

URL: http://llvm.org/viewvc/llvm-project?rev=308724&view=rev
Log:
[X86][SSE] Add extra (sra (sra x, c1), c2) -> (sra x, (add c1, c2)) test case

We should be able to handle the case where some c1+c2 elements exceed the maximum shift amount and some don't, by clamping the summed shift amounts after the addition.
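
For reference (this is not part of the commit, and not LLVM's DAGCombiner code), here is a minimal standalone C++ sketch of why the clamp is legal: when both per-element amounts are individually in range, an arithmetic shift by c1 followed by c2 equals a single shift by min(c1 + c2, BitWidth - 1), since once the value is all sign bits further shifting changes nothing. The lane values and the sra/c1/c2 names below are illustrative only, not the exact constants from the test.

// Assumes signed >> behaves as an arithmetic shift (true on the usual
// targets, and guaranteed from C++20 onwards).
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>

static int32_t sra(int32_t x, uint32_t amt) { return x >> amt; }

int main() {
  std::array<int32_t, 2> x = {-123456, 987654};
  std::array<uint32_t, 2> c1 = {20, 5};   // each amount individually in range
  std::array<uint32_t, 2> c2 = {20, 10};  // lane 0 sums past 31, lane 1 does not

  for (int i = 0; i < 2; ++i) {
    int32_t twoShifts = sra(sra(x[i], c1[i]), c2[i]);
    uint32_t clamped = std::min(c1[i] + c2[i], 31u); // clamp after the sum
    assert(twoShifts == sra(x[i], clamped));
  }
  return 0;
}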

Modified:
    llvm/trunk/test/CodeGen/X86/combine-sra.ll

Modified: llvm/trunk/test/CodeGen/X86/combine-sra.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-sra.ll?rev=308724&r1=308723&r2=308724&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-sra.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-sra.ll Fri Jul 21 03:22:49 2017
@@ -125,6 +125,36 @@ define <4 x i32> @combine_vec_ashr_ashr2
   ret <4 x i32> %2
 }
 
+define <4 x i32> @combine_vec_ashr_ashr3(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_ashr_ashr3:
+; SSE:       # BB#0:
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrad $27, %xmm1
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    psrad $5, %xmm2
+; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrad $31, %xmm1
+; SSE-NEXT:    psrad $1, %xmm0
+; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrad $10, %xmm1
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_ashr_ashr3:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = ashr <4 x i32> %x, <i32  1, i32  5, i32 50, i32 27>
+  %2 = ashr <4 x i32> %1, <i32 33, i32 10, i32 33, i32  0>
+  ret <4 x i32> %2
+}
+
 ; fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
 define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ; SSE-LABEL: combine_vec_ashr_trunc_and:



