[llvm] r334290 - [X86][SSE] Add SSE2/AVX2 vector rotate tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 8 07:07:21 PDT 2018


Author: rksimon
Date: Fri Jun  8 07:07:21 2018
New Revision: 334290

URL: http://llvm.org/viewvc/llvm-project?rev=334290&view=rev
Log:
[X86][SSE] Add SSE2/AVX2 vector rotate tests

Now that we're custom lowering vector rotates for SSE targets in general, we should be testing the rotate combines against them as well.
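
For reference, the combines being exercised here recognize rotates written as the usual shl/lshr/or idiom in IR. A minimal illustrative sketch of the rot-of-rot fold under test (the function name is made up and not part of the patch):

  ; rot(rot(x, 3), 4): DAGCombine should fold this into a single rotate by 7.
  define <4 x i32> @rot_rot_sketch(<4 x i32> %x) {
    %s1 = shl <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
    %r1 = lshr <4 x i32> %x, <i32 29, i32 29, i32 29, i32 29>
    %a = or <4 x i32> %s1, %r1
    %s2 = shl <4 x i32> %a, <i32 4, i32 4, i32 4, i32 4>
    %r2 = lshr <4 x i32> %a, <i32 28, i32 28, i32 28, i32 28>
    %b = or <4 x i32> %s2, %r2
    ret <4 x i32> %b
  }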

Modified:
    llvm/trunk/test/CodeGen/X86/combine-rotates.ll

Modified: llvm/trunk/test/CodeGen/X86/combine-rotates.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-rotates.ll?rev=334290&r1=334289&r2=334290&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-rotates.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-rotates.ll Fri Jun  8 07:07:21 2018
@@ -1,14 +1,39 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefix=XOP
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefixes=CHECK,XOP
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512
 
 ; fold (rot (rot x, c1), c2) -> rot x, c1+c2
 define <4 x i32> @combine_vec_rot_rot(<4 x i32> %x) {
+; SSE2-LABEL: combine_vec_rot_rot:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [524288,131072,32768,8192]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    retq
+;
 ; XOP-LABEL: combine_vec_rot_rot:
 ; XOP:       # %bb.0:
 ; XOP-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX2-LABEL: combine_vec_rot_rot:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
 ; AVX512-LABEL: combine_vec_rot_rot:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vprolvd {{.*}}(%rip), %xmm0, %xmm0
@@ -23,11 +48,26 @@ define <4 x i32> @combine_vec_rot_rot(<4
 }
 
 define <4 x i32> @combine_vec_rot_rot_splat(<4 x i32> %x) {
+; SSE2-LABEL: combine_vec_rot_rot_splat:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    psrld $25, %xmm1
+; SSE2-NEXT:    pslld $7, %xmm0
+; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
 ; XOP-LABEL: combine_vec_rot_rot_splat:
 ; XOP:       # %bb.0:
 ; XOP-NEXT:    vprotd $7, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX2-LABEL: combine_vec_rot_rot_splat:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrld $25, %xmm0, %xmm1
+; AVX2-NEXT:    vpslld $7, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
 ; AVX512-LABEL: combine_vec_rot_rot_splat:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vprold $7, %xmm0, %xmm0
@@ -42,13 +82,9 @@ define <4 x i32> @combine_vec_rot_rot_sp
 }
 
 define <4 x i32> @combine_vec_rot_rot_splat_zero(<4 x i32> %x) {
-; XOP-LABEL: combine_vec_rot_rot_splat_zero:
-; XOP:       # %bb.0:
-; XOP-NEXT:    retq
-;
-; AVX512-LABEL: combine_vec_rot_rot_splat_zero:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    retq
+; CHECK-LABEL: combine_vec_rot_rot_splat_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    retq
   %1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
   %2 = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
   %3 = or <4 x i32> %1, %2
@@ -59,12 +95,42 @@ define <4 x i32> @combine_vec_rot_rot_sp
 }
 
 define <4 x i32> @rotate_demanded_bits(<4 x i32>, <4 x i32>) {
+; SSE2-LABEL: rotate_demanded_bits:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    pslld $23, %xmm1
+; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    retq
+;
 ; XOP-LABEL: rotate_demanded_bits:
 ; XOP:       # %bb.0:
 ; XOP-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
 ; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX2-LABEL: rotate_demanded_bits:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [30,30,30,30]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    retq
+;
 ; AVX512-LABEL: rotate_demanded_bits:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpandd {{.*}}(%rip){1to4}, %xmm1, %xmm1
@@ -80,12 +146,42 @@ define <4 x i32> @rotate_demanded_bits(<
 }
 
 define <4 x i32> @rotate_demanded_bits_2(<4 x i32>, <4 x i32>) {
+; SSE2-LABEL: rotate_demanded_bits_2:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    pslld $23, %xmm1
+; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    retq
+;
 ; XOP-LABEL: rotate_demanded_bits_2:
 ; XOP:       # %bb.0:
 ; XOP-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
 ; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX2-LABEL: rotate_demanded_bits_2:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [23,23,23,23]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    retq
+;
 ; AVX512-LABEL: rotate_demanded_bits_2:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpandd {{.*}}(%rip){1to4}, %xmm1, %xmm1
@@ -101,6 +197,26 @@ define <4 x i32> @rotate_demanded_bits_2
 }
 
 define <4 x i32> @rotate_demanded_bits_3(<4 x i32>, <4 x i32>) {
+; SSE2-LABEL: rotate_demanded_bits_3:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    paddd %xmm1, %xmm1
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    pslld $23, %xmm1
+; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    retq
+;
 ; XOP-LABEL: rotate_demanded_bits_3:
 ; XOP:       # %bb.0:
 ; XOP-NEXT:    vpaddd %xmm1, %xmm1, %xmm1
@@ -108,6 +224,18 @@ define <4 x i32> @rotate_demanded_bits_3
 ; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX2-LABEL: rotate_demanded_bits_3:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddd %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [30,30,30,30]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
+; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    retq
+;
 ; AVX512-LABEL: rotate_demanded_bits_3:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpaddd %xmm1, %xmm1, %xmm1
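
To reproduce one of the new configurations locally, a RUN line can be invoked directly against the test file (a sketch; it assumes an in-tree build with llc and FileCheck on PATH):

  llc < llvm/trunk/test/CodeGen/X86/combine-rotates.ll -mtriple=x86_64-unknown-unknown -mattr=+sse2 \
    | FileCheck llvm/trunk/test/CodeGen/X86/combine-rotates.ll --check-prefixes=CHECK,SSE2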




More information about the llvm-commits mailing list