[llvm] 79522f2 - [X86][SSE] Add SSE2/SSE42 test coverage to urem combine tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 17 12:58:21 PDT 2021


Author: Simon Pilgrim
Date: 2021-03-17T19:58:03Z
New Revision: 79522f2180a434b34b5714fb83d968e42b65a64a

URL: https://github.com/llvm/llvm-project/commit/79522f2180a434b34b5714fb83d968e42b65a64a
DIFF: https://github.com/llvm/llvm-project/commit/79522f2180a434b34b5714fb83d968e42b65a64a.diff

LOG: [X86][SSE] Add SSE2/SSE42 test coverage to urem combine tests

Noticed when reviewing D88785
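
The fold these tests cover is the one named by the test file: when the
dividend is known to be a power of two or zero and the divisor is not a
power of two, (urem %x, C) == 0 can only hold for %x == 0, so the urem can
be omitted before the comparison. A minimal scalar sketch of that
equivalence (illustration only, not the in-tree implementation; the loop
and constants are just for demonstration):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      for (uint32_t i = 0; i < 256u; ++i) {
        uint32_t x = i & 128u;  /* clearly a power of two or zero */
        /* '6' is not a power of two, so x % 6 == 0 exactly when x == 0 */
        assert(((x % 6u) == 0) == (x == 0));
      }
      return 0;
    }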

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll b/llvm/test/CodeGen/X86/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll
index c160807cf053..af01df6436ec 100644
--- a/llvm/test/CodeGen/X86/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll
+++ b/llvm/test/CodeGen/X86/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll
@@ -1,5 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse,+sse2,+avx,+avx2 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2   | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE4
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2   | FileCheck %s --check-prefixes=CHECK,AVX2
 
 ; Given:
 ;   icmp eq/ne (urem %x, C), 0
@@ -77,23 +79,63 @@ define i1 @p3_scalar_shifted2_urem_by_const(i32 %x, i32 %y) {
 ;------------------------------------------------------------------------------;
 
 define <4 x i1> @p4_vector_urem_by_const__splat(<4 x i32> %x, <4 x i32> %y) {
-; CHECK-LABEL: p4_vector_urem_by_const__splat:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [128,128,128,128]
-; CHECK-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
-; CHECK-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpmuludq %xmm2, %xmm0, %xmm2
-; CHECK-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-NEXT:    vpsrld $2, %xmm1, %xmm1
-; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [6,6,6,6]
-; CHECK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; SSE2-LABEL: p4_vector_urem_by_const__splat:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pmuludq %xmm1, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    psrld $2, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [6,6,6,6]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    psubd %xmm2, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE4-LABEL: p4_vector_urem_by_const__splat:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
+; SSE4-NEXT:    pmuludq %xmm2, %xmm1
+; SSE4-NEXT:    pmuludq %xmm0, %xmm2
+; SSE4-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE4-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
+; SSE4-NEXT:    psrld $2, %xmm2
+; SSE4-NEXT:    pmulld {{.*}}(%rip), %xmm2
+; SSE4-NEXT:    psubd %xmm2, %xmm0
+; SSE4-NEXT:    pxor %xmm1, %xmm1
+; SSE4-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE4-NEXT:    retq
+;
+; AVX2-LABEL: p4_vector_urem_by_const__splat:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [128,128,128,128]
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
+; AVX2-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpmuludq %xmm2, %xmm0, %xmm2
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
+; AVX2-NEXT:    vpsrld $2, %xmm1, %xmm1
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [6,6,6,6]
+; AVX2-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
   %t0 = and <4 x i32> %x, <i32 128, i32 128, i32 128, i32 128> ; clearly a power-of-two or zero
   %t1 = urem <4 x i32> %t0, <i32 6, i32 6, i32 6, i32 6> ; '6' is clearly not a power of two
   %t2 = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
@@ -101,22 +143,72 @@ define <4 x i1> @p4_vector_urem_by_const__splat(<4 x i32> %x, <4 x i32> %y) {
 }
 
 define <4 x i1> @p5_vector_urem_by_const__nonsplat(<4 x i32> %x, <4 x i32> %y) {
-; CHECK-LABEL: p5_vector_urem_by_const__nonsplat:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; CHECK-NEXT:    vmovdqa {{.*#+}} xmm1 = [2863311531,3435973837,2863311531,954437177]
-; CHECK-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
-; CHECK-NEXT:    vpmuludq %xmm1, %xmm0, %xmm1
-; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; SSE2-LABEL: p5_vector_urem_by_const__nonsplat:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,3435973837,2863311531,954437177]
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pmuludq %xmm1, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    psrld $1, %xmm1
+; SSE2-NEXT:    psrld $2, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,1],xmm1[3,3]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [3,5,6,9]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[0,2,2,3]
+; SSE2-NEXT:    movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
+; SSE2-NEXT:    pmuludq %xmm4, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT:    psubd %xmm1, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE4-LABEL: p5_vector_urem_by_const__nonsplat:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE4-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,3435973837,2863311531,954437177]
+; SSE4-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE4-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE4-NEXT:    pmuludq %xmm2, %xmm3
+; SSE4-NEXT:    pmuludq %xmm0, %xmm1
+; SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE4-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE4-NEXT:    movdqa %xmm1, %xmm2
+; SSE4-NEXT:    psrld $2, %xmm2
+; SSE4-NEXT:    psrld $1, %xmm1
+; SSE4-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5],xmm1[6,7]
+; SSE4-NEXT:    pmulld {{.*}}(%rip), %xmm1
+; SSE4-NEXT:    psubd %xmm1, %xmm0
+; SSE4-NEXT:    pxor %xmm1, %xmm1
+; SSE4-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE4-NEXT:    retq
+;
+; AVX2-LABEL: p5_vector_urem_by_const__nonsplat:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [2863311531,3435973837,2863311531,954437177]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX2-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
+; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
   %t0 = and <4 x i32> %x, <i32 128, i32 2, i32 4, i32 8>
   %t1 = urem <4 x i32> %t0, <i32 3, i32 5, i32 6, i32 9>
   %t2 = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
@@ -124,23 +216,63 @@ define <4 x i1> @p5_vector_urem_by_const__nonsplat(<4 x i32> %x, <4 x i32> %y) {
 }
 
 define <4 x i1> @p6_vector_urem_by_const__nonsplat_undef0(<4 x i32> %x, <4 x i32> %y) {
-; CHECK-LABEL: p6_vector_urem_by_const__nonsplat_undef0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [128,128,128,128]
-; CHECK-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
-; CHECK-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpmuludq %xmm2, %xmm0, %xmm2
-; CHECK-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-NEXT:    vpsrld $2, %xmm1, %xmm1
-; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [6,6,6,6]
-; CHECK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; SSE2-LABEL: p6_vector_urem_by_const__nonsplat_undef0:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pmuludq %xmm1, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    psrld $2, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [6,6,6,6]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    psubd %xmm2, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE4-LABEL: p6_vector_urem_by_const__nonsplat_undef0:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
+; SSE4-NEXT:    pmuludq %xmm2, %xmm1
+; SSE4-NEXT:    pmuludq %xmm0, %xmm2
+; SSE4-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE4-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
+; SSE4-NEXT:    psrld $2, %xmm2
+; SSE4-NEXT:    pmulld {{.*}}(%rip), %xmm2
+; SSE4-NEXT:    psubd %xmm2, %xmm0
+; SSE4-NEXT:    pxor %xmm1, %xmm1
+; SSE4-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE4-NEXT:    retq
+;
+; AVX2-LABEL: p6_vector_urem_by_const__nonsplat_undef0:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [128,128,128,128]
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
+; AVX2-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpmuludq %xmm2, %xmm0, %xmm2
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
+; AVX2-NEXT:    vpsrld $2, %xmm1, %xmm1
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [6,6,6,6]
+; AVX2-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
   %t0 = and <4 x i32> %x, <i32 128, i32 128, i32 undef, i32 128>
   %t1 = urem <4 x i32> %t0, <i32 6, i32 6, i32 6, i32 6> ; '6' is clearly not a power of two
   %t2 = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
@@ -148,23 +280,63 @@ define <4 x i1> @p6_vector_urem_by_const__nonsplat_undef0(<4 x i32> %x, <4 x i32
 }
 
 define <4 x i1> @p7_vector_urem_by_const__nonsplat_undef2(<4 x i32> %x, <4 x i32> %y) {
-; CHECK-LABEL: p7_vector_urem_by_const__nonsplat_undef2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [128,128,128,128]
-; CHECK-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
-; CHECK-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpmuludq %xmm2, %xmm0, %xmm2
-; CHECK-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-NEXT:    vpsrld $2, %xmm1, %xmm1
-; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [6,6,6,6]
-; CHECK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; SSE2-LABEL: p7_vector_urem_by_const__nonsplat_undef2:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pmuludq %xmm1, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    psrld $2, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [6,6,6,6]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    psubd %xmm2, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE4-LABEL: p7_vector_urem_by_const__nonsplat_undef2:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
+; SSE4-NEXT:    pmuludq %xmm2, %xmm1
+; SSE4-NEXT:    pmuludq %xmm0, %xmm2
+; SSE4-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE4-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
+; SSE4-NEXT:    psrld $2, %xmm2
+; SSE4-NEXT:    pmulld {{.*}}(%rip), %xmm2
+; SSE4-NEXT:    psubd %xmm2, %xmm0
+; SSE4-NEXT:    pxor %xmm1, %xmm1
+; SSE4-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE4-NEXT:    retq
+;
+; AVX2-LABEL: p7_vector_urem_by_const__nonsplat_undef2:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [128,128,128,128]
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
+; AVX2-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpmuludq %xmm2, %xmm0, %xmm2
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
+; AVX2-NEXT:    vpsrld $2, %xmm1, %xmm1
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [6,6,6,6]
+; AVX2-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
   %t0 = and <4 x i32> %x, <i32 128, i32 128, i32 128, i32 128> ; clearly a power-of-two or zero
   %t1 = urem <4 x i32> %t0, <i32 6, i32 6, i32 6, i32 6> ; '6' is clearly not a power of two
   %t2 = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
@@ -172,23 +344,63 @@ define <4 x i1> @p7_vector_urem_by_const__nonsplat_undef2(<4 x i32> %x, <4 x i32
 }
 
 define <4 x i1> @p8_vector_urem_by_const__nonsplat_undef3(<4 x i32> %x, <4 x i32> %y) {
-; CHECK-LABEL: p8_vector_urem_by_const__nonsplat_undef3:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [128,128,128,128]
-; CHECK-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
-; CHECK-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpmuludq %xmm2, %xmm0, %xmm2
-; CHECK-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-NEXT:    vpsrld $2, %xmm1, %xmm1
-; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [6,6,6,6]
-; CHECK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; SSE2-LABEL: p8_vector_urem_by_const__nonsplat_undef3:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pmuludq %xmm1, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    psrld $2, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [6,6,6,6]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    psubd %xmm2, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE4-LABEL: p8_vector_urem_by_const__nonsplat_undef3:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
+; SSE4-NEXT:    pmuludq %xmm2, %xmm1
+; SSE4-NEXT:    pmuludq %xmm0, %xmm2
+; SSE4-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE4-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
+; SSE4-NEXT:    psrld $2, %xmm2
+; SSE4-NEXT:    pmulld {{.*}}(%rip), %xmm2
+; SSE4-NEXT:    psubd %xmm2, %xmm0
+; SSE4-NEXT:    pxor %xmm1, %xmm1
+; SSE4-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE4-NEXT:    retq
+;
+; AVX2-LABEL: p8_vector_urem_by_const__nonsplat_undef3:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [128,128,128,128]
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
+; AVX2-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpmuludq %xmm2, %xmm0, %xmm2
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
+; AVX2-NEXT:    vpsrld $2, %xmm1, %xmm1
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [6,6,6,6]
+; AVX2-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
   %t0 = and <4 x i32> %x, <i32 128, i32 128, i32 undef, i32 128>
   %t1 = urem <4 x i32> %t0, <i32 6, i32 6, i32 6, i32 6> ; '6' is clearly not a power of two
   %t2 = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
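
For reference, the repeated magic constant in the new SSE2/SSE4 (and existing
AVX2) check lines is 2863311531 = 0xAAAAAAAB = ceil(2^34 / 6): the expansion
multiplies by it, keeps the high 32 bits of each 64-bit product, shifts right
by 2 to get the quotient, then multiplies back by 6 and subtracts to recover
the remainder. A scalar sketch of that expansion (illustration only, assuming
nothing beyond the arithmetic visible in the checks):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t urem6(uint32_t x) {
      /* pmuludq by 0xAAAAAAAB, keep the high 32 bits of the 64-bit product */
      uint32_t hi = (uint32_t)(((uint64_t)x * 2863311531u) >> 32);
      uint32_t q = hi >> 2;    /* psrld $2: q = x / 6 */
      return x - q * 6u;       /* pmulld/pmuludq by [6,6,6,6], then psubd */
    }

    int main(void) {
      for (uint32_t x = 0; x < 1000000u; ++x)
        assert(urem6(x) == x % 6u);
      assert(urem6(0xFFFFFFFFu) == 0xFFFFFFFFu % 6u);
      return 0;
    }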


        

