[llvm] 446581a - [NFC][Codegen] Add `x u% C1 == C2` with C1 u<= C2 tautological tests

Roman Lebedev via llvm-commits <llvm-commits at lists.llvm.org>
Sun Nov 10 03:23:08 PST 2019


Author: Roman Lebedev
Date: 2019-11-10T14:22:57+03:00
New Revision: 446581a3002c4ebc73691298c8b2d47372c2e98c

URL: https://github.com/llvm/llvm-project/commit/446581a3002c4ebc73691298c8b2d47372c2e98c
DIFF: https://github.com/llvm/llvm-project/commit/446581a3002c4ebc73691298c8b2d47372c2e98c.diff

LOG: [NFC][Codegen] Add `x u% C1 == C2` with C1 u<= C2 tautological tests

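These lanes are tautological because an unsigned remainder `x u% C1` is always strictly less than C1, so whenever C1 u<= C2 the comparison `x u% C1 == C2` can never hold (and, conversely, `x u% 1 == 0` always holds); such lanes could be folded to a constant with no remainder computed at all. A minimal scalar IR sketch of one such lane (illustrative only; this commit only adds the tests, and the fold itself is assumed to land separately):

; x u% 2 == 3 can never hold: the remainder is always 0 or 1.
define i1 @urem_eq_tautological(i32 %x) {
  %r = urem i32 %x, 2    ; %r is in [0, 1]
  %c = icmp eq i32 %r, 3 ; 3 u>= 2, so this is always false
  ret i1 %c              ; expected to fold to: ret i1 false
}
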
Added: 
    llvm/test/CodeGen/AArch64/urem-seteq-vec-tautological.ll
    llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/urem-seteq-vec-tautological.ll b/llvm/test/CodeGen/AArch64/urem-seteq-vec-tautological.ll
new file mode 100644
index 000000000000..9233ad385df3
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/urem-seteq-vec-tautological.ll
@@ -0,0 +1,127 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+
+define <4 x i1> @t0_all_tautological(<4 x i32> %X) nounwind {
+; CHECK-LABEL: t0_all_tautological:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    adrp x8, .LCPI0_1
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI0_1]
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    cmeq v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
+  %urem = urem <4 x i32> %X, <i32 1, i32 1, i32 2, i32 2>
+  %cmp = icmp eq <4 x i32> %urem, <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i1> %cmp
+}
+
+define <4 x i1> @t1_all_odd_eq(<4 x i32> %X) nounwind {
+; CHECK-LABEL: t1_all_odd_eq:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI1_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI1_0]
+; CHECK-NEXT:    adrp x8, .LCPI1_1
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI1_1]
+; CHECK-NEXT:    adrp x8, .LCPI1_2
+; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI1_2]
+; CHECK-NEXT:    adrp x8, .LCPI1_3
+; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI1_3]
+; CHECK-NEXT:    adrp x8, .LCPI1_4
+; CHECK-NEXT:    umull2 v5.2d, v0.4s, v1.4s
+; CHECK-NEXT:    umull v1.2d, v0.2s, v1.2s
+; CHECK-NEXT:    neg v2.4s, v2.4s
+; CHECK-NEXT:    uzp2 v1.4s, v1.4s, v5.4s
+; CHECK-NEXT:    ldr q5, [x8, :lo12:.LCPI1_4]
+; CHECK-NEXT:    ushl v1.4s, v1.4s, v2.4s
+; CHECK-NEXT:    bsl v3.16b, v0.16b, v1.16b
+; CHECK-NEXT:    mls v0.4s, v3.4s, v4.4s
+; CHECK-NEXT:    cmeq v0.4s, v0.4s, v5.4s
+; CHECK-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
+  %urem = urem <4 x i32> %X, <i32 3, i32 1, i32 1, i32 9>
+  %cmp = icmp eq <4 x i32> %urem, <i32 0, i32 42, i32 0, i32 42>
+  ret <4 x i1> %cmp
+}
+
+define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
+; CHECK-LABEL: t1_all_odd_ne:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI2_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI2_0]
+; CHECK-NEXT:    adrp x8, .LCPI2_1
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI2_1]
+; CHECK-NEXT:    adrp x8, .LCPI2_2
+; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI2_2]
+; CHECK-NEXT:    adrp x8, .LCPI2_3
+; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI2_3]
+; CHECK-NEXT:    adrp x8, .LCPI2_4
+; CHECK-NEXT:    umull2 v5.2d, v0.4s, v1.4s
+; CHECK-NEXT:    umull v1.2d, v0.2s, v1.2s
+; CHECK-NEXT:    neg v2.4s, v2.4s
+; CHECK-NEXT:    uzp2 v1.4s, v1.4s, v5.4s
+; CHECK-NEXT:    ldr q5, [x8, :lo12:.LCPI2_4]
+; CHECK-NEXT:    ushl v1.4s, v1.4s, v2.4s
+; CHECK-NEXT:    bsl v3.16b, v0.16b, v1.16b
+; CHECK-NEXT:    mls v0.4s, v3.4s, v4.4s
+; CHECK-NEXT:    cmeq v0.4s, v0.4s, v5.4s
+; CHECK-NEXT:    mvn v0.16b, v0.16b
+; CHECK-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
+  %urem = urem <4 x i32> %X, <i32 3, i32 1, i32 1, i32 9>
+  %cmp = icmp ne <4 x i32> %urem, <i32 0, i32 42, i32 0, i32 42>
+  ret <4 x i1> %cmp
+}
+
+define <8 x i1> @t2_narrow(<8 x i16> %X) nounwind {
+; CHECK-LABEL: t2_narrow:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI3_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT:    adrp x8, .LCPI3_1
+; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI3_1]
+; CHECK-NEXT:    adrp x8, .LCPI3_2
+; CHECK-NEXT:    umull2 v4.4s, v0.8h, v1.8h
+; CHECK-NEXT:    umull v1.4s, v0.4h, v1.4h
+; CHECK-NEXT:    uzp2 v1.8h, v1.8h, v4.8h
+; CHECK-NEXT:    neg v3.8h, v3.8h
+; CHECK-NEXT:    movi v2.2d, #0xffff00000000ffff
+; CHECK-NEXT:    ushl v1.8h, v1.8h, v3.8h
+; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI3_2]
+; CHECK-NEXT:    adrp x8, .LCPI3_3
+; CHECK-NEXT:    movi v4.2d, #0x00ffffffff0000
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI3_3]
+; CHECK-NEXT:    and v4.16b, v0.16b, v4.16b
+; CHECK-NEXT:    orr v1.16b, v4.16b, v1.16b
+; CHECK-NEXT:    mls v0.8h, v1.8h, v3.8h
+; CHECK-NEXT:    cmeq v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    xtn v0.8b, v0.8h
+; CHECK-NEXT:    ret
+  %urem = urem <8 x i16> %X, <i16 3, i16 1, i16 1, i16 9, i16 3, i16 1, i16 1, i16 9>
+  %cmp = icmp eq <8 x i16> %urem, <i16 0, i16 0, i16 42, i16 42, i16 0, i16 0, i16 42, i16 42>
+  ret <8 x i1> %cmp
+}
+
+define <2 x i1> @t3_wide(<2 x i64> %X) nounwind {
+; CHECK-LABEL: t3_wide:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x9, #-6148914691236517206
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    movk x9, #43691
+; CHECK-NEXT:    adrp x10, .LCPI4_0
+; CHECK-NEXT:    umulh x9, x8, x9
+; CHECK-NEXT:    ldr q0, [x10, :lo12:.LCPI4_0]
+; CHECK-NEXT:    lsr x9, x9, #1
+; CHECK-NEXT:    add x9, x9, x9, lsl #1
+; CHECK-NEXT:    sub x8, x8, x9
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEXT:    mov v1.d[0], x8
+; CHECK-NEXT:    cmeq v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    ret
+  %urem = urem <2 x i64> %X, <i64 3, i64 1>
+  %cmp = icmp eq <2 x i64> %urem, <i64 0, i64 42>
+  ret <2 x i1> %cmp
+}

diff  --git a/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll
new file mode 100644
index 000000000000..ea9ed074622e
--- /dev/null
+++ b/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll
@@ -0,0 +1,347 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SSE,CHECK-SSE2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse4.1 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SSE,CHECK-SSE41
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx < %s | FileCheck %s --check-prefixes=CHECK,CHECK-AVX,CHECK-AVX1
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-AVX,CHECK-AVX2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512vl < %s | FileCheck %s --check-prefixes=CHECK,CHECK-AVX,CHECK-AVX512VL
+
+define <4 x i1> @t0_all_tautological(<4 x i32> %X) nounwind {
+; CHECK-SSE-LABEL: t0_all_tautological:
+; CHECK-SSE:       # %bb.0:
+; CHECK-SSE-NEXT:    pand {{.*}}(%rip), %xmm0
+; CHECK-SSE-NEXT:    pcmpeqd {{.*}}(%rip), %xmm0
+; CHECK-SSE-NEXT:    retq
+;
+; CHECK-AVX-LABEL: t0_all_tautological:
+; CHECK-AVX:       # %bb.0:
+; CHECK-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX-NEXT:    vpcmpeqd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX-NEXT:    retq
+  %urem = urem <4 x i32> %X, <i32 1, i32 1, i32 2, i32 2>
+  %cmp = icmp eq <4 x i32> %urem, <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i1> %cmp
+}
+
+define <4 x i1> @t1_all_odd_eq(<4 x i32> %X) nounwind {
+; CHECK-SSE2-LABEL: t1_all_odd_eq:
+; CHECK-SSE2:       # %bb.0:
+; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,0,0,954437177]
+; CHECK-SSE2-NEXT:    movdqa %xmm0, %xmm2
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm2
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm3
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; CHECK-SSE2-NEXT:    psrld $1, %xmm2
+; CHECK-SSE2-NEXT:    movdqa %xmm0, %xmm1
+; CHECK-SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[3,3]
+; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [3,1,1,9]
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[2,2,3,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm4
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[0,2,2,3]
+; CHECK-SSE2-NEXT:    movdqa %xmm0, %xmm4
+; CHECK-SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,2],xmm2[0,3]
+; CHECK-SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0,1,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm3, %xmm4
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
+; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; CHECK-SSE2-NEXT:    psubd %xmm2, %xmm0
+; CHECK-SSE2-NEXT:    pcmpeqd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    retq
+;
+; CHECK-SSE41-LABEL: t1_all_odd_eq:
+; CHECK-SSE41:       # %bb.0:
+; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,0,0,954437177]
+; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
+; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; CHECK-SSE41-NEXT:    pmuludq %xmm2, %xmm3
+; CHECK-SSE41-NEXT:    pmuludq %xmm0, %xmm1
+; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; CHECK-SSE41-NEXT:    psrld $1, %xmm1
+; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; CHECK-SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm1
+; CHECK-SSE41-NEXT:    psubd %xmm1, %xmm0
+; CHECK-SSE41-NEXT:    pcmpeqd {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    retq
+;
+; CHECK-AVX1-LABEL: t1_all_odd_eq:
+; CHECK-AVX1:       # %bb.0:
+; CHECK-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [2863311531,0,0,954437177]
+; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
+; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; CHECK-AVX1-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
+; CHECK-AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm1
+; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; CHECK-AVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpcmpeqd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    retq
+;
+; CHECK-AVX2-LABEL: t1_all_odd_eq:
+; CHECK-AVX2:       # %bb.0:
+; CHECK-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [2863311531,0,0,954437177]
+; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
+; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; CHECK-AVX2-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
+; CHECK-AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm1
+; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; CHECK-AVX2-NEXT:    vpsrld $1, %xmm1, %xmm1
+; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm0[1,2],xmm1[3]
+; CHECK-AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; CHECK-AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpcmpeqd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    retq
+;
+; CHECK-AVX512VL-LABEL: t1_all_odd_eq:
+; CHECK-AVX512VL:       # %bb.0:
+; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [2863311531,0,0,954437177]
+; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
+; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; CHECK-AVX512VL-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
+; CHECK-AVX512VL-NEXT:    vpmuludq %xmm1, %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; CHECK-AVX512VL-NEXT:    vpsrld $1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm0[1,2],xmm1[3]
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpcmpeqd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    retq
+  %urem = urem <4 x i32> %X, <i32 3, i32 1, i32 1, i32 9>
+  %cmp = icmp eq <4 x i32> %urem, <i32 0, i32 42, i32 0, i32 42>
+  ret <4 x i1> %cmp
+}
+
+define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
+; CHECK-SSE2-LABEL: t1_all_odd_ne:
+; CHECK-SSE2:       # %bb.0:
+; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,0,0,954437177]
+; CHECK-SSE2-NEXT:    movdqa %xmm0, %xmm2
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm2
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm3
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; CHECK-SSE2-NEXT:    psrld $1, %xmm2
+; CHECK-SSE2-NEXT:    movdqa %xmm0, %xmm1
+; CHECK-SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[3,3]
+; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [3,1,1,9]
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[2,2,3,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm4
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[0,2,2,3]
+; CHECK-SSE2-NEXT:    movdqa %xmm0, %xmm4
+; CHECK-SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,2],xmm2[0,3]
+; CHECK-SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0,1,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm3, %xmm4
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
+; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; CHECK-SSE2-NEXT:    psubd %xmm2, %xmm0
+; CHECK-SSE2-NEXT:    pcmpeqd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
+; CHECK-SSE2-NEXT:    pxor %xmm1, %xmm0
+; CHECK-SSE2-NEXT:    retq
+;
+; CHECK-SSE41-LABEL: t1_all_odd_ne:
+; CHECK-SSE41:       # %bb.0:
+; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,0,0,954437177]
+; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
+; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; CHECK-SSE41-NEXT:    pmuludq %xmm2, %xmm3
+; CHECK-SSE41-NEXT:    pmuludq %xmm0, %xmm1
+; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; CHECK-SSE41-NEXT:    psrld $1, %xmm1
+; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; CHECK-SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm1
+; CHECK-SSE41-NEXT:    psubd %xmm1, %xmm0
+; CHECK-SSE41-NEXT:    pcmpeqd {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
+; CHECK-SSE41-NEXT:    pxor %xmm1, %xmm0
+; CHECK-SSE41-NEXT:    retq
+;
+; CHECK-AVX1-LABEL: t1_all_odd_ne:
+; CHECK-AVX1:       # %bb.0:
+; CHECK-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [2863311531,0,0,954437177]
+; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
+; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; CHECK-AVX1-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
+; CHECK-AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm1
+; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; CHECK-AVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpcmpeqd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    retq
+;
+; CHECK-AVX2-LABEL: t1_all_odd_ne:
+; CHECK-AVX2:       # %bb.0:
+; CHECK-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [2863311531,0,0,954437177]
+; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
+; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; CHECK-AVX2-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
+; CHECK-AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm1
+; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; CHECK-AVX2-NEXT:    vpsrld $1, %xmm1, %xmm1
+; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm0[1,2],xmm1[3]
+; CHECK-AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; CHECK-AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpcmpeqd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    retq
+;
+; CHECK-AVX512VL-LABEL: t1_all_odd_ne:
+; CHECK-AVX512VL:       # %bb.0:
+; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [2863311531,0,0,954437177]
+; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
+; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; CHECK-AVX512VL-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
+; CHECK-AVX512VL-NEXT:    vpmuludq %xmm1, %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; CHECK-AVX512VL-NEXT:    vpsrld $1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm0[1,2],xmm1[3]
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpcmpeqd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    retq
+  %urem = urem <4 x i32> %X, <i32 3, i32 1, i32 1, i32 9>
+  %cmp = icmp ne <4 x i32> %urem, <i32 0, i32 42, i32 0, i32 42>
+  ret <4 x i1> %cmp
+}
+
+define <8 x i1> @t2_narrow(<8 x i16> %X) nounwind {
+; CHECK-SSE2-LABEL: t2_narrow:
+; CHECK-SSE2:       # %bb.0:
+; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [0,65535,65535,0,0,65535,65535,0]
+; CHECK-SSE2-NEXT:    movdqa %xmm0, %xmm2
+; CHECK-SSE2-NEXT:    pand %xmm1, %xmm2
+; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [43691,0,0,58255,43691,0,0,58255]
+; CHECK-SSE2-NEXT:    pmulhuw %xmm0, %xmm3
+; CHECK-SSE2-NEXT:    movdqa %xmm3, %xmm4
+; CHECK-SSE2-NEXT:    psrlw $3, %xmm4
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
+; CHECK-SSE2-NEXT:    psrlw $1, %xmm3
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; CHECK-SSE2-NEXT:    pandn %xmm3, %xmm1
+; CHECK-SSE2-NEXT:    por %xmm2, %xmm1
+; CHECK-SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm1
+; CHECK-SSE2-NEXT:    psubw %xmm1, %xmm0
+; CHECK-SSE2-NEXT:    pcmpeqw {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    retq
+;
+; CHECK-SSE41-LABEL: t2_narrow:
+; CHECK-SSE41:       # %bb.0:
+; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [43691,0,0,58255,43691,0,0,58255]
+; CHECK-SSE41-NEXT:    pmulhuw %xmm0, %xmm1
+; CHECK-SSE41-NEXT:    movdqa %xmm1, %xmm2
+; CHECK-SSE41-NEXT:    psrlw $3, %xmm2
+; CHECK-SSE41-NEXT:    psrlw $1, %xmm1
+; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2],xmm1[3,4],xmm0[5,6],xmm1[7]
+; CHECK-SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm1
+; CHECK-SSE41-NEXT:    psubw %xmm1, %xmm0
+; CHECK-SSE41-NEXT:    pcmpeqw {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    retq
+;
+; CHECK-AVX1-LABEL: t2_narrow:
+; CHECK-AVX1:       # %bb.0:
+; CHECK-AVX1-NEXT:    vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT:    vpsrlw $3, %xmm1, %xmm2
+; CHECK-AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2],xmm1[3,4],xmm0[5,6],xmm1[7]
+; CHECK-AVX1-NEXT:    vpmullw {{.*}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT:    vpsubw %xmm1, %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpcmpeqw {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    retq
+;
+; CHECK-AVX2-LABEL: t2_narrow:
+; CHECK-AVX2:       # %bb.0:
+; CHECK-AVX2-NEXT:    vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX2-NEXT:    vpsrlw $3, %xmm1, %xmm2
+; CHECK-AVX2-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; CHECK-AVX2-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2],xmm1[3,4],xmm0[5,6],xmm1[7]
+; CHECK-AVX2-NEXT:    vpmullw {{.*}}(%rip), %xmm1, %xmm1
+; CHECK-AVX2-NEXT:    vpsubw %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpcmpeqw {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    retq
+;
+; CHECK-AVX512VL-LABEL: t2_narrow:
+; CHECK-AVX512VL:       # %bb.0:
+; CHECK-AVX512VL-NEXT:    vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpsrlw $3, %xmm1, %xmm2
+; CHECK-AVX512VL-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; CHECK-AVX512VL-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2],xmm1[3,4],xmm0[5,6],xmm1[7]
+; CHECK-AVX512VL-NEXT:    vpmullw {{.*}}(%rip), %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT:    vpsubw %xmm1, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpcmpeqw {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    retq
+  %urem = urem <8 x i16> %X, <i16 3, i16 1, i16 1, i16 9, i16 3, i16 1, i16 1, i16 9>
+  %cmp = icmp eq <8 x i16> %urem, <i16 0, i16 0, i16 42, i16 42, i16 0, i16 0, i16 42, i16 42>
+  ret <8 x i1> %cmp
+}
+
+define <2 x i1> @t3_wide(<2 x i64> %X) nounwind {
+; CHECK-SSE2-LABEL: t3_wide:
+; CHECK-SSE2:       # %bb.0:
+; CHECK-SSE2-NEXT:    movq %xmm0, %rcx
+; CHECK-SSE2-NEXT:    movabsq $-6148914691236517205, %rdx # imm = 0xAAAAAAAAAAAAAAAB
+; CHECK-SSE2-NEXT:    movq %rcx, %rax
+; CHECK-SSE2-NEXT:    mulq %rdx
+; CHECK-SSE2-NEXT:    shrq %rdx
+; CHECK-SSE2-NEXT:    leaq (%rdx,%rdx,2), %rax
+; CHECK-SSE2-NEXT:    subq %rax, %rcx
+; CHECK-SSE2-NEXT:    movq %rcx, %xmm1
+; CHECK-SSE2-NEXT:    pcmpeqd {{.*}}(%rip), %xmm1
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
+; CHECK-SSE2-NEXT:    pand %xmm1, %xmm0
+; CHECK-SSE2-NEXT:    retq
+;
+; CHECK-SSE41-LABEL: t3_wide:
+; CHECK-SSE41:       # %bb.0:
+; CHECK-SSE41-NEXT:    movq %xmm0, %rcx
+; CHECK-SSE41-NEXT:    movabsq $-6148914691236517205, %rdx # imm = 0xAAAAAAAAAAAAAAAB
+; CHECK-SSE41-NEXT:    movq %rcx, %rax
+; CHECK-SSE41-NEXT:    mulq %rdx
+; CHECK-SSE41-NEXT:    shrq %rdx
+; CHECK-SSE41-NEXT:    leaq (%rdx,%rdx,2), %rax
+; CHECK-SSE41-NEXT:    subq %rax, %rcx
+; CHECK-SSE41-NEXT:    movq %rcx, %xmm0
+; CHECK-SSE41-NEXT:    pcmpeqq {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    retq
+;
+; CHECK-AVX-LABEL: t3_wide:
+; CHECK-AVX:       # %bb.0:
+; CHECK-AVX-NEXT:    vmovq %xmm0, %rcx
+; CHECK-AVX-NEXT:    movabsq $-6148914691236517205, %rdx # imm = 0xAAAAAAAAAAAAAAAB
+; CHECK-AVX-NEXT:    movq %rcx, %rax
+; CHECK-AVX-NEXT:    mulq %rdx
+; CHECK-AVX-NEXT:    shrq %rdx
+; CHECK-AVX-NEXT:    leaq (%rdx,%rdx,2), %rax
+; CHECK-AVX-NEXT:    subq %rax, %rcx
+; CHECK-AVX-NEXT:    vmovq %rcx, %xmm0
+; CHECK-AVX-NEXT:    vpcmpeqq {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX-NEXT:    retq
+  %urem = urem <2 x i64> %X, <i64 3, i64 1>
+  %cmp = icmp eq <2 x i64> %urem, <i64 0, i64 42>
+  ret <2 x i1> %cmp
+}
