[llvm] r304612 - [x86] add tests for unsigned vector compares with known signbits; NFC (PR33276)
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Fri Jun 2 16:24:28 PDT 2017
Author: spatel
Date: Fri Jun 2 18:24:28 2017
New Revision: 304612
URL: http://llvm.org/viewvc/llvm-project?rev=304612&view=rev
Log:
[x86] add tests for unsigned vector compares with known signbits; NFC (PR33276)
Added:
llvm/trunk/test/CodeGen/X86/vector-unsigned-cmp.ll
Added: llvm/trunk/test/CodeGen/X86/vector-unsigned-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-unsigned-cmp.ll?rev=304612&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-unsigned-cmp.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-unsigned-cmp.ll Fri Jun 2 18:24:28 2017
@@ -0,0 +1,519 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+
+; PR33276 - https://bugs.llvm.org/show_bug.cgi?id=33276
+; If both operands of an unsigned icmp are known non-negative, then
+; we don't need to flip the sign bits in order to map to signed pcmpgt*.
+
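+; A sketch of the intended fold (illustration only, not an autogenerated
+; check): because 'lshr ..., 1' clears the sign bit of every element,
+; 'icmp ugt' on the shifted values is equivalent to 'icmp sgt'. So, for
+; example, ugt_v4i32 below should eventually drop both 'pxor' sign-bit
+; flips and compile to just:
+;
+;   psrld $1, %xmm0
+;   psrld $1, %xmm1
+;   pcmpgtd %xmm1, %xmm0
+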
+define <2 x i1> @ugt_v2i64(<2 x i64> %x, <2 x i64> %y) {
+; SSE-LABEL: ugt_v2i64:
+; SSE: # BB#0:
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE-NEXT: pxor %xmm2, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: ugt_v2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %sh1 = lshr <2 x i64> %x, <i64 1, i64 1>
+ %sh2 = lshr <2 x i64> %y, <i64 1, i64 1>
+ %cmp = icmp ugt <2 x i64> %sh1, %sh2
+ ret <2 x i1> %cmp
+}
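+; Note: pcmpgtq requires SSE4.2, so both SSE runs above expand the 64-bit
+; signed compare with pcmpgtd/pcmpeqd plus shuffles. The AVX path already
+; emits the sign-bit flip as 'vpor' rather than 'vpxor' (or and xor agree
+; when the bit is known zero), but the flip itself is not yet removed.
+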
+
+define <2 x i1> @ult_v2i64(<2 x i64> %x, <2 x i64> %y) {
+; SSE-LABEL: ult_v2i64:
+; SSE: # BB#0:
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: ult_v2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %sh1 = lshr <2 x i64> %x, <i64 1, i64 1>
+ %sh2 = lshr <2 x i64> %y, <i64 1, i64 1>
+ %cmp = icmp ult <2 x i64> %sh1, %sh2
+ ret <2 x i1> %cmp
+}
+
+define <2 x i1> @uge_v2i64(<2 x i64> %x, <2 x i64> %y) {
+; SSE-LABEL: uge_v2i64:
+; SSE: # BB#0:
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: uge_v2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %sh1 = lshr <2 x i64> %x, <i64 1, i64 1>
+ %sh2 = lshr <2 x i64> %y, <i64 1, i64 1>
+ %cmp = icmp uge <2 x i64> %sh1, %sh2
+ ret <2 x i1> %cmp
+}
+
+define <2 x i1> @ule_v2i64(<2 x i64> %x, <2 x i64> %y) {
+; SSE-LABEL: ule_v2i64:
+; SSE: # BB#0:
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE-NEXT: pxor %xmm2, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: ule_v2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %sh1 = lshr <2 x i64> %x, <i64 1, i64 1>
+ %sh2 = lshr <2 x i64> %y, <i64 1, i64 1>
+ %cmp = icmp ule <2 x i64> %sh1, %sh2
+ ret <2 x i1> %cmp
+}
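+; Note: uge/ule are lowered as the inverse of ult/ugt: 'pcmpeqd %xmm0, %xmm0'
+; materializes all-ones, and the trailing pxor bitwise-NOTs the compare result.
+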
+
+define <4 x i1> @ugt_v4i32(<4 x i32> %x, <4 x i32> %y) {
+; SSE-LABEL: ugt_v4i32:
+; SSE: # BB#0:
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE-NEXT: pxor %xmm2, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: ugt_v4i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ugt_v4i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+ %sh1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
+ %sh2 = lshr <4 x i32> %y, <i32 1, i32 1, i32 1, i32 1>
+ %cmp = icmp ugt <4 x i32> %sh1, %sh2
+ ret <4 x i1> %cmp
+}
+
+define <4 x i1> @ult_v4i32(<4 x i32> %x, <4 x i32> %y) {
+; SSE-LABEL: ult_v4i32:
+; SSE: # BB#0:
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm2
+; SSE-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: ult_v4i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ult_v4i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: retq
+ %sh1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
+ %sh2 = lshr <4 x i32> %y, <i32 1, i32 1, i32 1, i32 1>
+ %cmp = icmp ult <4 x i32> %sh1, %sh2
+ ret <4 x i1> %cmp
+}
+
+define <4 x i1> @uge_v4i32(<4 x i32> %x, <4 x i32> %y) {
+; SSE2-LABEL: uge_v4i32:
+; SSE2: # BB#0:
+; SSE2-NEXT: psrld $1, %xmm0
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: uge_v4i32:
+; SSE41: # BB#0:
+; SSE41-NEXT: psrld $1, %xmm0
+; SSE41-NEXT: psrld $1, %xmm1
+; SSE41-NEXT: pmaxud %xmm0, %xmm1
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: uge_v4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %sh1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
+ %sh2 = lshr <4 x i32> %y, <i32 1, i32 1, i32 1, i32 1>
+ %cmp = icmp uge <4 x i32> %sh1, %sh2
+ ret <4 x i1> %cmp
+}
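+; Note: with SSE4.1/AVX, uge is matched to the unsigned-max idiom:
+; umax(a, b) == a iff a uge b, so no sign-bit flip is needed at all.
+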
+
+define <4 x i1> @ule_v4i32(<4 x i32> %x, <4 x i32> %y) {
+; SSE2-LABEL: ule_v4i32:
+; SSE2: # BB#0:
+; SSE2-NEXT: psrld $1, %xmm0
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: pxor %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: ule_v4i32:
+; SSE41: # BB#0:
+; SSE41-NEXT: psrld $1, %xmm0
+; SSE41-NEXT: psrld $1, %xmm1
+; SSE41-NEXT: pminud %xmm0, %xmm1
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: ule_v4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %sh1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
+ %sh2 = lshr <4 x i32> %y, <i32 1, i32 1, i32 1, i32 1>
+ %cmp = icmp ule <4 x i32> %sh1, %sh2
+ ret <4 x i1> %cmp
+}
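+; Note: likewise, ule uses the unsigned-min idiom: umin(a, b) == a iff a ule b.
+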
+
+define <8 x i1> @ugt_v8i16(<8 x i16> %x, <8 x i16> %y) {
+; SSE-LABEL: ugt_v8i16:
+; SSE: # BB#0:
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; SSE-NEXT: pxor %xmm2, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pcmpgtw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: ugt_v8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %sh1 = lshr <8 x i16> %x, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %sh2 = lshr <8 x i16> %y, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %cmp = icmp ugt <8 x i16> %sh1, %sh2
+ ret <8 x i1> %cmp
+}
+
+define <8 x i1> @ult_v8i16(<8 x i16> %x, <8 x i16> %y) {
+; SSE-LABEL: ult_v8i16:
+; SSE: # BB#0:
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm2
+; SSE-NEXT: pcmpgtw %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: ult_v8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %sh1 = lshr <8 x i16> %x, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %sh2 = lshr <8 x i16> %y, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %cmp = icmp ult <8 x i16> %sh1, %sh2
+ ret <8 x i1> %cmp
+}
+
+define <8 x i1> @uge_v8i16(<8 x i16> %x, <8 x i16> %y) {
+; SSE2-LABEL: uge_v8i16:
+; SSE2: # BB#0:
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: psubusw %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pcmpeqw %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: uge_v8i16:
+; SSE41: # BB#0:
+; SSE41-NEXT: psrlw $1, %xmm0
+; SSE41-NEXT: psrlw $1, %xmm1
+; SSE41-NEXT: pmaxuw %xmm0, %xmm1
+; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: uge_v8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %sh1 = lshr <8 x i16> %x, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %sh2 = lshr <8 x i16> %y, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %cmp = icmp uge <8 x i16> %sh1, %sh2
+ ret <8 x i1> %cmp
+}
+
+define <8 x i1> @ule_v8i16(<8 x i16> %x, <8 x i16> %y) {
+; SSE2-LABEL: ule_v8i16:
+; SSE2: # BB#0:
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: psubusw %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqw %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: ule_v8i16:
+; SSE41: # BB#0:
+; SSE41-NEXT: psrlw $1, %xmm0
+; SSE41-NEXT: psrlw $1, %xmm1
+; SSE41-NEXT: pminuw %xmm0, %xmm1
+; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: ule_v8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %sh1 = lshr <8 x i16> %x, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %sh2 = lshr <8 x i16> %y, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %cmp = icmp ule <8 x i16> %sh1, %sh2
+ ret <8 x i1> %cmp
+}
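+; Note: plain SSE2 has no pmaxuw/pminuw (only the signed pmaxsw/pminsw), so
+; uge/ule on i16 use saturating subtract instead: usubsat(a, b) == 0 iff
+; a ule b.
+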
+
+define <16 x i1> @ugt_v16i8(<16 x i8> %x, <16 x i8> %y) {
+; SSE-LABEL: ugt_v16i8:
+; SSE: # BB#0:
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pcmpgtb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: ugt_v16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %sh1 = lshr <16 x i8> %x, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %sh2 = lshr <16 x i8> %y, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %cmp = icmp ugt <16 x i8> %sh1, %sh2
+ ret <16 x i1> %cmp
+}
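+; Note: x86 has no per-byte vector shift, so the i8 lshr is lowered as psrlw
+; plus a pand that clears the bit shifted in from the neighboring byte. On
+; one operand the mask and the sign-bit flip combine into a single por,
+; since (v & 0x7f) ^ 0x80 == v | 0x80.
+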
+
+define <16 x i1> @ult_v16i8(<16 x i8> %x, <16 x i8> %y) {
+; SSE-LABEL: ult_v16i8:
+; SSE: # BB#0:
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE-NEXT: por %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm2
+; SSE-NEXT: pcmpgtb %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: ult_v16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %sh1 = lshr <16 x i8> %x, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %sh2 = lshr <16 x i8> %y, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %cmp = icmp ult <16 x i8> %sh1, %sh2
+ ret <16 x i1> %cmp
+}
+
+define <16 x i1> @uge_v16i8(<16 x i8> %x, <16 x i8> %y) {
+; SSE-LABEL: uge_v16i8:
+; SSE: # BB#0:
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pmaxub %xmm0, %xmm1
+; SSE-NEXT: pcmpeqb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: uge_v16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %sh1 = lshr <16 x i8> %x, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %sh2 = lshr <16 x i8> %y, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %cmp = icmp uge <16 x i8> %sh1, %sh2
+ ret <16 x i1> %cmp
+}
+
+define <16 x i1> @ule_v16i8(<16 x i8> %x, <16 x i8> %y) {
+; SSE-LABEL: ule_v16i8:
+; SSE: # BB#0:
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pminub %xmm0, %xmm1
+; SSE-NEXT: pcmpeqb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: ule_v16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %sh1 = lshr <16 x i8> %x, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %sh2 = lshr <16 x i8> %y, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %cmp = icmp ule <16 x i8> %sh1, %sh2
+ ret <16 x i1> %cmp
+}
+