[llvm] 1c420cd - Precommit test for #61120
Jun Zhang via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 22 21:49:31 PDT 2023
Author: Jun Zhang
Date: 2023-03-23T12:48:59+08:00
New Revision: 1c420cd4e31f68fedca83b4d3a857a5519f4ce03
URL: https://github.com/llvm/llvm-project/commit/1c420cd4e31f68fedca83b4d3a857a5519f4ce03
DIFF: https://github.com/llvm/llvm-project/commit/1c420cd4e31f68fedca83b4d3a857a5519f4ce03.diff
LOG: Precommit test for #61120
Signed-off-by: Jun Zhang <jun at junz.org>
Added:
Modified:
llvm/test/CodeGen/X86/setcc-combine.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/setcc-combine.ll b/llvm/test/CodeGen/X86/setcc-combine.ll
index c2ee78989ba16..f9542a3268ca8 100644
--- a/llvm/test/CodeGen/X86/setcc-combine.ll
+++ b/llvm/test/CodeGen/X86/setcc-combine.ll
@@ -499,3 +499,563 @@ define double @ogt_no_zero(double %x) {
%r = select i1 %cmp, double %x, double %neg
ret double %r
}
+
+; Signed-greater-than of two bitwise-nots: ~a > ~b (signed) <=> b > a,
+; so the notq pair is redundant; precommit baseline for the #61120 fold.
+define i64 @cmp_sgt_not(i64 %a, i64 %b) {
+; CHECK-LABEL: cmp_sgt_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: notq %rsi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: setg %al
+; CHECK-NEXT: negq %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %nb = xor i64 %b, -1
+ %c = icmp sgt i64 %na, %nb
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Constant RHS: ~a > 42 (signed) <=> a < -43, so the notq should fold
+; into the immediate instead of being emitted.
+define i64 @cmp_sgt_not_with_constant(i64 %a) {
+; CHECK-LABEL: cmp_sgt_not_with_constant:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq $43, %rdi
+; CHECK-NEXT: setge %al
+; CHECK-NEXT: negq %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %c = icmp sgt i64 %na, 42
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Vector form of the sgt case; the pxor-with-all-ones pair is the
+; materialized nots that the fold should remove.
+define <4 x i32> @cmp_sgt_not_with_vec(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: cmp_sgt_not_with_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pcmpeqd %xmm2, %xmm2
+; CHECK-NEXT: pxor %xmm2, %xmm0
+; CHECK-NEXT: pxor %xmm2, %xmm1
+; CHECK-NEXT: pcmpgtd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %na = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %nb = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = icmp sgt <4 x i32> %na, %nb
+ %r = sext <4 x i1> %c to <4 x i32>
+ ret <4 x i32> %r
+}
+
+; Unsigned: ~a u> ~b <=> a u< b. Codegen already merges one not into an
+; add-with-carry (addq/sbbq) sequence; the remaining notq is foldable.
+define i64 @cmp_ugt_not(i64 %a, i64 %b) {
+; CHECK-LABEL: cmp_ugt_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: addq %rsi, %rdi
+; CHECK-NEXT: sbbq %rax, %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %nb = xor i64 %b, -1
+ %c = icmp ugt i64 %na, %nb
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Constant RHS: ~a u> 42 <=> a u< -43 (i.e. 2^64-43); the notq should
+; fold into the immediate.
+define i64 @cmp_ugt_not_with_constant(i64 %a) {
+; CHECK-LABEL: cmp_ugt_not_with_constant:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq $43, %rdi
+; CHECK-NEXT: adcq $-1, %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %c = icmp ugt i64 %na, 42
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Vector unsigned compare. In the SSE2 output the xor with 0x7FFFFFFF
+; merges the IR's not (xor -1) with the usual 0x80000000 bias used to
+; emulate unsigned compares via the signed pcmpgtd.
+define <4 x i32> @cmp_ugt_not_with_vec(<4 x i32> %a, <4 x i32> %b) {
+; SSE2-LABEL: cmp_ugt_not_with_vec:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483647,2147483647,2147483647,2147483647]
+; SSE2-NEXT: pxor %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: cmp_ugt_not_with_vec:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm2, %xmm1
+; SSE41-NEXT: pminud %xmm0, %xmm1
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: retq
+ %na = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %nb = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = icmp ugt <4 x i32> %na, %nb
+ %r = sext <4 x i1> %c to <4 x i32>
+ ret <4 x i32> %r
+}
+
+; ~a >= ~b (signed) <=> b >= a; both notq's are foldable.
+define i64 @cmp_sge_not(i64 %a, i64 %b) {
+; CHECK-LABEL: cmp_sge_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: notq %rsi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: setge %al
+; CHECK-NEXT: negq %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %nb = xor i64 %b, -1
+ %c = icmp sge i64 %na, %nb
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Constant RHS: ~a >= 42 (signed) <=> a <= -43.
+define i64 @cmp_sge_not_with_constant(i64 %a) {
+; CHECK-LABEL: cmp_sge_not_with_constant:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq $42, %rdi
+; CHECK-NEXT: setge %al
+; CHECK-NEXT: negq %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %c = icmp sge i64 %na, 42
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; NOTE(review): despite the name, the IR below uses `icmp uge`, which
+; makes this test identical to @cmp_uge_not_with_vec further down (same
+; body, same expected output). Presumably `icmp sge` was intended —
+; confirm, and regenerate the CHECK lines if the predicate is corrected.
+define <4 x i32> @cmp_sge_not_with_vec(<4 x i32> %a, <4 x i32> %b) {
+; SSE2-LABEL: cmp_sge_not_with_vec:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483647,2147483647,2147483647,2147483647]
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: cmp_sge_not_with_vec:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm2
+; SSE41-NEXT: pmaxud %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE41-NEXT: retq
+ %na = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %nb = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = icmp uge <4 x i32> %na, %nb
+ %r = sext <4 x i1> %c to <4 x i32>
+ ret <4 x i32> %r
+}
+
+; ~a u>= ~b <=> a u<= b; both notq's are foldable.
+define i64 @cmp_uge_not(i64 %a, i64 %b) {
+; CHECK-LABEL: cmp_uge_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: notq %rsi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: adcq $-1, %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %nb = xor i64 %b, -1
+ %c = icmp uge i64 %na, %nb
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Constant RHS: ~a u>= 42 <=> a u<= -43 (2^64-43).
+define i64 @cmp_uge_not_with_constant(i64 %a) {
+; CHECK-LABEL: cmp_uge_not_with_constant:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq $42, %rdi
+; CHECK-NEXT: adcq $-1, %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %c = icmp uge i64 %na, 42
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Vector uge; SSE2 again folds the nots into the 0x7FFFFFFF bias and
+; inverts the strict compare with a final pxor.
+define <4 x i32> @cmp_uge_not_with_vec(<4 x i32> %a, <4 x i32> %b) {
+; SSE2-LABEL: cmp_uge_not_with_vec:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483647,2147483647,2147483647,2147483647]
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: cmp_uge_not_with_vec:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm2
+; SSE41-NEXT: pmaxud %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE41-NEXT: retq
+ %na = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %nb = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = icmp uge <4 x i32> %na, %nb
+ %r = sext <4 x i1> %c to <4 x i32>
+ ret <4 x i32> %r
+}
+
+; ~a <= ~b (signed) <=> b <= a; both notq's are foldable.
+define i64 @cmp_sle_not(i64 %a, i64 %b) {
+; CHECK-LABEL: cmp_sle_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: notq %rsi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: setle %al
+; CHECK-NEXT: negq %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %nb = xor i64 %b, -1
+ %c = icmp sle i64 %na, %nb
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Constant RHS: ~a <= 42 (signed) <=> a >= -43.
+define i64 @cmp_sle_not_with_constant(i64 %a) {
+; CHECK-LABEL: cmp_sle_not_with_constant:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq $43, %rdi
+; CHECK-NEXT: setl %al
+; CHECK-NEXT: negq %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %c = icmp sle i64 %na, 42
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Vector sle: expanded as NOT(sgt), hence the trailing pxor inversion.
+define <4 x i32> @cmp_sle_not_with_vec(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: cmp_sle_not_with_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pcmpeqd %xmm2, %xmm2
+; CHECK-NEXT: pxor %xmm2, %xmm0
+; CHECK-NEXT: pxor %xmm2, %xmm1
+; CHECK-NEXT: pcmpgtd %xmm1, %xmm0
+; CHECK-NEXT: pxor %xmm2, %xmm0
+; CHECK-NEXT: retq
+ %na = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %nb = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = icmp sle <4 x i32> %na, %nb
+ %r = sext <4 x i1> %c to <4 x i32>
+ ret <4 x i32> %r
+}
+
+; ~a < ~b (signed) <=> b < a; both notq's are foldable.
+define i64 @cmp_slt_not(i64 %a, i64 %b) {
+; CHECK-LABEL: cmp_slt_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: notq %rsi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: setl %al
+; CHECK-NEXT: negq %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %nb = xor i64 %b, -1
+ %c = icmp slt i64 %na, %nb
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Constant RHS: ~a < 42 (signed) <=> a > -43.
+define i64 @cmp_slt_not_with_constant(i64 %a) {
+; CHECK-LABEL: cmp_slt_not_with_constant:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq $42, %rdi
+; CHECK-NEXT: setl %al
+; CHECK-NEXT: negq %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %c = icmp slt i64 %na, 42
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Vector slt: lowered as sgt with the operands swapped (pcmpgtd %xmm0, %xmm2).
+define <4 x i32> @cmp_slt_not_with_vec(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: cmp_slt_not_with_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pcmpeqd %xmm2, %xmm2
+; CHECK-NEXT: pxor %xmm2, %xmm0
+; CHECK-NEXT: pxor %xmm1, %xmm2
+; CHECK-NEXT: pcmpgtd %xmm0, %xmm2
+; CHECK-NEXT: movdqa %xmm2, %xmm0
+; CHECK-NEXT: retq
+ %na = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %nb = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = icmp slt <4 x i32> %na, %nb
+ %r = sext <4 x i1> %c to <4 x i32>
+ ret <4 x i32> %r
+}
+
+
+; ~a u< ~b <=> a u> b; codegen already folds one not into the
+; add-with-carry sequence, leaving a single notq to eliminate.
+define i64 @cmp_ult_not(i64 %a, i64 %b) {
+; CHECK-LABEL: cmp_ult_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rsi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: addq %rdi, %rsi
+; CHECK-NEXT: sbbq %rax, %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %nb = xor i64 %b, -1
+ %c = icmp ult i64 %na, %nb
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Constant RHS: ~a u< 42 <=> a u> -43; the not is already fully folded
+; into the addq $42 / sbbq carry check here.
+define i64 @cmp_ult_not_with_constant(i64 %a) {
+; CHECK-LABEL: cmp_ult_not_with_constant:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: addq $42, %rdi
+; CHECK-NEXT: sbbq %rax, %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %c = icmp ult i64 %na, 42
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Vector ult with nots on both sides.
+define <4 x i32> @cmp_ult_not_with_vec(<4 x i32> %a, <4 x i32> %b) {
+; SSE2-LABEL: cmp_ult_not_with_vec:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483647,2147483647,2147483647,2147483647]
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: cmp_ult_not_with_vec:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm2, %xmm1
+; SSE41-NEXT: pmaxud %xmm0, %xmm1
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: retq
+ %na = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %nb = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = icmp ult <4 x i32> %na, %nb
+ %r = sext <4 x i1> %c to <4 x i32>
+ ret <4 x i32> %r
+}
+
+; ~a u<= ~b <=> a u>= b; both notq's are foldable (note the swapped
+; cmpq operand order relative to the uge case above).
+define i64 @cmp_ule_not(i64 %a, i64 %b) {
+; CHECK-LABEL: cmp_ule_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: notq %rsi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq %rdi, %rsi
+; CHECK-NEXT: adcq $-1, %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %nb = xor i64 %b, -1
+ %c = icmp ule i64 %na, %nb
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Constant RHS: ~a u<= 42 is canonicalized to ~a u< 43 (cmpq $43).
+define i64 @cmp_ule_not_with_constant(i64 %a) {
+; CHECK-LABEL: cmp_ule_not_with_constant:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq $43, %rdi
+; CHECK-NEXT: sbbq %rax, %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %c = icmp ule i64 %na, 42
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Vector ule with nots on both sides.
+define <4 x i32> @cmp_ule_not_with_vec(<4 x i32> %a, <4 x i32> %b) {
+; SSE2-LABEL: cmp_ule_not_with_vec:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483647,2147483647,2147483647,2147483647]
+; SSE2-NEXT: pxor %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm3, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: cmp_ule_not_with_vec:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm2
+; SSE41-NEXT: pminud %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE41-NEXT: retq
+ %na = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %nb = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = icmp ule <4 x i32> %na, %nb
+ %r = sext <4 x i1> %c to <4 x i32>
+ ret <4 x i32> %r
+}
+
+; Equality: ~a == ~b <=> a == b. The CHECK lines show this case already
+; folds today — no notq is emitted.
+define i64 @cmp_eq_not(i64 %a, i64 %b) {
+; CHECK-LABEL: cmp_eq_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: negq %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %nb = xor i64 %b, -1
+ %c = icmp eq i64 %na, %nb
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Constant RHS, already folded: ~a == 42 <=> a == -43 (note the $-43).
+define i64 @cmp_eq_not_with_constant(i64 %a) {
+; CHECK-LABEL: cmp_eq_not_with_constant:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq $-43, %rdi
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: negq %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %c = icmp eq i64 %na, 42
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Vector eq: fully folded to a single pcmpeqd.
+define <4 x i32> @cmp_eq_not_with_vec(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: cmp_eq_not_with_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %na = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %nb = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = icmp eq <4 x i32> %na, %nb
+ %r = sext <4 x i1> %c to <4 x i32>
+ ret <4 x i32> %r
+}
+; Inequality: ~a != ~b <=> a != b; like eq, the nots already fold.
+define i64 @cmp_ne_not(i64 %a, i64 %b) {
+; CHECK-LABEL: cmp_ne_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: negq %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %nb = xor i64 %b, -1
+ %c = icmp ne i64 %na, %nb
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Constant RHS, already folded: ~a != 42 <=> a != -43.
+define i64 @cmp_ne_not_with_constant(i64 %a) {
+; CHECK-LABEL: cmp_ne_not_with_constant:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq $-43, %rdi
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: negq %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %c = icmp ne i64 %na, 42
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; Vector ne: pcmpeqd then invert with all-ones pxor (no pcmpneq exists).
+define <4 x i32> @cmp_ne_not_with_vec(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: cmp_ne_not_with_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm0
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
+; CHECK-NEXT: pxor %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %na = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %nb = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = icmp ne <4 x i32> %na, %nb
+ %r = sext <4 x i1> %c to <4 x i32>
+ ret <4 x i32> %r
+}
+
+; Same as @cmp_uge_not but with the parameters declared in swapped
+; order (%b first), exercising the commuted operand form of the fold.
+define i64 @cmp_uge_not_commute(i64 %b, i64 %a) {
+; CHECK-LABEL: cmp_uge_not_commute:
+; CHECK: # %bb.0:
+; CHECK-NEXT: notq %rsi
+; CHECK-NEXT: notq %rdi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq %rdi, %rsi
+; CHECK-NEXT: adcq $-1, %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %nb = xor i64 %b, -1
+ %c = icmp uge i64 %na, %nb
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; NOTE(review): %na is computed but never used — the icmp compares the
+; original %a, and the CHECK lines (no notq) confirm the dead not is
+; dropped. Possibly `icmp ult i64 42, %na` was intended; confirm the
+; test's intent before relying on it.
+define i64 @cmp_ult_not_with_constant_commute(i64 %a) {
+; CHECK-LABEL: cmp_ult_not_with_constant_commute:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpq $43, %rdi
+; CHECK-NEXT: adcq $-1, %rax
+; CHECK-NEXT: retq
+ %na = xor i64 %a, -1
+ %c = icmp ult i64 42, %a
+ %r = sext i1 %c to i64
+ ret i64 %r
+}
+
+; v2i64 uge. SSE2 has no 64-bit compares (pcmpgtq is SSE4.2), hence the
+; pcmpgtd/pcmpeqd/pshufd expansion. The 9223372034707292159 constant is
+; 0x7FFFFFFF7FFFFFFF: the per-32-bit-lane merge of the IR's not with the
+; 0x80000000 signed-compare bias.
+define <2 x i64> @cmp_uge_not_with_vec2xi64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: cmp_uge_not_with_vec2xi64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [9223372034707292159,9223372034707292159]
+; CHECK-NEXT: pxor %xmm2, %xmm0
+; CHECK-NEXT: pxor %xmm2, %xmm1
+; CHECK-NEXT: movdqa %xmm1, %xmm2
+; CHECK-NEXT: pcmpgtd %xmm0, %xmm2
+; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; CHECK-NEXT: pcmpeqd %xmm0, %xmm1
+; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; CHECK-NEXT: pand %xmm3, %xmm0
+; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; CHECK-NEXT: por %xmm0, %xmm1
+; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
+; CHECK-NEXT: pxor %xmm1, %xmm0
+; CHECK-NEXT: retq
+ %na = xor <2 x i64> %a, <i64 -1, i64 -1>
+ %nb = xor <2 x i64> %b, <i64 -1, i64 -1>
+ %c = icmp uge <2 x i64> %na, %nb
+ %r = sext <2 x i1> %c to <2 x i64>
+ ret <2 x i64> %r
+}
More information about the llvm-commits
mailing list