[llvm] goldsteinn/x86 eq or eq (PR #84104)

via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 5 18:01:00 PST 2024


https://github.com/goldsteinn created https://github.com/llvm/llvm-project/pull/84104

- **[X86] Add tests for folding `(icmp ult (add x,-C),2)` -> `(or (icmp eq X,C), (icmp eq X,C+1))`; NFC**
- **[X86] Fold `(icmp ult (add x,-C),2)` -> `(or (icmp eq X,C), (icmp eq X,C+1))` for Vectors**
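
For context, a minimal LLVM IR sketch of the two equivalent forms (the function names are illustrative only, and `C = 5` is chosen to mirror the `eq_or_eq_ult_2` test below). With this patch the backend rewrites the first form into the second during DAG combining (on the SelectionDAG, not in IR) when that is profitable, e.g. pre-AVX512:

```llvm
; Illustrative only: C = 5 chosen to mirror the eq_or_eq_ult_2 test.
; Canonical middle-end form: the compare is true iff x is in {5, 6},
; i.e. iff (x - 5) u< 2.
define <4 x i1> @range_form(<4 x i32> %x) {
  %adj = add <4 x i32> %x, <i32 -5, i32 -5, i32 -5, i32 -5>
  %cmp = icmp ult <4 x i32> %adj, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i1> %cmp
}

; Equivalent eq-or-eq form. Pre-AVX512 this selects to two pcmpeqd plus a
; por, rather than emulating an unsigned vector compare.
define <4 x i1> @eq_or_eq_form(<4 x i32> %x) {
  %eq5 = icmp eq <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %eq6 = icmp eq <4 x i32> %x, <i32 6, i32 6, i32 6, i32 6>
  %r = or <4 x i1> %eq5, %eq6
  ret <4 x i1> %r
}
```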


From 6d366f713886c590d9e89d26eeb26e45bba6bf8b Mon Sep 17 00:00:00 2001
From: Noah Goldstein <goldstein.w.n at gmail.com>
Date: Tue, 5 Mar 2024 19:47:38 -0600
Subject: [PATCH 1/2] [X86] Add tests for folding `(icmp ult (add x,-C),2)` ->
 `(or (icmp eq X,C), (icmp eq X,C+1))`; NFC

---
 llvm/test/CodeGen/X86/eq-or-eq-range-of-2.ll | 356 +++++++++++++++++++
 1 file changed, 356 insertions(+)
 create mode 100644 llvm/test/CodeGen/X86/eq-or-eq-range-of-2.ll

diff --git a/llvm/test/CodeGen/X86/eq-or-eq-range-of-2.ll b/llvm/test/CodeGen/X86/eq-or-eq-range-of-2.ll
new file mode 100644
index 00000000000000..bd239008c3200c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/eq-or-eq-range-of-2.ll
@@ -0,0 +1,356 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE41
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2
+
+declare void @use.v4.i32(<4 x i32>)
+
+define <4 x i32> @eq_or_eq_ult_2(<4 x i32> %x) {
+; AVX512-LABEL: eq_or_eq_ult_2:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpltud {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %k1
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT:    retq
+;
+; AVX2-LABEL: eq_or_eq_ult_2:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294967291,4294967291,4294967291,4294967291]
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
+; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; SSE41-LABEL: eq_or_eq_ult_2:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [1,1,1,1]
+; SSE41-NEXT:    pminud %xmm0, %xmm1
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; SSE2-LABEL: eq_or_eq_ult_2:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2147483650,2147483650,2147483650,2147483650]
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    retq
+  %x_adj = add <4 x i32> %x, <i32 -5, i32 -5, i32 -5, i32 -5>
+  %cmp = icmp ult <4 x i32> %x_adj, <i32 2, i32 2, i32 2, i32 2>
+  %r = sext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %r
+}
+
+define <4 x i32> @eq_or_eq_ult_2_only_transform_sse2(<4 x i32> %x) {
+; AVX512-LABEL: eq_or_eq_ult_2_only_transform_sse2:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpltud {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %k1
+; AVX512-NEXT:    vmovdqa32 %xmm1, %xmm0 {%k1} {z}
+; AVX512-NEXT:    retq
+;
+; AVX2-LABEL: eq_or_eq_ult_2_only_transform_sse2:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
+; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; SSE41-LABEL: eq_or_eq_ult_2_only_transform_sse2:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT:    paddd %xmm1, %xmm0
+; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [1,1,1,1]
+; SSE41-NEXT:    pminud %xmm0, %xmm1
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; SSE2-LABEL: eq_or_eq_ult_2_only_transform_sse2:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT:    paddd %xmm0, %xmm1
+; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [2147483650,2147483650,2147483650,2147483650]
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+  %x_adj = add <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %cmp = icmp ult <4 x i32> %x_adj, <i32 2, i32 2, i32 2, i32 2>
+  %r = sext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %r
+}
+
+define <4 x i32> @eq_or_eq_ult_2_fail_multiuse(<4 x i32> %x) {
+; AVX512-LABEL: eq_or_eq_ult_2_fail_multiuse:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    subq $24, %rsp
+; AVX512-NEXT:    .cfi_def_cfa_offset 32
+; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX512-NEXT:    callq use.v4.i32@PLT
+; AVX512-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVX512-NEXT:    vpcmpltud {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %k1
+; AVX512-NEXT:    vmovdqa32 {{.*#+}} xmm0 {%k1} {z} = [4294967295,4294967295,4294967295,4294967295]
+; AVX512-NEXT:    addq $24, %rsp
+; AVX512-NEXT:    .cfi_def_cfa_offset 8
+; AVX512-NEXT:    retq
+;
+; AVX2-LABEL: eq_or_eq_ult_2_fail_multiuse:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    subq $24, %rsp
+; AVX2-NEXT:    .cfi_def_cfa_offset 32
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX2-NEXT:    callq use.v4.i32@PLT
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [1,1,1,1]
+; AVX2-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX2-NEXT:    vpminud %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    addq $24, %rsp
+; AVX2-NEXT:    .cfi_def_cfa_offset 8
+; AVX2-NEXT:    retq
+;
+; SSE41-LABEL: eq_or_eq_ult_2_fail_multiuse:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    subq $24, %rsp
+; SSE41-NEXT:    .cfi_def_cfa_offset 32
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT:    paddd %xmm1, %xmm0
+; SSE41-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
+; SSE41-NEXT:    callq use.v4.i32@PLT
+; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm0 = [1,1,1,1]
+; SSE41-NEXT:    movdqa (%rsp), %xmm1 # 16-byte Reload
+; SSE41-NEXT:    pminud %xmm1, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT:    addq $24, %rsp
+; SSE41-NEXT:    .cfi_def_cfa_offset 8
+; SSE41-NEXT:    retq
+;
+; SSE2-LABEL: eq_or_eq_ult_2_fail_multiuse:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    subq $24, %rsp
+; SSE2-NEXT:    .cfi_def_cfa_offset 32
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT:    paddd %xmm0, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, (%rsp) # 16-byte Spill
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    callq use.v4.i32@PLT
+; SSE2-NEXT:    movdqa (%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [2147483650,2147483650,2147483650,2147483650]
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT:    addq $24, %rsp
+; SSE2-NEXT:    .cfi_def_cfa_offset 8
+; SSE2-NEXT:    retq
+  %x_adj = add <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
+  call void @use.v4.i32(<4 x i32> %x_adj)
+  %cmp = icmp ult <4 x i32> %x_adj, <i32 2, i32 2, i32 2, i32 2>
+  %r = sext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %r
+}
+
+define <4 x i32> @eq_or_eq_ult_3_fail(<4 x i32> %x) {
+; AVX512-LABEL: eq_or_eq_ult_3_fail:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpltud {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %k1
+; AVX512-NEXT:    vmovdqa32 %xmm1, %xmm0 {%k1} {z}
+; AVX512-NEXT:    retq
+;
+; AVX2-LABEL: eq_or_eq_ult_3_fail:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2,2,2,2]
+; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; SSE41-LABEL: eq_or_eq_ult_3_fail:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT:    paddd %xmm1, %xmm0
+; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [2,2,2,2]
+; SSE41-NEXT:    pminud %xmm0, %xmm1
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; SSE2-LABEL: eq_or_eq_ult_3_fail:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT:    paddd %xmm0, %xmm1
+; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [2147483651,2147483651,2147483651,2147483651]
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+  %x_adj = add <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %cmp = icmp ult <4 x i32> %x_adj, <i32 3, i32 3, i32 3, i32 3>
+  %r = sext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %r
+}
+
+define <4 x i32> @eq_or_eq_ugt_m3(<4 x i32> %x) {
+; AVX512-LABEL: eq_or_eq_ugt_m3:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpnleud {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %k1
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT:    retq
+;
+; AVX2-LABEL: eq_or_eq_ugt_m3:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294967294,4294967294,4294967294,4294967294]
+; AVX2-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; SSE41-LABEL: eq_or_eq_ugt_m3:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [4294967294,4294967294,4294967294,4294967294]
+; SSE41-NEXT:    pmaxud %xmm0, %xmm1
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; SSE2-LABEL: eq_or_eq_ugt_m3:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    retq
+  %x_adj = add <4 x i32> %x, <i32 -11, i32 -14, i32 -11, i32 -11>
+  %cmp = icmp ugt <4 x i32> %x_adj, <i32 -3, i32 -3, i32 -3, i32 -3>
+  %r = sext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %r
+}
+
+define <4 x i32> @eq_or_eq_ule_1(<4 x i32> %x) {
+; AVX512-LABEL: eq_or_eq_ule_1:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpleud {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %k1
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT:    retq
+;
+; AVX2-LABEL: eq_or_eq_ule_1:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
+; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; SSE41-LABEL: eq_or_eq_ule_1:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [1,1,1,1]
+; SSE41-NEXT:    pminud %xmm0, %xmm1
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; SSE2-LABEL: eq_or_eq_ule_1:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT:    pxor %xmm1, %xmm0
+; SSE2-NEXT:    retq
+  %x_adj = add <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
+  %cmp = icmp ule <4 x i32> %x_adj, <i32 1, i32 1, i32 1, i32 1>
+  %r = sext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %r
+}
+
+define <4 x i32> @eq_or_eq_uge_m2(<4 x i32> %x) {
+; AVX512-LABEL: eq_or_eq_uge_m2:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpnltud {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %k1
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT:    retq
+;
+; AVX2-LABEL: eq_or_eq_uge_m2:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294967294,4294967294,4294967294,4294967294]
+; AVX2-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; SSE41-LABEL: eq_or_eq_uge_m2:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [4294967294,4294967294,4294967294,4294967294]
+; SSE41-NEXT:    pmaxud %xmm0, %xmm1
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; SSE2-LABEL: eq_or_eq_uge_m2:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2147483646,2147483646,2147483646,2147483646]
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm0
+; SSE2-NEXT:    retq
+  %x_adj = add <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
+  %cmp = icmp uge <4 x i32> %x_adj, <i32 -2, i32 -2, i32 -2, i32 -2>
+  %r = sext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %r
+}
+
+define <4 x i32> @eq_or_eq_uge_2_fail_(<4 x i32> %x) {
+; AVX512-LABEL: eq_or_eq_uge_2_fail_:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpnltud {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %k1
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT:    retq
+;
+; AVX2-LABEL: eq_or_eq_uge_2_fail_:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2,2,2,2]
+; AVX2-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; SSE41-LABEL: eq_or_eq_uge_2_fail_:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [2,2,2,2]
+; SSE41-NEXT:    pmaxud %xmm0, %xmm1
+; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; SSE2-LABEL: eq_or_eq_uge_2_fail_:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2147483650,2147483650,2147483650,2147483650]
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm0
+; SSE2-NEXT:    retq
+  %x_adj = add <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
+  %cmp = icmp uge <4 x i32> %x_adj, <i32 2, i32 2, i32 2, i32 2>
+  %r = sext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %r
+}

From 7447002806757ad462716b5839afd6164a0d5f16 Mon Sep 17 00:00:00 2001
From: Noah Goldstein <goldstein.w.n at gmail.com>
Date: Tue, 5 Mar 2024 19:44:41 -0600
Subject: [PATCH 2/2] [X86] Fold `(icmp ult (add x,-C),2)` -> `(or (icmp eq
 X,C), (icmp eq X,C+1))` for Vectors

This undoes a middle-end transform which does the opposite fold. Since
X86 doesn't have unsigned vector comparison instructions pre-AVX512,
the simplified form results in worse codegen.
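
As a rough sketch of the inverted `ugt` case the combine also handles
(constants picked only for illustration; the rewrite happens on the
SelectionDAG in combineSetCC, IR is shown here just for readability):

  %adj = add <4 x i32> %x, <i32 -11, i32 -11, i32 -11, i32 -11>
  %cmp = icmp ugt <4 x i32> %adj, <i32 -3, i32 -3, i32 -3, i32 -3>

is true exactly when %x + -11 wraps to -2 or -1, i.e. when %x == 9 or
%x == 10, so it can instead be selected as the or of those two equality
compares.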

Fixes #66479

Proofs: https://alive2.llvm.org/ce/z/UCz3wt
---
 llvm/lib/Target/X86/X86ISelLowering.cpp      |  59 ++++++++++
 llvm/test/CodeGen/X86/eq-or-eq-range-of-2.ll | 107 +++++++++----------
 2 files changed, 109 insertions(+), 57 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6eaaec407dbb08..99061c900fe43b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -64,6 +64,7 @@
 #include <algorithm>
 #include <bitset>
 #include <cctype>
+#include "llvm/CodeGen/ISDOpcodes.h"
 #include <numeric>
 using namespace llvm;
 
@@ -53408,6 +53409,64 @@ static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
           truncateAVX512SetCCNoBWI(VT, OpVT, LHS, RHS, CC, DL, DAG, Subtarget))
     return R;
 
+  // The middle end transforms:
+  //    `(or (icmp eq X, C), (icmp eq X, C+1))`
+  //        -> `(icmp ult (add x, -C), 2)`
+  // and likewise the inverted cases with `ugt`.
+  //
+  // Since x86, pre-AVX512, doesn't have unsigned vector compares, this results
+  // in worse codegen. So, undo the middle-end transform and go back to the
+  // `(or (icmp eq), (icmp eq))` form.
+  //
+  // NB: We don't handle the similar simplification of `(and (icmp ne), (icmp
+  // ne))` as it doesn't end up being a net instruction win.
+  // TODO: We might want to do this for avx512 as well if we `sext` the result.
+  if (VT.isVector() && OpVT.isVector() && OpVT.isInteger() &&
+      ISD::isUnsignedIntSetCC(CC) && LHS.getOpcode() == ISD::ADD &&
+      !Subtarget.hasAVX512() && LHS.hasOneUse()) {
+
+    APInt CmpC;
+    SDValue AddC = LHS.getOperand(1);
+    if (ISD::isConstantSplatVector(RHS.getNode(), CmpC) &&
+        DAG.isConstantIntBuildVectorOrConstantInt(AddC)) {
+      // See which form we have depending on the constant/condition.
+      SDValue C0 = SDValue();
+      SDValue C1 = SDValue();
+
+      // If we had `(add x, -1)` and can lower with `umin`, don't transform as
+      // we would end up generating an additional constant. Keeping the
+      // current form has a slight latency cost, but it is probably worth
+      // saving a constant.
+      if (ISD::isConstantSplatVectorAllOnes(AddC.getNode()) &&
+          DAG.getTargetLoweringInfo().isOperationLegal(ISD::UMIN, OpVT)) {
+        // Pass
+      }
+      // Normal Cases
+      else if ((CC == ISD::SETULT && CmpC == 2) ||
+               (CC == ISD::SETULE && CmpC == 1)) {
+        // These will constant fold.
+        C0 = DAG.getNegative(AddC, DL, OpVT);
+        C1 = DAG.getNode(ISD::SUB, DL, OpVT, C0,
+                         DAG.getAllOnesConstant(DL, OpVT));
+      }
+      // Inverted Cases
+      else if ((CC == ISD::SETUGT && (-CmpC) == 3) ||
+               (CC == ISD::SETUGE && (-CmpC) == 2)) {
+        // These will constant fold.
+        C0 = DAG.getNOT(DL, AddC, OpVT);
+        C1 = DAG.getNode(ISD::ADD, DL, OpVT, C0,
+                         DAG.getAllOnesConstant(DL, OpVT));
+      }
+      if (C0 && C1) {
+        SDValue NewLHS =
+            DAG.getSetCC(DL, VT, LHS.getOperand(0), C0, ISD::SETEQ);
+        SDValue NewRHS =
+            DAG.getSetCC(DL, VT, LHS.getOperand(0), C1, ISD::SETEQ);
+        return DAG.getNode(ISD::OR, DL, VT, NewLHS, NewRHS);
+      }
+    }
+  }
+
   // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
   // to avoid scalarization via legalization because v4i32 is not a legal type.
   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
diff --git a/llvm/test/CodeGen/X86/eq-or-eq-range-of-2.ll b/llvm/test/CodeGen/X86/eq-or-eq-range-of-2.ll
index bd239008c3200c..249c5ff006f70a 100644
--- a/llvm/test/CodeGen/X86/eq-or-eq-range-of-2.ll
+++ b/llvm/test/CodeGen/X86/eq-or-eq-range-of-2.ll
@@ -17,28 +17,27 @@ define <4 x i32> @eq_or_eq_ult_2(<4 x i32> %x) {
 ;
 ; AVX2-LABEL: eq_or_eq_ult_2:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294967291,4294967291,4294967291,4294967291]
-; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
-; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [6,6,6,6]
+; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [5,5,5,5]
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; SSE41-LABEL: eq_or_eq_ult_2:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [1,1,1,1]
-; SSE41-NEXT:    pminud %xmm0, %xmm1
-; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [6,6,6,6]
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
+; SSE41-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; SSE2-LABEL: eq_or_eq_ult_2:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2147483650,2147483650,2147483650,2147483650]
-; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [6,6,6,6]
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    por %xmm1, %xmm0
 ; SSE2-NEXT:    retq
   %x_adj = add <4 x i32> %x, <i32 -5, i32 -5, i32 -5, i32 -5>
   %cmp = icmp ult <4 x i32> %x_adj, <i32 2, i32 2, i32 2, i32 2>
@@ -75,11 +74,10 @@ define <4 x i32> @eq_or_eq_ult_2_only_transform_sse2(<4 x i32> %x) {
 ;
 ; SSE2-LABEL: eq_or_eq_ult_2_only_transform_sse2:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE2-NEXT:    paddd %xmm0, %xmm1
-; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [2147483650,2147483650,2147483650,2147483650]
-; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2,2,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    por %xmm1, %xmm0
 ; SSE2-NEXT:    retq
   %x_adj = add <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
   %cmp = icmp ult <4 x i32> %x_adj, <i32 2, i32 2, i32 2, i32 2>
@@ -210,25 +208,25 @@ define <4 x i32> @eq_or_eq_ugt_m3(<4 x i32> %x) {
 ;
 ; AVX2-LABEL: eq_or_eq_ugt_m3:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294967294,4294967294,4294967294,4294967294]
-; AVX2-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; SSE41-LABEL: eq_or_eq_ugt_m3:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [4294967294,4294967294,4294967294,4294967294]
-; SSE41-NEXT:    pmaxud %xmm0, %xmm1
-; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [9,12,9,9]
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
+; SSE41-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; SSE2-LABEL: eq_or_eq_ugt_m3:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [9,12,9,9]
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    por %xmm1, %xmm0
 ; SSE2-NEXT:    retq
   %x_adj = add <4 x i32> %x, <i32 -11, i32 -14, i32 -11, i32 -11>
   %cmp = icmp ugt <4 x i32> %x_adj, <i32 -3, i32 -3, i32 -3, i32 -3>
@@ -247,27 +245,25 @@ define <4 x i32> @eq_or_eq_ule_1(<4 x i32> %x) {
 ;
 ; AVX2-LABEL: eq_or_eq_ule_1:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
-; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; SSE41-LABEL: eq_or_eq_ule_1:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [1,1,1,1]
-; SSE41-NEXT:    pminud %xmm0, %xmm1
-; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [0,4294967295,4294967294,4294967293]
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
+; SSE41-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; SSE2-LABEL: eq_or_eq_ule_1:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE2-NEXT:    pxor %xmm1, %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [0,4294967295,4294967294,4294967293]
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    por %xmm1, %xmm0
 ; SSE2-NEXT:    retq
   %x_adj = add <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
   %cmp = icmp ule <4 x i32> %x_adj, <i32 1, i32 1, i32 1, i32 1>
@@ -286,28 +282,25 @@ define <4 x i32> @eq_or_eq_uge_m2(<4 x i32> %x) {
 ;
 ; AVX2-LABEL: eq_or_eq_uge_m2:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294967294,4294967294,4294967294,4294967294]
-; AVX2-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; SSE41-LABEL: eq_or_eq_uge_m2:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [4294967294,4294967294,4294967294,4294967294]
-; SSE41-NEXT:    pmaxud %xmm0, %xmm1
-; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [4294967293,4294967292,4294967291,4294967290]
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
+; SSE41-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT:    por %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; SSE2-LABEL: eq_or_eq_uge_m2:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2147483646,2147483646,2147483646,2147483646]
-; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
-; SSE2-NEXT:    pxor %xmm1, %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [4294967293,4294967292,4294967291,4294967290]
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    por %xmm1, %xmm0
 ; SSE2-NEXT:    retq
   %x_adj = add <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
   %cmp = icmp uge <4 x i32> %x_adj, <i32 -2, i32 -2, i32 -2, i32 -2>


