[llvm] 372c808 - [X86] canCreateUndefOrPoisonForTargetNode - PCMPEQ/PCMPGT don't create poison/undef (#146116)

via llvm-commits llvm-commits at lists.llvm.org
Sat Jun 28 09:01:13 PDT 2025


Author: Simon Pilgrim
Date: 2025-06-28T17:01:10+01:00
New Revision: 372c808217773c910c0379a8ceda6293dbbf663b

URL: https://github.com/llvm/llvm-project/commit/372c808217773c910c0379a8ceda6293dbbf663b
DIFF: https://github.com/llvm/llvm-project/commit/372c808217773c910c0379a8ceda6293dbbf663b.diff

LOG: [X86] canCreateUndefOrPoisonForTargetNode - PCMPEQ/PCMPGT don't create poison/undef (#146116)

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/vector-compress.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 307a237e2955c..36920de8cb7c5 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -45093,9 +45093,11 @@ bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode(
   case X86ISD::UNPCKH:
   case X86ISD::UNPCKL:
     return false;
-    // SSE comparisons handle all fcmp cases.
-    // TODO: Add PCMPEQ/GT and CMPM/MM with test coverage.
+    // SSE comparisons handle all icmp/fcmp cases.
+    // TODO: Add CMPM/MM with test coverage.
   case X86ISD::CMPP:
+  case X86ISD::PCMPEQ:
+  case X86ISD::PCMPGT:
     return false;
   case ISD::INTRINSIC_WO_CHAIN:
     switch (Op->getConstantOperandVal(0)) {
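
For context, a minimal, hypothetical IR sketch (not from the patch or its tests) of the kind of DAG this change affects, assuming the usual AVX2 lowering of a v2i64 signed compare to X86ISD::PCMPGT:

    define <2 x i64> @freeze_pcmpgt(<2 x i64> %a, <2 x i64> %b) {
      ; With AVX2 this compare typically lowers to vpcmpgtq (X86ISD::PCMPGT).
      %c = icmp sgt <2 x i64> %a, %b
      %s = sext <2 x i1> %c to <2 x i64>
      ; The freeze becomes an ISD::FREEZE node in the SelectionDAG.
      %f = freeze <2 x i64> %s
      ret <2 x i64> %f
    }

With PCMPEQ/PCMPGT now reported as unable to create undef or poison, SelectionDAG::canCreateUndefOrPoison returns false for them, so DAG combining can push a FREEZE through the compare onto its operands rather than treating it as a barrier. The AVX2 improvements in vector-compress.ll below appear to come from that kind of simplification of the frozen mask computation.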

diff --git a/llvm/test/CodeGen/X86/vector-compress.ll b/llvm/test/CodeGen/X86/vector-compress.ll
index e88387a8b7c69..894186f9b343b 100644
--- a/llvm/test/CodeGen/X86/vector-compress.ll
+++ b/llvm/test/CodeGen/X86/vector-compress.ll
@@ -138,22 +138,21 @@ define <2 x i64> @test_compress_v2i64(<2 x i64> %vec, <2 x i1> %mask, <2 x i64>
 ; AVX2-NEXT:    vmovaps %xmm2, -{{[0-9]+}}(%rsp)
 ; AVX2-NEXT:    vpextrq $1, %xmm1, %rax
 ; AVX2-NEXT:    vmovq %xmm1, %rcx
+; AVX2-NEXT:    andl $1, %ecx
 ; AVX2-NEXT:    movl %ecx, %edx
-; AVX2-NEXT:    subl %eax, %edx
-; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    subq %rax, %rcx
+; AVX2-NEXT:    movl %ecx, %eax
 ; AVX2-NEXT:    andl $1, %eax
-; AVX2-NEXT:    andl $1, %ecx
-; AVX2-NEXT:    addq %rcx, %rax
 ; AVX2-NEXT:    vpextrq $1, %xmm0, %rsi
-; AVX2-NEXT:    cmpq $2, %rax
-; AVX2-NEXT:    cmovbq -24(%rsp,%rdx,8), %rsi
+; AVX2-NEXT:    cmpq $2, %rcx
+; AVX2-NEXT:    cmovbq -24(%rsp,%rax,8), %rsi
 ; AVX2-NEXT:    vmovq %xmm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    movl %ecx, %ecx
-; AVX2-NEXT:    vpextrq $1, %xmm0, -24(%rsp,%rcx,8)
-; AVX2-NEXT:    cmpq $1, %rax
-; AVX2-NEXT:    movl $1, %ecx
-; AVX2-NEXT:    cmovbq %rax, %rcx
-; AVX2-NEXT:    movq %rsi, -24(%rsp,%rcx,8)
+; AVX2-NEXT:    vpextrq $1, %xmm0, -24(%rsp,%rdx,8)
+; AVX2-NEXT:    cmpq $1, %rcx
+; AVX2-NEXT:    movl $1, %eax
+; AVX2-NEXT:    cmovbq %rcx, %rax
+; AVX2-NEXT:    movl %eax, %eax
+; AVX2-NEXT:    movq %rsi, -24(%rsp,%rax,8)
 ; AVX2-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm0
 ; AVX2-NEXT:    retq
 ;
@@ -188,18 +187,16 @@ define <2 x double> @test_compress_v2f64(<2 x double> %vec, <2 x i1> %mask, <2 x
 ; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm1
 ; AVX2-NEXT:    vmovaps %xmm2, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX2-NEXT:    vmovq %xmm1, %rcx
-; AVX2-NEXT:    movl %ecx, %edx
-; AVX2-NEXT:    subl %eax, %edx
-; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    vpextrq $1, %xmm1, %rcx
+; AVX2-NEXT:    vmovq %xmm1, %rax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movl %eax, %edx
+; AVX2-NEXT:    subq %rcx, %rax
+; AVX2-NEXT:    movl %eax, %ecx
+; AVX2-NEXT:    andl $1, %ecx
 ; AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-NEXT:    vmovlpd %xmm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    andl $1, %ecx
-; AVX2-NEXT:    movl %ecx, %edx
 ; AVX2-NEXT:    vmovhpd %xmm0, -24(%rsp,%rdx,8)
-; AVX2-NEXT:    andl $1, %eax
-; AVX2-NEXT:    addq %rcx, %rax
 ; AVX2-NEXT:    cmpq $2, %rax
 ; AVX2-NEXT:    jb .LBB3_2
 ; AVX2-NEXT:  # %bb.1:
@@ -208,7 +205,8 @@ define <2 x double> @test_compress_v2f64(<2 x double> %vec, <2 x i1> %mask, <2 x
 ; AVX2-NEXT:    cmpq $1, %rax
 ; AVX2-NEXT:    movl $1, %ecx
 ; AVX2-NEXT:    cmovbq %rax, %rcx
-; AVX2-NEXT:    vmovsd %xmm1, -24(%rsp,%rcx,8)
+; AVX2-NEXT:    movl %ecx, %eax
+; AVX2-NEXT:    vmovsd %xmm1, -24(%rsp,%rax,8)
 ; AVX2-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm0
 ; AVX2-NEXT:    retq
 ;


        

