[llvm] [X86] EmitCmp - use existing XOR node to check for equality (PR #125506)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 3 06:28:58 PST 2025
https://github.com/RKSimon created https://github.com/llvm/llvm-project/pull/125506
Normally we use the flag result of a SUB node for scalar comparisons, as it's more compatible with CMP, but if we're testing for equality and already have an XOR of the operands we can reuse that instead.
Fixes #6146
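For context, a minimal C++ sketch of the kind of source pattern this targets (the function name and shape are illustrative, loosely based on the cmp-xor.ll tests below):

    // If a ^ b is needed anyway, the a == b test can reuse the XOR's
    // ZF flag output instead of emitting a separate SUB/CMP.
    int cmp_xor(int a, int b, int c) {
      int x = a ^ b;           // XOR node already exists in the DAG
      return (a == b) ? c : x; // equality check on the same operands
    }

With the patch, the x64 lowering can test ZF directly after the xorl and drop the separate cmpl, as the updated test checks show.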
From e58edd493544a5f19dae0cfb14e1f2708ca87ab1 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Mon, 3 Feb 2025 14:27:57 +0000
Subject: [PATCH] [X86] EmitCmp - use existing XOR node to check for equality
Normally we use the flag result of a SUB node for scalar comparisons, as it's more compatible with CMP, but if we're testing for equality and already have an XOR of the operands we can reuse that instead.
Fixes #6146
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 13 +++++++++---
llvm/test/CodeGen/X86/cmp-xor.ll | 28 +++++++++----------------
llvm/test/CodeGen/X86/pr32284.ll | 22 ++++++++-----------
3 files changed, 29 insertions(+), 34 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 8f904209d8a3ad..662da7df113e5f 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -23157,10 +23157,17 @@ static SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
return Add.getValue(1);
}
- // Use SUB instead of CMP to enable CSE between SUB and CMP.
+ // If we already have an XOR of the ops, use that to check for equality.
+ // Else use SUB instead of CMP to enable CSE between SUB and CMP.
+ unsigned X86Opc = X86ISD::SUB;
+ if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) &&
+ (DAG.doesNodeExist(ISD::XOR, DAG.getVTList({CmpVT}), {Op0, Op1}) ||
+ DAG.doesNodeExist(ISD::XOR, DAG.getVTList({CmpVT}), {Op1, Op0})))
+ X86Opc = X86ISD::XOR;
+
SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
- SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
- return Sub.getValue(1);
+ SDValue CmpOp = DAG.getNode(X86Opc, dl, VTs, Op0, Op1);
+ return CmpOp.getValue(1);
}
bool X86TargetLowering::isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode Cond,
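A note on the two doesNodeExist calls above: SelectionDAG CSE keys nodes on their exact operand list, so a commutative XOR may have been created as either (Op0, Op1) or (Op1, Op0); querying both orders covers either form. A minimal sketch of the lookup, assuming Op0, Op1 and CmpVT as in EmitCmp:

    // Query-only: reports whether an ISD::XOR of these operands already
    // exists in the DAG; it never materializes a new node.
    bool HasXor =
        DAG.doesNodeExist(ISD::XOR, DAG.getVTList(CmpVT), {Op0, Op1}) ||
        DAG.doesNodeExist(ISD::XOR, DAG.getVTList(CmpVT), {Op1, Op0});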
diff --git a/llvm/test/CodeGen/X86/cmp-xor.ll b/llvm/test/CodeGen/X86/cmp-xor.ll
index 6be5cea3830328..a8a7da4871ba96 100644
--- a/llvm/test/CodeGen/X86/cmp-xor.ll
+++ b/llvm/test/CodeGen/X86/cmp-xor.ll
@@ -9,22 +9,18 @@
define i32 @cmp_xor_i32(i32 %a, i32 %b, i32 %c)
; X86-LABEL: cmp_xor_i32:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmpl %ecx, %eax
-; X86-NEXT: je .LBB0_1
-; X86-NEXT: # %bb.2:
-; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: retl
-; X86-NEXT: .LBB0_1:
+; X86-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: jne .LBB0_2
+; X86-NEXT: # %bb.1:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: .LBB0_2:
; X86-NEXT: retl
;
; X64-LABEL: cmp_xor_i32:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: xorl %esi, %eax
-; X64-NEXT: cmpl %esi, %edi
; X64-NEXT: cmovel %edx, %eax
; X64-NEXT: retq
{
@@ -37,22 +33,18 @@ define i32 @cmp_xor_i32(i32 %a, i32 %b, i32 %c)
define i32 @cmp_xor_i32_commute(i32 %a, i32 %b, i32 %c)
; X86-LABEL: cmp_xor_i32_commute:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmpl %eax, %ecx
-; X86-NEXT: je .LBB1_1
-; X86-NEXT: # %bb.2:
-; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: retl
-; X86-NEXT: .LBB1_1:
+; X86-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: jne .LBB1_2
+; X86-NEXT: # %bb.1:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: .LBB1_2:
; X86-NEXT: retl
;
; X64-LABEL: cmp_xor_i32_commute:
; X64: # %bb.0:
-; X64-NEXT: movl %esi, %eax
-; X64-NEXT: xorl %edi, %eax
-; X64-NEXT: cmpl %esi, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl %esi, %eax
; X64-NEXT: cmovel %edx, %eax
; X64-NEXT: retq
{
diff --git a/llvm/test/CodeGen/X86/pr32284.ll b/llvm/test/CodeGen/X86/pr32284.ll
index 90fb76a23e9a5d..8a726a469b61eb 100644
--- a/llvm/test/CodeGen/X86/pr32284.ll
+++ b/llvm/test/CodeGen/X86/pr32284.ll
@@ -321,11 +321,9 @@ define void @f2() {
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: testl %eax, %eax
; X64-NEXT: sete %cl
-; X64-NEXT: movl %eax, %edx
-; X64-NEXT: xorl %ecx, %edx
-; X64-NEXT: movw %dx, -{{[0-9]+}}(%rsp)
; X64-NEXT: xorl %edx, %edx
-; X64-NEXT: cmpl %eax, %ecx
+; X64-NEXT: xorl %eax, %ecx
+; X64-NEXT: movw %cx, -{{[0-9]+}}(%rsp)
; X64-NEXT: sete %dl
; X64-NEXT: movw %dx, (%rax)
; X64-NEXT: retq
@@ -366,17 +364,15 @@ define void @f2() {
; X86: # %bb.0: # %entry
; X86-NEXT: subl $2, %esp
; X86-NEXT: .cfi_def_cfa_offset 6
-; X86-NEXT: movzbl var_7, %ecx
+; X86-NEXT: movzbl var_7, %edx
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: testl %ecx, %ecx
+; X86-NEXT: testl %edx, %edx
; X86-NEXT: sete %al
-; X86-NEXT: movl %ecx, %edx
-; X86-NEXT: xorl %eax, %edx
-; X86-NEXT: movw %dx, (%esp)
-; X86-NEXT: xorl %edx, %edx
-; X86-NEXT: cmpl %ecx, %eax
-; X86-NEXT: sete %dl
-; X86-NEXT: movw %dx, (%eax)
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: xorl %edx, %eax
+; X86-NEXT: movw %ax, (%esp)
+; X86-NEXT: sete %cl
+; X86-NEXT: movw %cx, (%eax)
; X86-NEXT: addl $2, %esp
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl