[llvm] [X86] Allow all legal integers to optimize smin with 0 (PR #151893)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Sep 19 03:37:41 PDT 2025
https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/151893
From 2cda0f01cc017b3b7d13a69387f84794f05b9449 Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Sun, 3 Aug 2025 21:56:30 -0400
Subject: [PATCH] [X86] Allow all legal integers to optimize smin with 0.
There is no reason to restrict the smin(x, 0) case to 32-bit and 64-bit types. hasAndNot only succeeds for 32-bit and 64-bit scalars, so the smax(x, 0) path is unaffected.
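For reference, the branchless lowering the relaxed condition enables relies on the standard identity smin(x, 0) == x & (x >> (bitwidth - 1)): the arithmetic shift yields an all-ones mask when x is negative and zero otherwise. A minimal standalone sketch (not LLVM code, assuming arithmetic right shift of negative values, as all mainstream compilers provide) illustrating the identity for i8, matching the sarb/andb sequence in the updated CHECK lines below:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

// Branchless smin(x, 0): the arithmetic shift produces 0xFF when x is
// negative and 0x00 otherwise, so the AND keeps x only when x < 0.
static int8_t sminZeroBranchless(int8_t x) {
  int8_t mask = x >> 7; // all ones if x < 0, else 0 (sarb $7)
  return x & mask;      // x if x < 0, else 0       (andb)
}

int main() {
  for (int v = -128; v <= 127; ++v)
    assert(sminZeroBranchless((int8_t)v) ==
           std::min<int8_t>((int8_t)v, 0));
  return 0;
}
```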
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 2 +-
llvm/test/CodeGen/X86/select-smin-smax.ll | 66 ++++++++---------------
2 files changed, 22 insertions(+), 46 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index f81efdc6414aa..457e959134e25 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -25093,7 +25093,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
} else if (SDValue R = LowerSELECTWithCmpZero(CmpOp0, Op1, Op2, CondCode,
DL, DAG, Subtarget)) {
return R;
- } else if ((VT == MVT::i32 || VT == MVT::i64) && isNullConstant(Op2) &&
+ } else if (VT.isScalarInteger() && isNullConstant(Op2) &&
Cmp.getNode()->hasOneUse() && (CmpOp0 == Op1) &&
((CondCode == X86::COND_S) || // smin(x, 0)
(CondCode == X86::COND_G && hasAndNot(Op1)))) { // smax(x, 0)
diff --git a/llvm/test/CodeGen/X86/select-smin-smax.ll b/llvm/test/CodeGen/X86/select-smin-smax.ll
index 513983ba54bcf..3c2ec52f2c261 100644
--- a/llvm/test/CodeGen/X86/select-smin-smax.ll
+++ b/llvm/test/CodeGen/X86/select-smin-smax.ll
@@ -50,31 +50,18 @@ define i8 @test_i8_smax(i8 %a) nounwind {
define i8 @test_i8_smin(i8 %a) nounwind {
; X64-LABEL: test_i8_smin:
; X64: # %bb.0:
-; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: testb %dil, %dil
-; X64-NEXT: cmovsl %edi, %eax
-; X64-NEXT: # kill: def $al killed $al killed $eax
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: sarb $7, %al
+; X64-NEXT: andb %dil, %al
; X64-NEXT: retq
;
-; X86-BMI-LABEL: test_i8_smin:
-; X86-BMI: # %bb.0:
-; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-BMI-NEXT: xorl %eax, %eax
-; X86-BMI-NEXT: testb %cl, %cl
-; X86-BMI-NEXT: cmovsl %ecx, %eax
-; X86-BMI-NEXT: # kill: def $al killed $al killed $eax
-; X86-BMI-NEXT: retl
-;
-; X86-NOBMI-LABEL: test_i8_smin:
-; X86-NOBMI: # %bb.0:
-; X86-NOBMI-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X86-NOBMI-NEXT: testb %al, %al
-; X86-NOBMI-NEXT: js .LBB1_2
-; X86-NOBMI-NEXT: # %bb.1:
-; X86-NOBMI-NEXT: xorl %eax, %eax
-; X86-NOBMI-NEXT: .LBB1_2:
-; X86-NOBMI-NEXT: # kill: def $al killed $al killed $eax
-; X86-NOBMI-NEXT: retl
+; X86-LABEL: test_i8_smin:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: sarb $7, %al
+; X86-NEXT: andb %cl, %al
+; X86-NEXT: retl
%r = call i8 @llvm.smin.i8(i8 %a, i8 0)
ret i8 %r
}
@@ -114,31 +101,20 @@ define i16 @test_i16_smax(i16 %a) nounwind {
define i16 @test_i16_smin(i16 %a) nounwind {
; X64-LABEL: test_i16_smin:
; X64: # %bb.0:
-; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: testw %di, %di
-; X64-NEXT: cmovsl %edi, %eax
+; X64-NEXT: movswl %di, %eax
+; X64-NEXT: sarl $15, %eax
+; X64-NEXT: andl %edi, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
-; X86-BMI-LABEL: test_i16_smin:
-; X86-BMI: # %bb.0:
-; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-BMI-NEXT: xorl %eax, %eax
-; X86-BMI-NEXT: testw %cx, %cx
-; X86-BMI-NEXT: cmovsl %ecx, %eax
-; X86-BMI-NEXT: # kill: def $ax killed $ax killed $eax
-; X86-BMI-NEXT: retl
-;
-; X86-NOBMI-LABEL: test_i16_smin:
-; X86-NOBMI: # %bb.0:
-; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NOBMI-NEXT: testw %ax, %ax
-; X86-NOBMI-NEXT: js .LBB3_2
-; X86-NOBMI-NEXT: # %bb.1:
-; X86-NOBMI-NEXT: xorl %eax, %eax
-; X86-NOBMI-NEXT: .LBB3_2:
-; X86-NOBMI-NEXT: # kill: def $ax killed $ax killed $eax
-; X86-NOBMI-NEXT: retl
+; X86-LABEL: test_i16_smin:
+; X86: # %bb.0:
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: shrl $15, %eax
+; X86-NEXT: andl %ecx, %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NEXT: retl
%r = call i16 @llvm.smin.i16(i16 %a, i16 0)
ret i16 %r
}