[llvm] [X86] combineConcatVectorOps - add handling for X86ISD::FMAX/FMIN vector ops (PR #172648)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Dec 17 05:00:13 PST 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-x86
Author: Simon Pilgrim (RKSimon)
Changes:
These are typically very similar to FADD/FSUB, so we don't want to concatenate if it would create additional VINSERT ops.
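For illustration only (not part of the patch), the rewrite on the v2f64 test case below can be sketched with AVX intrinsics; the helper names `concat_fmax_split`/`concat_fmax_wide` are made up for this example:

```cpp
// Illustrative sketch; these helpers are not from the patch.
#include <immintrin.h>

// Before the combine: one 128-bit VMAXPD per half, then a VINSERTF128.
__m256d concat_fmax_split(__m128d a0, __m128d a1) {
  __m128d z = _mm_setzero_pd();
  __m128d v0 = _mm_max_pd(a0, z);
  __m128d v1 = _mm_max_pd(a1, z);
  return _mm256_insertf128_pd(_mm256_castpd128_pd256(v0), v1, 1);
}

// After the combine: VINSERTF128 first, then a single 256-bit VMAXPD.
__m256d concat_fmax_wide(__m128d a0, __m128d a1) {
  __m256d v = _mm256_insertf128_pd(_mm256_castpd128_pd256(a0), a1, 1);
  return _mm256_max_pd(v, _mm256_setzero_pd());
}
```

Since the concatenation is needed anyway, folding the two 128-bit ops into one 256-bit op saves an instruction without introducing any extra inserts.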
---
Full diff: https://github.com/llvm/llvm-project/pull/172648.diff
3 Files Affected:
- (modified) llvm/lib/Target/X86/X86ISelLowering.cpp (+2)
- (modified) llvm/test/CodeGen/X86/combine-fmax.ll (+6-6)
- (modified) llvm/test/CodeGen/X86/combine-fmin.ll (+6-6)
``````````diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 27d34b3402c38..b933eaec054cc 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -59652,6 +59652,8 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
     case ISD::FADD:
     case ISD::FSUB:
     case ISD::FMUL:
+    case X86ISD::FMAX:
+    case X86ISD::FMIN:
       if (!IsSplat && (VT.is256BitVector() ||
                        (VT.is512BitVector() && Subtarget.useAVX512Regs()))) {
         SDValue Concat0 = CombineSubOperand(VT, Ops, 0);
diff --git a/llvm/test/CodeGen/X86/combine-fmax.ll b/llvm/test/CodeGen/X86/combine-fmax.ll
index 127f21c38ce29..5247b770773d9 100644
--- a/llvm/test/CodeGen/X86/combine-fmax.ll
+++ b/llvm/test/CodeGen/X86/combine-fmax.ll
@@ -14,10 +14,10 @@ define <4 x double> @concat_fmax_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1)
 ;
 ; AVX-LABEL: concat_fmax_v4f64_v2f64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vmaxpd %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vmaxpd %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %v0 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> zeroinitializer)
   %v1 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a1, <2 x double> zeroinitializer)
@@ -35,10 +35,10 @@ define <8 x float> @concat_fmax_v8f32_v4f32(<4 x float> %a0, <4 x float> %a1) {
 ;
 ; AVX-LABEL: concat_fmax_v8f32_v4f32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vmaxps %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vmaxps %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vmaxps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %v0 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> zeroinitializer)
   %v1 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a1, <4 x float> zeroinitializer)
diff --git a/llvm/test/CodeGen/X86/combine-fmin.ll b/llvm/test/CodeGen/X86/combine-fmin.ll
index 7bdc4ba3a8cda..331771ba41ebc 100644
--- a/llvm/test/CodeGen/X86/combine-fmin.ll
+++ b/llvm/test/CodeGen/X86/combine-fmin.ll
@@ -14,10 +14,10 @@ define <4 x double> @concat_fmin_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1)
 ;
 ; AVX-LABEL: concat_fmin_v4f64_v2f64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vminpd %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vminpd %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vminpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %v0 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> zeroinitializer)
   %v1 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a1, <2 x double> zeroinitializer)
@@ -35,10 +35,10 @@ define <8 x float> @concat_fmin_v8f32_v4f32(<4 x float> %a0, <4 x float> %a1) {
 ;
 ; AVX-LABEL: concat_fmin_v8f32_v4f32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vminps %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vminps %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vminps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %v0 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> zeroinitializer)
   %v1 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a1, <4 x float> zeroinitializer)
``````````
https://github.com/llvm/llvm-project/pull/172648