[llvm] SelectionDAG/expandFMINNUM_FMAXNUM: skip vectors if Op is legal for elements (PR #109570)
YunQiang Su via llvm-commits
llvm-commits at lists.llvm.org
Sun Oct 6 08:23:56 PDT 2024
https://github.com/wzssyqa updated https://github.com/llvm/llvm-project/pull/109570
From bd37d0bf94f0b77a4a7b784c874dd5e58caa5e18 Mon Sep 17 00:00:00 2001
From: YunQiang Su <syq at debian.org>
Date: Sun, 22 Sep 2024 15:21:17 +0800
Subject: [PATCH 1/2] SelectionDAG/expandFMINNUM_FMAXNUM: skip vectors if Op is
 legal for elements
If we are working on a vector and the operation is legal for its
element type, skipping the expansion is better than performing it:
simple scalar instructions can then be emitted instead of pairs of
compare+select.
This check cannot be placed any earlier, since in some cases (such as
fast math) we may prefer a similar instruction (such as
fminimum/fminnum_ieee) instead.
---
.../CodeGen/SelectionDAG/TargetLowering.cpp | 5 +
.../CodeGen/ARM/minnum-maxnum-intrinsics.ll | 252 ++++++-----------
.../CodeGen/Thumb2/mve-vecreduce-fminmax.ll | 264 +++++-------------
3 files changed, 171 insertions(+), 350 deletions(-)
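For readers skimming the patch: the change is a single early-exit in the
generic expansion path. Below is a rough standalone sketch of the check
(the helper name is illustrative, not part of the patch; the real hunk
follows):

#include "llvm/CodeGen/TargetLowering.h"

using namespace llvm;

// Sketch: when a vector FMINNUM/FMAXNUM reaches the expander but the
// scalar form of the same opcode is legal, bail out (return an empty
// SDValue) so legalization unrolls the vector; each element then maps
// to one scalar instruction (e.g. Armv8 VMINNM.F64) instead of a
// compare+select pair.
static bool shouldDeferToUnrolling(const TargetLowering &TLI,
                                   const SDNode *Node) {
  EVT VT = Node->getValueType(0);
  return VT.isVector() &&
         TLI.isOperationLegal(Node->getOpcode(), VT.getScalarType());
}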
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index f19975557a0a77..6f527e32522389 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -8485,6 +8485,11 @@ SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node,
Node->getOperand(1), Node->getFlags());
}
+ // If the target has an instruction implementing this operation for the
+ // vector's element type, unrolling the vector normally beats compare+select.
+ if (VT.isVector() && isOperationLegal(Node->getOpcode(), VT.getScalarType()))
+ return SDValue();
+
if (SDValue SelCC = createSelectForFMINNUM_FMAXNUM(Node, DAG))
return SelCC;
diff --git a/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll b/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll
index 528bfe0411730a..975f3860fb1ef6 100644
--- a/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll
+++ b/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll
@@ -918,32 +918,24 @@ define <2 x double> @fminnumv264_intrinsic(<2 x double> %x, <2 x double> %y) {
; ARMV8: @ %bb.0:
; ARMV8-NEXT: mov r12, sp
; ARMV8-NEXT: vld1.64 {d16, d17}, [r12]
-; ARMV8-NEXT: vmov d18, r0, r1
-; ARMV8-NEXT: vmov d19, r2, r3
-; ARMV8-NEXT: vcmp.f64 d16, d18
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vcmp.f64 d17, d19
-; ARMV8-NEXT: vselgt.f64 d18, d18, d16
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov r0, r1, d18
-; ARMV8-NEXT: vselgt.f64 d16, d19, d17
+; ARMV8-NEXT: vmov d19, r0, r1
+; ARMV8-NEXT: vmov d18, r2, r3
+; ARMV8-NEXT: vminnm.f64 d19, d19, d16
+; ARMV8-NEXT: vminnm.f64 d16, d18, d17
+; ARMV8-NEXT: vmov r0, r1, d19
; ARMV8-NEXT: vmov r2, r3, d16
; ARMV8-NEXT: bx lr
;
; ARMV8M-LABEL: fminnumv264_intrinsic:
; ARMV8M: @ %bb.0:
; ARMV8M-NEXT: mov r12, sp
-; ARMV8M-NEXT: vmov d0, r0, r1
+; ARMV8M-NEXT: vmov d0, r2, r3
; ARMV8M-NEXT: vldrw.u32 q1, [r12]
-; ARMV8M-NEXT: vmov d1, r2, r3
-; ARMV8M-NEXT: vcmp.f64 d2, d0
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vcmp.f64 d3, d1
-; ARMV8M-NEXT: vselgt.f64 d0, d0, d2
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vmov r0, r1, d0
-; ARMV8M-NEXT: vselgt.f64 d1, d1, d3
-; ARMV8M-NEXT: vmov r2, r3, d1
+; ARMV8M-NEXT: vmov d1, r0, r1
+; ARMV8M-NEXT: vminnm.f64 d1, d1, d2
+; ARMV8M-NEXT: vminnm.f64 d0, d0, d3
+; ARMV8M-NEXT: vmov r0, r1, d1
+; ARMV8M-NEXT: vmov r2, r3, d0
; ARMV8M-NEXT: bx lr
%a = call nnan <2 x double> @llvm.minnum.v2f64(<2 x double> %x, <2 x double> %y)
ret <2 x double> %a
@@ -970,32 +962,24 @@ define <2 x double> @fminnumv264_nsz_intrinsic(<2 x double> %x, <2 x double> %y)
; ARMV8: @ %bb.0:
; ARMV8-NEXT: mov r12, sp
; ARMV8-NEXT: vld1.64 {d16, d17}, [r12]
-; ARMV8-NEXT: vmov d18, r0, r1
-; ARMV8-NEXT: vmov d19, r2, r3
-; ARMV8-NEXT: vcmp.f64 d16, d18
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vcmp.f64 d17, d19
-; ARMV8-NEXT: vselgt.f64 d18, d18, d16
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov r0, r1, d18
-; ARMV8-NEXT: vselgt.f64 d16, d19, d17
+; ARMV8-NEXT: vmov d19, r0, r1
+; ARMV8-NEXT: vmov d18, r2, r3
+; ARMV8-NEXT: vminnm.f64 d19, d19, d16
+; ARMV8-NEXT: vminnm.f64 d16, d18, d17
+; ARMV8-NEXT: vmov r0, r1, d19
; ARMV8-NEXT: vmov r2, r3, d16
; ARMV8-NEXT: bx lr
;
; ARMV8M-LABEL: fminnumv264_nsz_intrinsic:
; ARMV8M: @ %bb.0:
; ARMV8M-NEXT: mov r12, sp
-; ARMV8M-NEXT: vmov d0, r0, r1
+; ARMV8M-NEXT: vmov d0, r2, r3
; ARMV8M-NEXT: vldrw.u32 q1, [r12]
-; ARMV8M-NEXT: vmov d1, r2, r3
-; ARMV8M-NEXT: vcmp.f64 d2, d0
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vcmp.f64 d3, d1
-; ARMV8M-NEXT: vselgt.f64 d0, d0, d2
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vmov r0, r1, d0
-; ARMV8M-NEXT: vselgt.f64 d1, d1, d3
-; ARMV8M-NEXT: vmov r2, r3, d1
+; ARMV8M-NEXT: vmov d1, r0, r1
+; ARMV8M-NEXT: vminnm.f64 d1, d1, d2
+; ARMV8M-NEXT: vminnm.f64 d0, d0, d3
+; ARMV8M-NEXT: vmov r0, r1, d1
+; ARMV8M-NEXT: vmov r2, r3, d0
; ARMV8M-NEXT: bx lr
%a = call nnan nsz <2 x double> @llvm.minnum.v2f64(<2 x double> %x, <2 x double> %y)
ret <2 x double> %a
@@ -1020,31 +1004,23 @@ define <2 x double> @fminnumv264_non_zero_intrinsic(<2 x double> %x) {
;
; ARMV8-LABEL: fminnumv264_non_zero_intrinsic:
; ARMV8: @ %bb.0:
-; ARMV8-NEXT: vmov d17, r0, r1
; ARMV8-NEXT: vmov.f64 d16, #1.000000e+00
-; ARMV8-NEXT: vcmp.f64 d16, d17
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov d18, r2, r3
-; ARMV8-NEXT: vcmp.f64 d16, d18
-; ARMV8-NEXT: vselgt.f64 d17, d17, d16
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov r0, r1, d17
-; ARMV8-NEXT: vselgt.f64 d16, d18, d16
+; ARMV8-NEXT: vmov d18, r0, r1
+; ARMV8-NEXT: vmov d17, r2, r3
+; ARMV8-NEXT: vminnm.f64 d18, d18, d16
+; ARMV8-NEXT: vminnm.f64 d16, d17, d16
+; ARMV8-NEXT: vmov r0, r1, d18
; ARMV8-NEXT: vmov r2, r3, d16
; ARMV8-NEXT: bx lr
;
; ARMV8M-LABEL: fminnumv264_non_zero_intrinsic:
; ARMV8M: @ %bb.0:
-; ARMV8M-NEXT: vmov d1, r0, r1
; ARMV8M-NEXT: vmov.f64 d0, #1.000000e+00
-; ARMV8M-NEXT: vcmp.f64 d0, d1
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vmov d2, r2, r3
-; ARMV8M-NEXT: vcmp.f64 d0, d2
-; ARMV8M-NEXT: vselgt.f64 d1, d1, d0
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vmov r0, r1, d1
-; ARMV8M-NEXT: vselgt.f64 d0, d2, d0
+; ARMV8M-NEXT: vmov d2, r0, r1
+; ARMV8M-NEXT: vmov d1, r2, r3
+; ARMV8M-NEXT: vminnm.f64 d2, d2, d0
+; ARMV8M-NEXT: vminnm.f64 d0, d1, d0
+; ARMV8M-NEXT: vmov r0, r1, d2
; ARMV8M-NEXT: vmov r2, r3, d0
; ARMV8M-NEXT: bx lr
%a = call nnan <2 x double> @llvm.minnum.v2f64(<2 x double> %x, <2 x double><double 1.0, double 1.0>)
@@ -1070,34 +1046,26 @@ define <2 x double> @fminnumv264_one_zero_intrinsic(<2 x double> %x) {
;
; ARMV8-LABEL: fminnumv264_one_zero_intrinsic:
; ARMV8: @ %bb.0:
-; ARMV8-NEXT: vmov d19, r2, r3
-; ARMV8-NEXT: vcmp.f64 d19, #0
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov d18, r0, r1
; ARMV8-NEXT: vmov.f64 d16, #-1.000000e+00
-; ARMV8-NEXT: vcmp.f64 d16, d18
+; ARMV8-NEXT: vmov d18, r0, r1
; ARMV8-NEXT: vmov.i32 d17, #0x0
-; ARMV8-NEXT: vmovlt.f64 d17, d19
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov r2, r3, d17
-; ARMV8-NEXT: vselgt.f64 d16, d18, d16
+; ARMV8-NEXT: vminnm.f64 d16, d18, d16
+; ARMV8-NEXT: vmov d19, r2, r3
+; ARMV8-NEXT: vminnm.f64 d17, d19, d17
; ARMV8-NEXT: vmov r0, r1, d16
+; ARMV8-NEXT: vmov r2, r3, d17
; ARMV8-NEXT: bx lr
;
; ARMV8M-LABEL: fminnumv264_one_zero_intrinsic:
; ARMV8M: @ %bb.0:
-; ARMV8M-NEXT: vmov d3, r2, r3
+; ARMV8M-NEXT: vmov.f64 d0, #-1.000000e+00
; ARMV8M-NEXT: vldr d1, .LCPI27_0
-; ARMV8M-NEXT: vcmp.f64 d3, #0
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
; ARMV8M-NEXT: vmov d2, r0, r1
-; ARMV8M-NEXT: vmov.f64 d0, #-1.000000e+00
-; ARMV8M-NEXT: vcmp.f64 d0, d2
-; ARMV8M-NEXT: vmovlt.f64 d1, d3
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vmov r2, r3, d1
-; ARMV8M-NEXT: vselgt.f64 d0, d2, d0
+; ARMV8M-NEXT: vmov d3, r2, r3
+; ARMV8M-NEXT: vminnm.f64 d0, d2, d0
+; ARMV8M-NEXT: vminnm.f64 d1, d3, d1
; ARMV8M-NEXT: vmov r0, r1, d0
+; ARMV8M-NEXT: vmov r2, r3, d1
; ARMV8M-NEXT: bx lr
; ARMV8M-NEXT: .p2align 3
; ARMV8M-NEXT: @ %bb.1:
@@ -1129,31 +1097,23 @@ define <2 x double> @fmaxnumv264_intrinsic(<2 x double> %x, <2 x double> %y) {
; ARMV8: @ %bb.0:
; ARMV8-NEXT: mov r12, sp
; ARMV8-NEXT: vld1.64 {d16, d17}, [r12]
-; ARMV8-NEXT: vmov d18, r0, r1
-; ARMV8-NEXT: vcmp.f64 d18, d16
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov d19, r2, r3
-; ARMV8-NEXT: vcmp.f64 d19, d17
-; ARMV8-NEXT: vselgt.f64 d18, d18, d16
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov r0, r1, d18
-; ARMV8-NEXT: vselgt.f64 d16, d19, d17
+; ARMV8-NEXT: vmov d19, r0, r1
+; ARMV8-NEXT: vmov d18, r2, r3
+; ARMV8-NEXT: vmaxnm.f64 d19, d19, d16
+; ARMV8-NEXT: vmaxnm.f64 d16, d18, d17
+; ARMV8-NEXT: vmov r0, r1, d19
; ARMV8-NEXT: vmov r2, r3, d16
; ARMV8-NEXT: bx lr
;
; ARMV8M-LABEL: fmaxnumv264_intrinsic:
; ARMV8M: @ %bb.0:
; ARMV8M-NEXT: mov r12, sp
-; ARMV8M-NEXT: vmov d1, r0, r1
-; ARMV8M-NEXT: vldrw.u32 q1, [r12]
; ARMV8M-NEXT: vmov d0, r2, r3
-; ARMV8M-NEXT: vcmp.f64 d1, d2
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vcmp.f64 d0, d3
-; ARMV8M-NEXT: vselgt.f64 d1, d1, d2
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
+; ARMV8M-NEXT: vldrw.u32 q1, [r12]
+; ARMV8M-NEXT: vmov d1, r0, r1
+; ARMV8M-NEXT: vmaxnm.f64 d1, d1, d2
+; ARMV8M-NEXT: vmaxnm.f64 d0, d0, d3
; ARMV8M-NEXT: vmov r0, r1, d1
-; ARMV8M-NEXT: vselgt.f64 d0, d0, d3
; ARMV8M-NEXT: vmov r2, r3, d0
; ARMV8M-NEXT: bx lr
%a = call nnan <2 x double> @llvm.maxnum.v2f64(<2 x double> %x, <2 x double> %y)
@@ -1181,31 +1141,23 @@ define <2 x double> @fmaxnumv264_nsz_intrinsic(<2 x double> %x, <2 x double> %y)
; ARMV8: @ %bb.0:
; ARMV8-NEXT: mov r12, sp
; ARMV8-NEXT: vld1.64 {d16, d17}, [r12]
-; ARMV8-NEXT: vmov d18, r0, r1
-; ARMV8-NEXT: vcmp.f64 d18, d16
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov d19, r2, r3
-; ARMV8-NEXT: vcmp.f64 d19, d17
-; ARMV8-NEXT: vselgt.f64 d18, d18, d16
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov r0, r1, d18
-; ARMV8-NEXT: vselgt.f64 d16, d19, d17
+; ARMV8-NEXT: vmov d19, r0, r1
+; ARMV8-NEXT: vmov d18, r2, r3
+; ARMV8-NEXT: vmaxnm.f64 d19, d19, d16
+; ARMV8-NEXT: vmaxnm.f64 d16, d18, d17
+; ARMV8-NEXT: vmov r0, r1, d19
; ARMV8-NEXT: vmov r2, r3, d16
; ARMV8-NEXT: bx lr
;
; ARMV8M-LABEL: fmaxnumv264_nsz_intrinsic:
; ARMV8M: @ %bb.0:
; ARMV8M-NEXT: mov r12, sp
-; ARMV8M-NEXT: vmov d1, r0, r1
-; ARMV8M-NEXT: vldrw.u32 q1, [r12]
; ARMV8M-NEXT: vmov d0, r2, r3
-; ARMV8M-NEXT: vcmp.f64 d1, d2
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vcmp.f64 d0, d3
-; ARMV8M-NEXT: vselgt.f64 d1, d1, d2
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
+; ARMV8M-NEXT: vldrw.u32 q1, [r12]
+; ARMV8M-NEXT: vmov d1, r0, r1
+; ARMV8M-NEXT: vmaxnm.f64 d1, d1, d2
+; ARMV8M-NEXT: vmaxnm.f64 d0, d0, d3
; ARMV8M-NEXT: vmov r0, r1, d1
-; ARMV8M-NEXT: vselgt.f64 d0, d0, d3
; ARMV8M-NEXT: vmov r2, r3, d0
; ARMV8M-NEXT: bx lr
%a = call nnan nsz <2 x double> @llvm.maxnum.v2f64(<2 x double> %x, <2 x double> %y)
@@ -1236,18 +1188,14 @@ define <2 x double> @fmaxnumv264_zero_intrinsic(<2 x double> %x) {
;
; ARMV8-LABEL: fmaxnumv264_zero_intrinsic:
; ARMV8: @ %bb.0:
-; ARMV8-NEXT: vmov d18, r0, r1
; ARMV8-NEXT: vldr d16, .LCPI30_0
-; ARMV8-NEXT: vcmp.f64 d18, #0
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov d19, r2, r3
-; ARMV8-NEXT: vcmp.f64 d19, d16
+; ARMV8-NEXT: vmov d18, r2, r3
; ARMV8-NEXT: vmov.i32 d17, #0x0
-; ARMV8-NEXT: vselgt.f64 d17, d18, d17
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov r0, r1, d17
-; ARMV8-NEXT: vselgt.f64 d16, d19, d16
+; ARMV8-NEXT: vmov d19, r0, r1
+; ARMV8-NEXT: vmaxnm.f64 d16, d18, d16
+; ARMV8-NEXT: vmaxnm.f64 d17, d19, d17
; ARMV8-NEXT: vmov r2, r3, d16
+; ARMV8-NEXT: vmov r0, r1, d17
; ARMV8-NEXT: bx lr
; ARMV8-NEXT: .p2align 3
; ARMV8-NEXT: @ %bb.1:
@@ -1257,18 +1205,14 @@ define <2 x double> @fmaxnumv264_zero_intrinsic(<2 x double> %x) {
;
; ARMV8M-LABEL: fmaxnumv264_zero_intrinsic:
; ARMV8M: @ %bb.0:
-; ARMV8M-NEXT: vmov d2, r0, r1
; ARMV8M-NEXT: vldr d0, .LCPI30_0
-; ARMV8M-NEXT: vcmp.f64 d2, #0
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vmov d3, r2, r3
-; ARMV8M-NEXT: vcmp.f64 d3, d0
+; ARMV8M-NEXT: vmov d2, r2, r3
; ARMV8M-NEXT: vldr d1, .LCPI30_1
-; ARMV8M-NEXT: vselgt.f64 d1, d2, d1
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vmov r0, r1, d1
-; ARMV8M-NEXT: vselgt.f64 d0, d3, d0
+; ARMV8M-NEXT: vmov d3, r0, r1
+; ARMV8M-NEXT: vmaxnm.f64 d0, d2, d0
+; ARMV8M-NEXT: vmaxnm.f64 d1, d3, d1
; ARMV8M-NEXT: vmov r2, r3, d0
+; ARMV8M-NEXT: vmov r0, r1, d1
; ARMV8M-NEXT: bx lr
; ARMV8M-NEXT: .p2align 3
; ARMV8M-NEXT: @ %bb.1:
@@ -1307,15 +1251,11 @@ define <2 x double> @fmaxnumv264_minus_zero_intrinsic(<2 x double> %x) {
; ARMV8-LABEL: fmaxnumv264_minus_zero_intrinsic:
; ARMV8: @ %bb.0:
; ARMV8-NEXT: vldr d16, .LCPI31_0
-; ARMV8-NEXT: vmov d17, r0, r1
-; ARMV8-NEXT: vmov d18, r2, r3
-; ARMV8-NEXT: vcmp.f64 d17, d16
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vcmp.f64 d18, d16
-; ARMV8-NEXT: vselgt.f64 d17, d17, d16
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov r0, r1, d17
-; ARMV8-NEXT: vselgt.f64 d16, d18, d16
+; ARMV8-NEXT: vmov d18, r0, r1
+; ARMV8-NEXT: vmov d17, r2, r3
+; ARMV8-NEXT: vmaxnm.f64 d18, d18, d16
+; ARMV8-NEXT: vmaxnm.f64 d16, d17, d16
+; ARMV8-NEXT: vmov r0, r1, d18
; ARMV8-NEXT: vmov r2, r3, d16
; ARMV8-NEXT: bx lr
; ARMV8-NEXT: .p2align 3
@@ -1327,15 +1267,11 @@ define <2 x double> @fmaxnumv264_minus_zero_intrinsic(<2 x double> %x) {
; ARMV8M-LABEL: fmaxnumv264_minus_zero_intrinsic:
; ARMV8M: @ %bb.0:
; ARMV8M-NEXT: vldr d0, .LCPI31_0
-; ARMV8M-NEXT: vmov d1, r0, r1
-; ARMV8M-NEXT: vmov d2, r2, r3
-; ARMV8M-NEXT: vcmp.f64 d1, d0
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vcmp.f64 d2, d0
-; ARMV8M-NEXT: vselgt.f64 d1, d1, d0
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vmov r0, r1, d1
-; ARMV8M-NEXT: vselgt.f64 d0, d2, d0
+; ARMV8M-NEXT: vmov d2, r0, r1
+; ARMV8M-NEXT: vmov d1, r2, r3
+; ARMV8M-NEXT: vmaxnm.f64 d2, d2, d0
+; ARMV8M-NEXT: vmaxnm.f64 d0, d1, d0
+; ARMV8M-NEXT: vmov r0, r1, d2
; ARMV8M-NEXT: vmov r2, r3, d0
; ARMV8M-NEXT: bx lr
; ARMV8M-NEXT: .p2align 3
@@ -1367,30 +1303,22 @@ define <2 x double> @fmaxnumv264_non_zero_intrinsic(<2 x double> %x) {
; ARMV8-LABEL: fmaxnumv264_non_zero_intrinsic:
; ARMV8: @ %bb.0:
; ARMV8-NEXT: vmov.f64 d16, #1.000000e+00
-; ARMV8-NEXT: vmov d17, r0, r1
-; ARMV8-NEXT: vcmp.f64 d17, d16
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov d18, r2, r3
-; ARMV8-NEXT: vcmp.f64 d18, d16
-; ARMV8-NEXT: vselgt.f64 d17, d17, d16
-; ARMV8-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8-NEXT: vmov r0, r1, d17
-; ARMV8-NEXT: vselgt.f64 d16, d18, d16
+; ARMV8-NEXT: vmov d18, r0, r1
+; ARMV8-NEXT: vmov d17, r2, r3
+; ARMV8-NEXT: vmaxnm.f64 d18, d18, d16
+; ARMV8-NEXT: vmaxnm.f64 d16, d17, d16
+; ARMV8-NEXT: vmov r0, r1, d18
; ARMV8-NEXT: vmov r2, r3, d16
; ARMV8-NEXT: bx lr
;
; ARMV8M-LABEL: fmaxnumv264_non_zero_intrinsic:
; ARMV8M: @ %bb.0:
; ARMV8M-NEXT: vmov.f64 d0, #1.000000e+00
-; ARMV8M-NEXT: vmov d1, r0, r1
-; ARMV8M-NEXT: vcmp.f64 d1, d0
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vmov d2, r2, r3
-; ARMV8M-NEXT: vcmp.f64 d2, d0
-; ARMV8M-NEXT: vselgt.f64 d1, d1, d0
-; ARMV8M-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV8M-NEXT: vmov r0, r1, d1
-; ARMV8M-NEXT: vselgt.f64 d0, d2, d0
+; ARMV8M-NEXT: vmov d2, r0, r1
+; ARMV8M-NEXT: vmov d1, r2, r3
+; ARMV8M-NEXT: vmaxnm.f64 d2, d2, d0
+; ARMV8M-NEXT: vmaxnm.f64 d0, d1, d0
+; ARMV8M-NEXT: vmov r0, r1, d2
; ARMV8M-NEXT: vmov r2, r3, d0
; ARMV8M-NEXT: bx lr
%a = call nnan <2 x double> @llvm.maxnum.v2f64(<2 x double> %x, <2 x double><double 1.0, double 1.0>)
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll
index 7cafb7262f460d..20a90033659f64 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll
@@ -43,21 +43,13 @@ define arm_aapcs_vfpcc float @fmin_v8f32(<8 x float> %x) {
;
; CHECK-NOFP-LABEL: fmin_v8f32:
; CHECK-NOFP: @ %bb.0: @ %entry
-; CHECK-NOFP-NEXT: vcmp.f32 s5, s1
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
-; CHECK-NOFP-NEXT: vselgt.f32 s8, s1, s5
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f32 s6, s2
-; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f32 s7, s3
+; CHECK-NOFP-NEXT: vminnm.f32 s8, s1, s5
+; CHECK-NOFP-NEXT: vminnm.f32 s0, s0, s4
; CHECK-NOFP-NEXT: vminnm.f32 s0, s0, s8
-; CHECK-NOFP-NEXT: vselgt.f32 s2, s2, s6
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vminnm.f32 s2, s2, s6
+; CHECK-NOFP-NEXT: vminnm.f32 s0, s0, s2
+; CHECK-NOFP-NEXT: vminnm.f32 s2, s3, s7
; CHECK-NOFP-NEXT: vminnm.f32 s0, s0, s2
-; CHECK-NOFP-NEXT: vselgt.f32 s4, s3, s7
-; CHECK-NOFP-NEXT: vminnm.f32 s0, s0, s4
; CHECK-NOFP-NEXT: bx lr
entry:
%z = call fast float @llvm.vector.reduce.fmin.v8f32(<8 x float> %x)
@@ -129,44 +121,28 @@ define arm_aapcs_vfpcc half @fmin_v16f16(<16 x half> %x) {
;
; CHECK-NOFP-LABEL: fmin_v16f16:
; CHECK-NOFP: @ %bb.0: @ %entry
-; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
-; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
-; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s5, s1
-; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vminnm.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s8
-; CHECK-NOFP-NEXT: vmovx.f16 s8, s1
-; CHECK-NOFP-NEXT: vselgt.f16 s4, s1, s5
+; CHECK-NOFP-NEXT: vminnm.f16 s4, s1, s5
; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vmovx.f16 s4, s5
-; CHECK-NOFP-NEXT: vcmp.f16 s4, s8
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s6, s2
-; CHECK-NOFP-NEXT: vselgt.f16 s4, s8, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s1
+; CHECK-NOFP-NEXT: vminnm.f16 s4, s8, s4
; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s4
-; CHECK-NOFP-NEXT: vselgt.f16 s4, s2, s6
-; CHECK-NOFP-NEXT: vmovx.f16 s2, s2
+; CHECK-NOFP-NEXT: vminnm.f16 s4, s2, s6
; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vmovx.f16 s4, s6
-; CHECK-NOFP-NEXT: vcmp.f16 s4, s2
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s7, s3
-; CHECK-NOFP-NEXT: vselgt.f16 s2, s2, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s2
+; CHECK-NOFP-NEXT: vmovx.f16 s2, s2
+; CHECK-NOFP-NEXT: vminnm.f16 s2, s2, s4
; CHECK-NOFP-NEXT: vmovx.f16 s4, s3
-; CHECK-NOFP-NEXT: vselgt.f16 s2, s3, s7
+; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s2
+; CHECK-NOFP-NEXT: vminnm.f16 s2, s3, s7
; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NOFP-NEXT: vmovx.f16 s2, s7
-; CHECK-NOFP-NEXT: vcmp.f16 s2, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vselgt.f16 s2, s4, s2
+; CHECK-NOFP-NEXT: vminnm.f16 s2, s4, s2
; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NOFP-NEXT: bx lr
entry:
@@ -196,12 +172,8 @@ entry:
define arm_aapcs_vfpcc double @fmin_v4f64(<4 x double> %x) {
; CHECK-LABEL: fmin_v4f64:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vcmp.f64 d3, d1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f64 d2, d0
-; CHECK-NEXT: vselgt.f64 d1, d1, d3
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vselgt.f64 d0, d0, d2
+; CHECK-NEXT: vminnm.f64 d1, d1, d3
+; CHECK-NEXT: vminnm.f64 d0, d0, d2
; CHECK-NEXT: vminnm.f64 d0, d0, d1
; CHECK-NEXT: bx lr
entry:
@@ -435,21 +407,13 @@ define arm_aapcs_vfpcc float @fmin_v8f32_acc(<8 x float> %x, float %y) {
;
; CHECK-NOFP-LABEL: fmin_v8f32_acc:
; CHECK-NOFP: @ %bb.0: @ %entry
-; CHECK-NOFP-NEXT: vcmp.f32 s5, s1
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
-; CHECK-NOFP-NEXT: vselgt.f32 s10, s1, s5
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f32 s6, s2
-; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f32 s7, s3
+; CHECK-NOFP-NEXT: vminnm.f32 s10, s1, s5
+; CHECK-NOFP-NEXT: vminnm.f32 s0, s0, s4
; CHECK-NOFP-NEXT: vminnm.f32 s0, s0, s10
-; CHECK-NOFP-NEXT: vselgt.f32 s2, s2, s6
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vminnm.f32 s2, s2, s6
+; CHECK-NOFP-NEXT: vminnm.f32 s0, s0, s2
+; CHECK-NOFP-NEXT: vminnm.f32 s2, s3, s7
; CHECK-NOFP-NEXT: vminnm.f32 s0, s0, s2
-; CHECK-NOFP-NEXT: vselgt.f32 s4, s3, s7
-; CHECK-NOFP-NEXT: vminnm.f32 s0, s0, s4
; CHECK-NOFP-NEXT: vminnm.f32 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
@@ -547,44 +511,28 @@ define arm_aapcs_vfpcc half @fmin_v16f16_acc(<16 x half> %x, half %y) {
;
; CHECK-NOFP-LABEL: fmin_v16f16_acc:
; CHECK-NOFP: @ %bb.0: @ %entry
-; CHECK-NOFP-NEXT: vmovx.f16 s10, s4
; CHECK-NOFP-NEXT: vmovx.f16 s12, s0
-; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
-; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s5, s1
-; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s4
+; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vminnm.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s10
-; CHECK-NOFP-NEXT: vmovx.f16 s10, s1
-; CHECK-NOFP-NEXT: vselgt.f16 s4, s1, s5
+; CHECK-NOFP-NEXT: vminnm.f16 s4, s1, s5
; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vmovx.f16 s4, s5
-; CHECK-NOFP-NEXT: vcmp.f16 s4, s10
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s6, s2
-; CHECK-NOFP-NEXT: vselgt.f16 s4, s10, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s1
+; CHECK-NOFP-NEXT: vminnm.f16 s4, s10, s4
; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s4
-; CHECK-NOFP-NEXT: vselgt.f16 s4, s2, s6
-; CHECK-NOFP-NEXT: vmovx.f16 s2, s2
+; CHECK-NOFP-NEXT: vminnm.f16 s4, s2, s6
; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vmovx.f16 s4, s6
-; CHECK-NOFP-NEXT: vcmp.f16 s4, s2
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s7, s3
-; CHECK-NOFP-NEXT: vselgt.f16 s2, s2, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s2
+; CHECK-NOFP-NEXT: vmovx.f16 s2, s2
+; CHECK-NOFP-NEXT: vminnm.f16 s2, s2, s4
; CHECK-NOFP-NEXT: vmovx.f16 s4, s3
-; CHECK-NOFP-NEXT: vselgt.f16 s2, s3, s7
+; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s2
+; CHECK-NOFP-NEXT: vminnm.f16 s2, s3, s7
; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NOFP-NEXT: vmovx.f16 s2, s7
-; CHECK-NOFP-NEXT: vcmp.f16 s2, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vselgt.f16 s2, s4, s2
+; CHECK-NOFP-NEXT: vminnm.f16 s2, s4, s2
; CHECK-NOFP-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NOFP-NEXT: vminnm.f16 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
@@ -623,12 +571,8 @@ entry:
define arm_aapcs_vfpcc double @fmin_v4f64_acc(<4 x double> %x, double %y) {
; CHECK-LABEL: fmin_v4f64_acc:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vcmp.f64 d3, d1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f64 d2, d0
-; CHECK-NEXT: vselgt.f64 d1, d1, d3
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vselgt.f64 d0, d0, d2
+; CHECK-NEXT: vminnm.f64 d1, d1, d3
+; CHECK-NEXT: vminnm.f64 d0, d0, d2
; CHECK-NEXT: vminnm.f64 d0, d0, d1
; CHECK-NEXT: vminnm.f64 d0, d4, d0
; CHECK-NEXT: bx lr
@@ -917,21 +861,13 @@ define arm_aapcs_vfpcc float @fmax_v8f32(<8 x float> %x) {
;
; CHECK-NOFP-LABEL: fmax_v8f32:
; CHECK-NOFP: @ %bb.0: @ %entry
-; CHECK-NOFP-NEXT: vcmp.f32 s1, s5
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
-; CHECK-NOFP-NEXT: vselgt.f32 s8, s1, s5
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f32 s2, s6
-; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f32 s3, s7
+; CHECK-NOFP-NEXT: vmaxnm.f32 s8, s1, s5
+; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s0, s4
; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s0, s8
-; CHECK-NOFP-NEXT: vselgt.f32 s2, s2, s6
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmaxnm.f32 s2, s2, s6
+; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s0, s2
+; CHECK-NOFP-NEXT: vmaxnm.f32 s2, s3, s7
; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s0, s2
-; CHECK-NOFP-NEXT: vselgt.f32 s4, s3, s7
-; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s0, s4
; CHECK-NOFP-NEXT: bx lr
entry:
%z = call fast float @llvm.vector.reduce.fmax.v8f32(<8 x float> %x)
@@ -1003,44 +939,28 @@ define arm_aapcs_vfpcc half @fmax_v16f16(<16 x half> %x) {
;
; CHECK-NOFP-LABEL: fmax_v16f16:
; CHECK-NOFP: @ %bb.0: @ %entry
-; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
-; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
-; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s1, s5
-; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s8
-; CHECK-NOFP-NEXT: vmovx.f16 s8, s1
-; CHECK-NOFP-NEXT: vselgt.f16 s4, s1, s5
+; CHECK-NOFP-NEXT: vmaxnm.f16 s4, s1, s5
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vmovx.f16 s4, s5
-; CHECK-NOFP-NEXT: vcmp.f16 s8, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s2, s6
-; CHECK-NOFP-NEXT: vselgt.f16 s4, s8, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s1
+; CHECK-NOFP-NEXT: vmaxnm.f16 s4, s8, s4
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s4
-; CHECK-NOFP-NEXT: vselgt.f16 s4, s2, s6
-; CHECK-NOFP-NEXT: vmovx.f16 s2, s2
+; CHECK-NOFP-NEXT: vmaxnm.f16 s4, s2, s6
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vmovx.f16 s4, s6
-; CHECK-NOFP-NEXT: vcmp.f16 s2, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s3, s7
-; CHECK-NOFP-NEXT: vselgt.f16 s2, s2, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s2
+; CHECK-NOFP-NEXT: vmovx.f16 s2, s2
+; CHECK-NOFP-NEXT: vmaxnm.f16 s2, s2, s4
; CHECK-NOFP-NEXT: vmovx.f16 s4, s3
-; CHECK-NOFP-NEXT: vselgt.f16 s2, s3, s7
+; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s2
+; CHECK-NOFP-NEXT: vmaxnm.f16 s2, s3, s7
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NOFP-NEXT: vmovx.f16 s2, s7
-; CHECK-NOFP-NEXT: vcmp.f16 s4, s2
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vselgt.f16 s2, s4, s2
+; CHECK-NOFP-NEXT: vmaxnm.f16 s2, s4, s2
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NOFP-NEXT: bx lr
entry:
@@ -1070,12 +990,8 @@ entry:
define arm_aapcs_vfpcc double @fmax_v4f64(<4 x double> %x) {
; CHECK-LABEL: fmax_v4f64:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vcmp.f64 d1, d3
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f64 d0, d2
-; CHECK-NEXT: vselgt.f64 d1, d1, d3
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vselgt.f64 d0, d0, d2
+; CHECK-NEXT: vmaxnm.f64 d1, d1, d3
+; CHECK-NEXT: vmaxnm.f64 d0, d0, d2
; CHECK-NEXT: vmaxnm.f64 d0, d0, d1
; CHECK-NEXT: bx lr
entry:
@@ -1309,21 +1225,13 @@ define arm_aapcs_vfpcc float @fmax_v8f32_acc(<8 x float> %x, float %y) {
;
; CHECK-NOFP-LABEL: fmax_v8f32_acc:
; CHECK-NOFP: @ %bb.0: @ %entry
-; CHECK-NOFP-NEXT: vcmp.f32 s1, s5
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
-; CHECK-NOFP-NEXT: vselgt.f32 s10, s1, s5
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f32 s2, s6
-; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f32 s3, s7
+; CHECK-NOFP-NEXT: vmaxnm.f32 s10, s1, s5
+; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s0, s4
; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s0, s10
-; CHECK-NOFP-NEXT: vselgt.f32 s2, s2, s6
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmaxnm.f32 s2, s2, s6
+; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s0, s2
+; CHECK-NOFP-NEXT: vmaxnm.f32 s2, s3, s7
; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s0, s2
-; CHECK-NOFP-NEXT: vselgt.f32 s4, s3, s7
-; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s0, s4
; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
@@ -1421,44 +1329,28 @@ define arm_aapcs_vfpcc half @fmax_v16f16_acc(<16 x half> %x, half %y) {
;
; CHECK-NOFP-LABEL: fmax_v16f16_acc:
; CHECK-NOFP: @ %bb.0: @ %entry
-; CHECK-NOFP-NEXT: vmovx.f16 s10, s4
; CHECK-NOFP-NEXT: vmovx.f16 s12, s0
-; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
-; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s1, s5
-; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s4
+; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vmaxnm.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s10
-; CHECK-NOFP-NEXT: vmovx.f16 s10, s1
-; CHECK-NOFP-NEXT: vselgt.f16 s4, s1, s5
+; CHECK-NOFP-NEXT: vmaxnm.f16 s4, s1, s5
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vmovx.f16 s4, s5
-; CHECK-NOFP-NEXT: vcmp.f16 s10, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s2, s6
-; CHECK-NOFP-NEXT: vselgt.f16 s4, s10, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s1
+; CHECK-NOFP-NEXT: vmaxnm.f16 s4, s10, s4
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s4
-; CHECK-NOFP-NEXT: vselgt.f16 s4, s2, s6
-; CHECK-NOFP-NEXT: vmovx.f16 s2, s2
+; CHECK-NOFP-NEXT: vmaxnm.f16 s4, s2, s6
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vmovx.f16 s4, s6
-; CHECK-NOFP-NEXT: vcmp.f16 s2, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vcmp.f16 s3, s7
-; CHECK-NOFP-NEXT: vselgt.f16 s2, s2, s4
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s2
+; CHECK-NOFP-NEXT: vmovx.f16 s2, s2
+; CHECK-NOFP-NEXT: vmaxnm.f16 s2, s2, s4
; CHECK-NOFP-NEXT: vmovx.f16 s4, s3
-; CHECK-NOFP-NEXT: vselgt.f16 s2, s3, s7
+; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s2
+; CHECK-NOFP-NEXT: vmaxnm.f16 s2, s3, s7
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NOFP-NEXT: vmovx.f16 s2, s7
-; CHECK-NOFP-NEXT: vcmp.f16 s4, s2
-; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NOFP-NEXT: vselgt.f16 s2, s4, s2
+; CHECK-NOFP-NEXT: vmaxnm.f16 s2, s4, s2
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
@@ -1497,12 +1389,8 @@ entry:
define arm_aapcs_vfpcc double @fmax_v4f64_acc(<4 x double> %x, double %y) {
; CHECK-LABEL: fmax_v4f64_acc:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vcmp.f64 d1, d3
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f64 d0, d2
-; CHECK-NEXT: vselgt.f64 d1, d1, d3
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vselgt.f64 d0, d0, d2
+; CHECK-NEXT: vmaxnm.f64 d1, d1, d3
+; CHECK-NEXT: vmaxnm.f64 d0, d0, d2
; CHECK-NEXT: vmaxnm.f64 d0, d0, d1
; CHECK-NEXT: vmaxnm.f64 d0, d4, d0
; CHECK-NEXT: bx lr
From e2e180b878563ee26c185724ba02a8ec8cdfa32d Mon Sep 17 00:00:00 2001
From: YunQiang Su <syq at debian.org>
Date: Sun, 6 Oct 2024 22:08:51 +0800
Subject: [PATCH 2/2] Use setCondCodeAction and getCondCodeAction
---
llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp | 11 ++++++-----
llvm/lib/Target/ARM/ARMISelLowering.cpp | 10 ++++++++++
2 files changed, 16 insertions(+), 5 deletions(-)
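The reworked check, as a rough standalone sketch (helper name is
illustrative; inside TargetLowering the calls are unqualified, as in the
hunk below): the select-based expansion now gives up only when the needed
compare/select nodes are not legal for the vector type but the scalar
min/max is legal.

#include "llvm/CodeGen/TargetLowering.h"

using namespace llvm;

// Sketch: prefer per-element FMINNUM/FMAXNUM over setcc+vselect when the
// target has marked the condition code (or VSELECT) as not legal for the
// vector type but implements the scalar operation directly.
static bool preferScalarMinMax(const TargetLowering &TLI,
                               const SDNode *Node) {
  EVT VT = Node->getValueType(0);
  ISD::CondCode Pred =
      Node->getOpcode() == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT;
  bool SelCCUsable = TLI.isCondCodeLegal(Pred, VT.getSimpleVT()) &&
                     TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
  return !SelCCUsable && VT.isVector() &&
         TLI.isOperationLegal(Node->getOpcode(), VT.getScalarType());
}

On the ARM side, the patch marks the ordered/signed FP condition codes as
Expand for the NEON/MVE vector FP types, which is what makes this
condition fire for the v2f64/v4f32 cases in the tests updated by the
first patch.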
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 6f527e32522389..8e92ea2813df94 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -8424,6 +8424,12 @@ TargetLowering::createSelectForFMINNUM_FMAXNUM(SDNode *Node,
if (Node->getFlags().hasNoNaNs()) {
ISD::CondCode Pred = Opcode == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT;
+ EVT VT = Node->getValueType(0);
+ if ((!isCondCodeLegal(Pred, VT.getSimpleVT()) ||
+ !isOperationLegalOrCustom(ISD::VSELECT, VT)) &&
+ VT.isVector() &&
+ isOperationLegal(Node->getOpcode(), VT.getScalarType()))
+ return SDValue();
SDValue Op1 = Node->getOperand(0);
SDValue Op2 = Node->getOperand(1);
SDValue SelCC = DAG.getSelectCC(SDLoc(Node), Op1, Op2, Op1, Op2, Pred);
@@ -8485,11 +8491,6 @@ SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node,
Node->getOperand(1), Node->getFlags());
}
- // If the target has an instruction implementing this operation for the
- // vector's element type, unrolling the vector normally beats compare+select.
- if (VT.isVector() && isOperationLegal(Node->getOpcode(), VT.getScalarType()))
- return SDValue();
-
if (SDValue SelCC = createSelectForFMINNUM_FMAXNUM(Node, DAG))
return SelCC;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 1733424a8b669f..fd241975eebd22 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -894,6 +894,10 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
setOperationAction(ISD::FMA, MVT::v2f64, Expand);
+
+ for (auto CC : {ISD::SETOGT, ISD::SETOGE, ISD::SETOLT, ISD::SETOLE,
+ ISD::SETGT, ISD::SETGE, ISD::SETLT, ISD::SETLE})
+ setCondCodeAction(CC, MVT::v2f64, Expand);
}
if (Subtarget->hasNEON()) {
@@ -915,6 +919,9 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);
+ for (auto CC : {ISD::SETOGT, ISD::SETOGE, ISD::SETOLT, ISD::SETOLE,
+ ISD::SETGT, ISD::SETGE, ISD::SETLT, ISD::SETLE})
+ setCondCodeAction(CC, MVT::v4f32, Expand);
// Mark v2f32 intrinsics.
setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
@@ -933,6 +940,9 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);
+ for (auto CC : {ISD::SETOGT, ISD::SETOGE, ISD::SETOLT, ISD::SETOLE,
+ ISD::SETGT, ISD::SETGE, ISD::SETLT, ISD::SETLE})
+ setCondCodeAction(CC, MVT::v2f32, Expand);
// Neon does not support some operations on v1i64 and v2i64 types.
setOperationAction(ISD::MUL, MVT::v1i64, Expand);