[llvm] cdc6a84 - TargetLowering: Allow FMINNUM/FMAXNUM to lower to FMINIMUM/FMAXIMUM even without `nsz` (#177828)
via llvm-commits
llvm-commits at lists.llvm.org
Sun Jan 25 15:24:17 PST 2026
Author: valadaptive
Date: 2026-01-25T18:24:12-05:00
New Revision: cdc6a84c14ea07d66d9e388171b43429eae3397b
URL: https://github.com/llvm/llvm-project/commit/cdc6a84c14ea07d66d9e388171b43429eae3397b
DIFF: https://github.com/llvm/llvm-project/commit/cdc6a84c14ea07d66d9e388171b43429eae3397b.diff
LOG: TargetLowering: Allow FMINNUM/FMAXNUM to lower to FMINIMUM/FMAXIMUM even without `nsz` (#177828)
This restriction was originally added in
https://reviews.llvm.org/D143256, with the given justification:
> Currently, in TargetLowering, if the target does not support fminnum,
> we lower to fminimum if neither operand could be a NaN. But this isn't
> quite correct because fminnum and fminimum treat +/-0 differently; so,
> we need to prove that one of the operands isn't a zero.
As far as I can tell, this was never correct. Before
https://github.com/llvm/llvm-project/pull/172012, `minnum` and `maxnum`
were nondeterministic with regards to signed zero, so it's always been
perfectly legal to lower them to operations that order signed zeroes.
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
llvm/test/CodeGen/ARM/lower-vmax.ll
llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll
llvm/test/CodeGen/ARM/vminmax.ll
llvm/test/CodeGen/WebAssembly/f32.ll
llvm/test/CodeGen/WebAssembly/f64.ll
llvm/test/CodeGen/WebAssembly/simd-arith.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 5cdceb02897e7..edba0a7169c0a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -8809,14 +8809,10 @@ SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node,
}
// If the target has FMINIMUM/FMAXIMUM but not FMINNUM/FMAXNUM use that
- // instead if there are no NaNs and there can't be an incompatible zero
- // compare: at least one operand isn't +/-0, or there are no signed-zeros.
- if ((Node->getFlags().hasNoNaNs() ||
- (DAG.isKnownNeverNaN(Node->getOperand(0)) &&
- DAG.isKnownNeverNaN(Node->getOperand(1)))) &&
- (Node->getFlags().hasNoSignedZeros() ||
- DAG.isKnownNeverZeroFloat(Node->getOperand(0)) ||
- DAG.isKnownNeverZeroFloat(Node->getOperand(1)))) {
+ // instead if there are no NaNs.
+ if (Node->getFlags().hasNoNaNs() ||
+ (DAG.isKnownNeverNaN(Node->getOperand(0)) &&
+ DAG.isKnownNeverNaN(Node->getOperand(1)))) {
unsigned IEEE2018Op =
Node->getOpcode() == ISD::FMINNUM ? ISD::FMINIMUM : ISD::FMAXIMUM;
if (isOperationLegalOrCustom(IEEE2018Op, VT))
diff --git a/llvm/test/CodeGen/ARM/lower-vmax.ll b/llvm/test/CodeGen/ARM/lower-vmax.ll
index 73f0f165fdc73..6dfb466047abb 100644
--- a/llvm/test/CodeGen/ARM/lower-vmax.ll
+++ b/llvm/test/CodeGen/ARM/lower-vmax.ll
@@ -5,16 +5,18 @@
define float @max_f32(float, float) {
; CHECK-NO_NEON-LABEL: max_f32:
; CHECK-NO_NEON: @ %bb.0:
-; CHECK-NO_NEON-NEXT: vcmp.f32 s1, s0
-; CHECK-NO_NEON-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NO_NEON-NEXT: vmovgt.f32 s0, s1
+; CHECK-NO_NEON-NEXT: vmov.f32 s2, s1
+; CHECK-NO_NEON-NEXT: @ kill: def $s0 killed $s0 def $d0
+; CHECK-NO_NEON-NEXT: vmax.f32 d0, d1, d0
+; CHECK-NO_NEON-NEXT: @ kill: def $s0 killed $s0 killed $d0
; CHECK-NO_NEON-NEXT: mov pc, lr
;
; CHECK-NEON-LABEL: max_f32:
; CHECK-NEON: @ %bb.0:
-; CHECK-NEON-NEXT: vcmp.f32 s1, s0
-; CHECK-NEON-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEON-NEXT: vmovgt.f32 s0, s1
+; CHECK-NEON-NEXT: vmov.f32 s2, s1
+; CHECK-NEON-NEXT: @ kill: def $s0 killed $s0 def $d0
+; CHECK-NEON-NEXT: vmax.f32 d0, d1, d0
+; CHECK-NEON-NEXT: @ kill: def $s0 killed $s0 killed $d0
; CHECK-NEON-NEXT: mov pc, lr
%3 = call nnan float @llvm.maxnum.f32(float %1, float %0)
ret float %3
@@ -25,16 +27,18 @@ declare float @llvm.maxnum.f32(float, float) #1
define float @min_f32(float, float) {
; CHECK-NO_NEON-LABEL: min_f32:
; CHECK-NO_NEON: @ %bb.0:
-; CHECK-NO_NEON-NEXT: vcmp.f32 s1, s0
-; CHECK-NO_NEON-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NO_NEON-NEXT: vmovlt.f32 s0, s1
+; CHECK-NO_NEON-NEXT: vmov.f32 s2, s1
+; CHECK-NO_NEON-NEXT: @ kill: def $s0 killed $s0 def $d0
+; CHECK-NO_NEON-NEXT: vmin.f32 d0, d1, d0
+; CHECK-NO_NEON-NEXT: @ kill: def $s0 killed $s0 killed $d0
; CHECK-NO_NEON-NEXT: mov pc, lr
;
; CHECK-NEON-LABEL: min_f32:
; CHECK-NEON: @ %bb.0:
-; CHECK-NEON-NEXT: vcmp.f32 s1, s0
-; CHECK-NEON-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEON-NEXT: vmovlt.f32 s0, s1
+; CHECK-NEON-NEXT: vmov.f32 s2, s1
+; CHECK-NEON-NEXT: @ kill: def $s0 killed $s0 def $d0
+; CHECK-NEON-NEXT: vmin.f32 d0, d1, d0
+; CHECK-NEON-NEXT: @ kill: def $s0 killed $s0 killed $d0
; CHECK-NEON-NEXT: mov pc, lr
%3 = call nnan float @llvm.minnum.f32(float %1, float %0)
ret float %3
diff --git a/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll b/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll
index 6706d25ae01d2..8564d7d9996d3 100644
--- a/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll
+++ b/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll
@@ -18,9 +18,7 @@ define float @fminnum32_intrinsic(float %x, float %y) {
; ARMV7: @ %bb.0:
; ARMV7-NEXT: vmov s0, r1
; ARMV7-NEXT: vmov s2, r0
-; ARMV7-NEXT: vcmp.f32 s2, s0
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vmovlt.f32 s0, s2
+; ARMV7-NEXT: vmin.f32 d0, d1, d0
; ARMV7-NEXT: vmov r0, s0
; ARMV7-NEXT: bx lr
;
@@ -104,9 +102,7 @@ define float @fmaxnum32_intrinsic(float %x, float %y) {
; ARMV7: @ %bb.0:
; ARMV7-NEXT: vmov s0, r1
; ARMV7-NEXT: vmov s2, r0
-; ARMV7-NEXT: vcmp.f32 s2, s0
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vmovgt.f32 s0, s2
+; ARMV7-NEXT: vmax.f32 d0, d1, d0
; ARMV7-NEXT: vmov r0, s0
; ARMV7-NEXT: bx lr
;
@@ -160,12 +156,10 @@ define float @fmaxnum32_nsz_intrinsic(float %x, float %y) {
define float @fmaxnum32_zero_intrinsic(float %x) {
; ARMV7-LABEL: fmaxnum32_zero_intrinsic:
; ARMV7: @ %bb.0:
-; ARMV7-NEXT: vmov s0, r0
-; ARMV7-NEXT: vldr s2, .LCPI5_0
-; ARMV7-NEXT: vcmp.f32 s0, #0
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vmovgt.f32 s2, s0
-; ARMV7-NEXT: vmov r0, s2
+; ARMV7-NEXT: vldr s0, .LCPI5_0
+; ARMV7-NEXT: vmov s2, r0
+; ARMV7-NEXT: vmax.f32 d0, d1, d0
+; ARMV7-NEXT: vmov r0, s0
; ARMV7-NEXT: bx lr
; ARMV7-NEXT: .p2align 2
; ARMV7-NEXT: @ %bb.1:
@@ -490,24 +484,13 @@ define double @fmaxnum64_non_zero_intrinsic(double %x) {
define <4 x float> @fminnumv432_intrinsic(<4 x float> %x, <4 x float> %y) {
; ARMV7-LABEL: fminnumv432_intrinsic:
; ARMV7: @ %bb.0:
-; ARMV7-NEXT: mov r12, sp
-; ARMV7-NEXT: vld1.64 {d0, d1}, [r12]
-; ARMV7-NEXT: vmov d3, r2, r3
-; ARMV7-NEXT: vmov d2, r0, r1
-; ARMV7-NEXT: vcmp.f32 s6, s2
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vcmp.f32 s7, s3
-; ARMV7-NEXT: vmovlt.f32 s2, s6
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vcmp.f32 s5, s1
-; ARMV7-NEXT: vmovlt.f32 s3, s7
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vcmp.f32 s4, s0
-; ARMV7-NEXT: vmovlt.f32 s1, s5
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vmovlt.f32 s0, s4
-; ARMV7-NEXT: vmov r2, r3, d1
-; ARMV7-NEXT: vmov r0, r1, d0
+; ARMV7-NEXT: vmov d17, r2, r3
+; ARMV7-NEXT: vmov d16, r0, r1
+; ARMV7-NEXT: mov r0, sp
+; ARMV7-NEXT: vld1.64 {d18, d19}, [r0]
+; ARMV7-NEXT: vmin.f32 q8, q8, q9
+; ARMV7-NEXT: vmov r0, r1, d16
+; ARMV7-NEXT: vmov r2, r3, d17
; ARMV7-NEXT: bx lr
;
; ARMV8-LABEL: fminnumv432_intrinsic:
@@ -609,26 +592,21 @@ define <4 x float> @fminnumv432_non_zero_intrinsic(<4 x float> %x) {
define <4 x float> @fminnumv432_one_zero_intrinsic(<4 x float> %x) {
; ARMV7-LABEL: fminnumv432_one_zero_intrinsic:
; ARMV7: @ %bb.0:
-; ARMV7-NEXT: vmov d1, r2, r3
-; ARMV7-NEXT: vldr s4, .LCPI18_0
-; ARMV7-NEXT: vmov d0, r0, r1
-; ARMV7-NEXT: vmov.f32 s6, #-1.000000e+00
-; ARMV7-NEXT: vcmp.f32 s1, #0
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vmov.f32 s8, s3
-; ARMV7-NEXT: vmin.f32 d7, d1, d3
-; ARMV7-NEXT: vmin.f32 d6, d0, d3
-; ARMV7-NEXT: vmin.f32 d4, d4, d3
-; ARMV7-NEXT: vmovlt.f32 s4, s1
-; ARMV7-NEXT: vmov.f32 s13, s4
-; ARMV7-NEXT: vmov.f32 s15, s8
-; ARMV7-NEXT: vmov r0, r1, d6
-; ARMV7-NEXT: vmov r2, r3, d7
+; ARMV7-NEXT: vmov d17, r2, r3
+; ARMV7-NEXT: vmov d16, r0, r1
+; ARMV7-NEXT: adr r0, .LCPI18_0
+; ARMV7-NEXT: vld1.64 {d18, d19}, [r0:128]
+; ARMV7-NEXT: vmin.f32 q8, q8, q9
+; ARMV7-NEXT: vmov r0, r1, d16
+; ARMV7-NEXT: vmov r2, r3, d17
; ARMV7-NEXT: bx lr
-; ARMV7-NEXT: .p2align 2
+; ARMV7-NEXT: .p2align 4
; ARMV7-NEXT: @ %bb.1:
; ARMV7-NEXT: .LCPI18_0:
+; ARMV7-NEXT: .long 0xbf800000 @ float -1
; ARMV7-NEXT: .long 0x00000000 @ float 0
+; ARMV7-NEXT: .long 0xbf800000 @ float -1
+; ARMV7-NEXT: .long 0xbf800000 @ float -1
;
; ARMV8-LABEL: fminnumv432_one_zero_intrinsic:
; ARMV8: @ %bb.0:
@@ -672,24 +650,13 @@ define <4 x float> @fminnumv432_one_zero_intrinsic(<4 x float> %x) {
define <4 x float> @fmaxnumv432_intrinsic(<4 x float> %x, <4 x float> %y) {
; ARMV7-LABEL: fmaxnumv432_intrinsic:
; ARMV7: @ %bb.0:
-; ARMV7-NEXT: mov r12, sp
-; ARMV7-NEXT: vld1.64 {d0, d1}, [r12]
-; ARMV7-NEXT: vmov d3, r2, r3
-; ARMV7-NEXT: vmov d2, r0, r1
-; ARMV7-NEXT: vcmp.f32 s6, s2
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vcmp.f32 s7, s3
-; ARMV7-NEXT: vmovgt.f32 s2, s6
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vcmp.f32 s5, s1
-; ARMV7-NEXT: vmovgt.f32 s3, s7
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vcmp.f32 s4, s0
-; ARMV7-NEXT: vmovgt.f32 s1, s5
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vmovgt.f32 s0, s4
-; ARMV7-NEXT: vmov r2, r3, d1
-; ARMV7-NEXT: vmov r0, r1, d0
+; ARMV7-NEXT: vmov d17, r2, r3
+; ARMV7-NEXT: vmov d16, r0, r1
+; ARMV7-NEXT: mov r0, sp
+; ARMV7-NEXT: vld1.64 {d18, d19}, [r0]
+; ARMV7-NEXT: vmax.f32 q8, q8, q9
+; ARMV7-NEXT: vmov r0, r1, d16
+; ARMV7-NEXT: vmov r2, r3, d17
; ARMV7-NEXT: bx lr
;
; ARMV8-LABEL: fmaxnumv432_intrinsic:
@@ -757,31 +724,13 @@ define <4 x float> @fmaxnumv432_nsz_intrinsic(<4 x float> %x, <4 x float> %y) {
define <4 x float> @fmaxnumv432_zero_intrinsic(<4 x float> %x) {
; ARMV7-LABEL: fmaxnumv432_zero_intrinsic:
; ARMV7: @ %bb.0:
-; ARMV7-NEXT: vmov d3, r2, r3
-; ARMV7-NEXT: vldr s0, .LCPI21_0
-; ARMV7-NEXT: vmov d2, r0, r1
-; ARMV7-NEXT: vcmp.f32 s6, #0
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vmov.f32 s2, s0
-; ARMV7-NEXT: vcmp.f32 s7, #0
-; ARMV7-NEXT: vmovgt.f32 s2, s6
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vmov.f32 s3, s0
-; ARMV7-NEXT: vcmp.f32 s5, #0
-; ARMV7-NEXT: vmovgt.f32 s3, s7
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vmov.f32 s1, s0
-; ARMV7-NEXT: vcmp.f32 s4, #0
-; ARMV7-NEXT: vmovgt.f32 s1, s5
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vmovgt.f32 s0, s4
-; ARMV7-NEXT: vmov r2, r3, d1
-; ARMV7-NEXT: vmov r0, r1, d0
+; ARMV7-NEXT: vmov d19, r2, r3
+; ARMV7-NEXT: vmov.i32 q8, #0x0
+; ARMV7-NEXT: vmov d18, r0, r1
+; ARMV7-NEXT: vmax.f32 q8, q9, q8
+; ARMV7-NEXT: vmov r0, r1, d16
+; ARMV7-NEXT: vmov r2, r3, d17
; ARMV7-NEXT: bx lr
-; ARMV7-NEXT: .p2align 2
-; ARMV7-NEXT: @ %bb.1:
-; ARMV7-NEXT: .LCPI21_0:
-; ARMV7-NEXT: .long 0x00000000 @ float 0
;
; ARMV8-LABEL: fmaxnumv432_zero_intrinsic:
; ARMV8: @ %bb.0:
@@ -809,31 +758,13 @@ define <4 x float> @fmaxnumv432_zero_intrinsic(<4 x float> %x) {
define <4 x float> @fmaxnumv432_minus_zero_intrinsic(<4 x float> %x) {
; ARMV7-LABEL: fmaxnumv432_minus_zero_intrinsic:
; ARMV7: @ %bb.0:
-; ARMV7-NEXT: vldr s0, .LCPI22_0
-; ARMV7-NEXT: vmov d3, r2, r3
-; ARMV7-NEXT: vmov d2, r0, r1
-; ARMV7-NEXT: vcmp.f32 s6, s0
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vcmp.f32 s7, s0
-; ARMV7-NEXT: vmov.f32 s2, s0
-; ARMV7-NEXT: vmovgt.f32 s2, s6
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vcmp.f32 s5, s0
-; ARMV7-NEXT: vmov.f32 s3, s0
-; ARMV7-NEXT: vmovgt.f32 s3, s7
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vcmp.f32 s4, s0
-; ARMV7-NEXT: vmov.f32 s1, s0
-; ARMV7-NEXT: vmovgt.f32 s1, s5
-; ARMV7-NEXT: vmrs APSR_nzcv, fpscr
-; ARMV7-NEXT: vmovgt.f32 s0, s4
-; ARMV7-NEXT: vmov r2, r3, d1
-; ARMV7-NEXT: vmov r0, r1, d0
+; ARMV7-NEXT: vmov d19, r2, r3
+; ARMV7-NEXT: vmov.i32 q8, #0x80000000
+; ARMV7-NEXT: vmov d18, r0, r1
+; ARMV7-NEXT: vmax.f32 q8, q9, q8
+; ARMV7-NEXT: vmov r0, r1, d16
+; ARMV7-NEXT: vmov r2, r3, d17
; ARMV7-NEXT: bx lr
-; ARMV7-NEXT: .p2align 2
-; ARMV7-NEXT: @ %bb.1:
-; ARMV7-NEXT: .LCPI22_0:
-; ARMV7-NEXT: .long 0x80000000 @ float -0
;
; ARMV8-LABEL: fmaxnumv432_minus_zero_intrinsic:
; ARMV8: @ %bb.0:
diff --git a/llvm/test/CodeGen/ARM/vminmax.ll b/llvm/test/CodeGen/ARM/vminmax.ll
index dc4a6ac2a79b0..b026e2956f87c 100644
--- a/llvm/test/CodeGen/ARM/vminmax.ll
+++ b/llvm/test/CodeGen/ARM/vminmax.ll
@@ -1,129 +1,207 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vmins8(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmins8:
-;CHECK: vmin.s8
- %tmp1 = load <8 x i8>, ptr %A
- %tmp2 = load <8 x i8>, ptr %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
+; CHECK-LABEL: vmins8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmin.s8 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <8 x i8>, ptr %A
+ %tmp2 = load <8 x i8>, ptr %B
+ %tmp3 = call <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
}
define <4 x i16> @vmins16(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmins16:
-;CHECK: vmin.s16
- %tmp1 = load <4 x i16>, ptr %A
- %tmp2 = load <4 x i16>, ptr %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
+; CHECK-LABEL: vmins16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmin.s16 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <4 x i16>, ptr %A
+ %tmp2 = load <4 x i16>, ptr %B
+ %tmp3 = call <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
}
define <2 x i32> @vmins32(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmins32:
-;CHECK: vmin.s32
- %tmp1 = load <2 x i32>, ptr %A
- %tmp2 = load <2 x i32>, ptr %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
+; CHECK-LABEL: vmins32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmin.s32 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <2 x i32>, ptr %A
+ %tmp2 = load <2 x i32>, ptr %B
+ %tmp3 = call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
}
define <8 x i8> @vminu8(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vminu8:
-;CHECK: vmin.u8
- %tmp1 = load <8 x i8>, ptr %A
- %tmp2 = load <8 x i8>, ptr %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vminu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
+; CHECK-LABEL: vminu8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmin.u8 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <8 x i8>, ptr %A
+ %tmp2 = load <8 x i8>, ptr %B
+ %tmp3 = call <8 x i8> @llvm.arm.neon.vminu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
}
define <4 x i16> @vminu16(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vminu16:
-;CHECK: vmin.u16
- %tmp1 = load <4 x i16>, ptr %A
- %tmp2 = load <4 x i16>, ptr %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
+; CHECK-LABEL: vminu16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmin.u16 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <4 x i16>, ptr %A
+ %tmp2 = load <4 x i16>, ptr %B
+ %tmp3 = call <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
}
define <2 x i32> @vminu32(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vminu32:
-;CHECK: vmin.u32
- %tmp1 = load <2 x i32>, ptr %A
- %tmp2 = load <2 x i32>, ptr %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
+; CHECK-LABEL: vminu32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmin.u32 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <2 x i32>, ptr %A
+ %tmp2 = load <2 x i32>, ptr %B
+ %tmp3 = call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
}
define <2 x float> @vminf32(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vminf32:
-;CHECK: vmin.f32
- %tmp1 = load <2 x float>, ptr %A
- %tmp2 = load <2 x float>, ptr %B
- %tmp3 = call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
- ret <2 x float> %tmp3
+; CHECK-LABEL: vminf32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmin.f32 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <2 x float>, ptr %A
+ %tmp2 = load <2 x float>, ptr %B
+ %tmp3 = call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
}
define <16 x i8> @vminQs8(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vminQs8:
-;CHECK: vmin.s8
- %tmp1 = load <16 x i8>, ptr %A
- %tmp2 = load <16 x i8>, ptr %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vmins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
+; CHECK-LABEL: vminQs8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmin.s8 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
+ %tmp3 = call <16 x i8> @llvm.arm.neon.vmins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
}
define <8 x i16> @vminQs16(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vminQs16:
-;CHECK: vmin.s16
- %tmp1 = load <8 x i16>, ptr %A
- %tmp2 = load <8 x i16>, ptr %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vmins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
+; CHECK-LABEL: vminQs16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmin.s16 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <8 x i16>, ptr %A
+ %tmp2 = load <8 x i16>, ptr %B
+ %tmp3 = call <8 x i16> @llvm.arm.neon.vmins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
}
define <4 x i32> @vminQs32(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vminQs32:
-;CHECK: vmin.s32
- %tmp1 = load <4 x i32>, ptr %A
- %tmp2 = load <4 x i32>, ptr %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
+; CHECK-LABEL: vminQs32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmin.s32 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <4 x i32>, ptr %A
+ %tmp2 = load <4 x i32>, ptr %B
+ %tmp3 = call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
}
define <16 x i8> @vminQu8(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vminQu8:
-;CHECK: vmin.u8
- %tmp1 = load <16 x i8>, ptr %A
- %tmp2 = load <16 x i8>, ptr %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vminu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
+; CHECK-LABEL: vminQu8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmin.u8 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
+ %tmp3 = call <16 x i8> @llvm.arm.neon.vminu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
}
define <8 x i16> @vminQu16(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vminQu16:
-;CHECK: vmin.u16
- %tmp1 = load <8 x i16>, ptr %A
- %tmp2 = load <8 x i16>, ptr %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vminu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
+; CHECK-LABEL: vminQu16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmin.u16 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <8 x i16>, ptr %A
+ %tmp2 = load <8 x i16>, ptr %B
+ %tmp3 = call <8 x i16> @llvm.arm.neon.vminu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
}
define <4 x i32> @vminQu32(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vminQu32:
-;CHECK: vmin.u32
- %tmp1 = load <4 x i32>, ptr %A
- %tmp2 = load <4 x i32>, ptr %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
+; CHECK-LABEL: vminQu32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmin.u32 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <4 x i32>, ptr %A
+ %tmp2 = load <4 x i32>, ptr %B
+ %tmp3 = call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
}
define <4 x float> @vminQf32(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vminQf32:
-;CHECK: vmin.f32
- %tmp1 = load <4 x float>, ptr %A
- %tmp2 = load <4 x float>, ptr %B
- %tmp3 = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
- ret <4 x float> %tmp3
+; CHECK-LABEL: vminQf32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmin.f32 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <4 x float>, ptr %A
+ %tmp2 = load <4 x float>, ptr %B
+ %tmp3 = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
}
declare <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
@@ -147,129 +225,206 @@ declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>) nounwind read
declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwind readnone
define <8 x i8> @vmaxs8(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmaxs8:
-;CHECK: vmax.s8
- %tmp1 = load <8 x i8>, ptr %A
- %tmp2 = load <8 x i8>, ptr %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vmaxs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
+; CHECK-LABEL: vmaxs8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmax.s8 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <8 x i8>, ptr %A
+ %tmp2 = load <8 x i8>, ptr %B
+ %tmp3 = call <8 x i8> @llvm.arm.neon.vmaxs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
}
define <4 x i16> @vmaxs16(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmaxs16:
-;CHECK: vmax.s16
- %tmp1 = load <4 x i16>, ptr %A
- %tmp2 = load <4 x i16>, ptr %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
+; CHECK-LABEL: vmaxs16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmax.s16 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <4 x i16>, ptr %A
+ %tmp2 = load <4 x i16>, ptr %B
+ %tmp3 = call <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
}
define <2 x i32> @vmaxs32(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmaxs32:
-;CHECK: vmax.s32
- %tmp1 = load <2 x i32>, ptr %A
- %tmp2 = load <2 x i32>, ptr %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
+; CHECK-LABEL: vmaxs32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmax.s32 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <2 x i32>, ptr %A
+ %tmp2 = load <2 x i32>, ptr %B
+ %tmp3 = call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
}
define <8 x i8> @vmaxu8(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmaxu8:
-;CHECK: vmax.u8
- %tmp1 = load <8 x i8>, ptr %A
- %tmp2 = load <8 x i8>, ptr %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vmaxu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
+; CHECK-LABEL: vmaxu8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmax.u8 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <8 x i8>, ptr %A
+ %tmp2 = load <8 x i8>, ptr %B
+ %tmp3 = call <8 x i8> @llvm.arm.neon.vmaxu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
}
define <4 x i16> @vmaxu16(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmaxu16:
-;CHECK: vmax.u16
- %tmp1 = load <4 x i16>, ptr %A
- %tmp2 = load <4 x i16>, ptr %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
+; CHECK-LABEL: vmaxu16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmax.u16 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <4 x i16>, ptr %A
+ %tmp2 = load <4 x i16>, ptr %B
+ %tmp3 = call <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
}
define <2 x i32> @vmaxu32(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmaxu32:
-;CHECK: vmax.u32
- %tmp1 = load <2 x i32>, ptr %A
- %tmp2 = load <2 x i32>, ptr %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
+; CHECK-LABEL: vmaxu32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmax.u32 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <2 x i32>, ptr %A
+ %tmp2 = load <2 x i32>, ptr %B
+ %tmp3 = call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
}
define <2 x float> @vmaxf32(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmaxf32:
-;CHECK: vmax.f32
- %tmp1 = load <2 x float>, ptr %A
- %tmp2 = load <2 x float>, ptr %B
- %tmp3 = call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
- ret <2 x float> %tmp3
+; CHECK-LABEL: vmaxf32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmax.f32 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <2 x float>, ptr %A
+ %tmp2 = load <2 x float>, ptr %B
+ %tmp3 = call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
}
define <16 x i8> @vmaxQs8(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmaxQs8:
-;CHECK: vmax.s8
- %tmp1 = load <16 x i8>, ptr %A
- %tmp2 = load <16 x i8>, ptr %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
+; CHECK-LABEL: vmaxQs8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmax.s8 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
+ %tmp3 = call <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
}
define <8 x i16> @vmaxQs16(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmaxQs16:
-;CHECK: vmax.s16
- %tmp1 = load <8 x i16>, ptr %A
- %tmp2 = load <8 x i16>, ptr %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
+; CHECK-LABEL: vmaxQs16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmax.s16 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <8 x i16>, ptr %A
+ %tmp2 = load <8 x i16>, ptr %B
+ %tmp3 = call <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
}
define <4 x i32> @vmaxQs32(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmaxQs32:
-;CHECK: vmax.s32
- %tmp1 = load <4 x i32>, ptr %A
- %tmp2 = load <4 x i32>, ptr %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
+; CHECK-LABEL: vmaxQs32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmax.s32 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <4 x i32>, ptr %A
+ %tmp2 = load <4 x i32>, ptr %B
+ %tmp3 = call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
}
define <16 x i8> @vmaxQu8(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmaxQu8:
-;CHECK: vmax.u8
- %tmp1 = load <16 x i8>, ptr %A
- %tmp2 = load <16 x i8>, ptr %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
+; CHECK-LABEL: vmaxQu8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmax.u8 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <16 x i8>, ptr %A
+ %tmp2 = load <16 x i8>, ptr %B
+ %tmp3 = call <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
}
define <8 x i16> @vmaxQu16(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmaxQu16:
-;CHECK: vmax.u16
- %tmp1 = load <8 x i16>, ptr %A
- %tmp2 = load <8 x i16>, ptr %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
+; CHECK-LABEL: vmaxQu16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmax.u16 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <8 x i16>, ptr %A
+ %tmp2 = load <8 x i16>, ptr %B
+ %tmp3 = call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
}
define <4 x i32> @vmaxQu32(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmaxQu32:
-;CHECK: vmax.u32
- %tmp1 = load <4 x i32>, ptr %A
- %tmp2 = load <4 x i32>, ptr %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
+; CHECK-LABEL: vmaxQu32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmax.u32 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <4 x i32>, ptr %A
+ %tmp2 = load <4 x i32>, ptr %B
+ %tmp3 = call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
}
define <4 x float> @vmaxQf32(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: vmaxQf32:
-;CHECK: vmax.f32
- %tmp1 = load <4 x float>, ptr %A
- %tmp2 = load <4 x float>, ptr %B
- %tmp3 = call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
- ret <4 x float> %tmp3
+; CHECK-LABEL: vmaxQf32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmax.f32 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
+ %tmp1 = load <4 x float>, ptr %A
+ %tmp2 = load <4 x float>, ptr %B
+ %tmp3 = call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
}
declare <8 x i8> @llvm.arm.neon.vmaxs.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
@@ -296,19 +451,25 @@ declare float @llvm.maxnum.f32(float %a, float %b)
declare float @llvm.minnum.f32(float %a, float %b)
define float @maxnum(float %a, float %b) {
-;CHECK-LABEL: maxnum:
-;CHECK: vcmp.f32
-;CHECK-NEXT: vmrs
-;CHECK-NEXT: vmovgt.f32
+; CHECK-LABEL: maxnum:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov s0, r1
+; CHECK-NEXT: vmov s2, r0
+; CHECK-NEXT: vmax.f32 d0, d1, d0
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: mov pc, lr
%r = call nnan float @llvm.maxnum.f32(float %a, float %b)
ret float %r
}
define float @minnum(float %a, float %b) {
-;CHECK-LABEL: minnum:
-;CHECK: vcmp.f32
-;CHECK-NEXT: vmrs
-;CHECK-NEXT: vmovlt.f32
+; CHECK-LABEL: minnum:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov s0, r1
+; CHECK-NEXT: vmov s2, r0
+; CHECK-NEXT: vmin.f32 d0, d1, d0
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: mov pc, lr
%r = call nnan float @llvm.minnum.f32(float %a, float %b)
ret float %r
}
diff --git a/llvm/test/CodeGen/WebAssembly/f32.ll b/llvm/test/CodeGen/WebAssembly/f32.ll
index 7410fa43e4081..a7c35317f1da8 100644
--- a/llvm/test/CodeGen/WebAssembly/f32.ll
+++ b/llvm/test/CodeGen/WebAssembly/f32.ll
@@ -229,13 +229,10 @@ define float @fminnum32_intrinsic(float %x, float %y) {
; CHECK-LABEL: fminnum32_intrinsic:
; CHECK: .functype fminnum32_intrinsic (f32, f32) -> (f32)
; CHECK-NEXT: # %bb.0:
-; CHECK-NEXT: local.get $push5=, 0
-; CHECK-NEXT: local.get $push4=, 1
-; CHECK-NEXT: local.get $push3=, 0
-; CHECK-NEXT: local.get $push2=, 1
-; CHECK-NEXT: f32.lt $push0=, $pop3, $pop2
-; CHECK-NEXT: f32.select $push1=, $pop5, $pop4, $pop0
-; CHECK-NEXT: return $pop1
+; CHECK-NEXT: local.get $push2=, 0
+; CHECK-NEXT: local.get $push1=, 1
+; CHECK-NEXT: f32.min $push0=, $pop2, $pop1
+; CHECK-NEXT: return $pop0
%a = call nnan float @llvm.minnum.f32(float %x, float %y)
ret float %a
}
@@ -282,13 +279,10 @@ define float @fmaxnum32_intrinsic(float %x, float %y) {
; CHECK-LABEL: fmaxnum32_intrinsic:
; CHECK: .functype fmaxnum32_intrinsic (f32, f32) -> (f32)
; CHECK-NEXT: # %bb.0:
-; CHECK-NEXT: local.get $push5=, 0
-; CHECK-NEXT: local.get $push4=, 1
-; CHECK-NEXT: local.get $push3=, 0
-; CHECK-NEXT: local.get $push2=, 1
-; CHECK-NEXT: f32.gt $push0=, $pop3, $pop2
-; CHECK-NEXT: f32.select $push1=, $pop5, $pop4, $pop0
-; CHECK-NEXT: return $pop1
+; CHECK-NEXT: local.get $push2=, 0
+; CHECK-NEXT: local.get $push1=, 1
+; CHECK-NEXT: f32.max $push0=, $pop2, $pop1
+; CHECK-NEXT: return $pop0
%a = call nnan float @llvm.maxnum.f32(float %x, float %y)
ret float %a
}
@@ -309,13 +303,10 @@ define float @fmaxnum32_zero_intrinsic(float %x) {
; CHECK-LABEL: fmaxnum32_zero_intrinsic:
; CHECK: .functype fmaxnum32_zero_intrinsic (f32) -> (f32)
; CHECK-NEXT: # %bb.0:
-; CHECK-NEXT: local.get $push5=, 0
+; CHECK-NEXT: local.get $push2=, 0
; CHECK-NEXT: f32.const $push0=, 0x0p0
-; CHECK-NEXT: local.get $push4=, 0
-; CHECK-NEXT: f32.const $push3=, 0x0p0
-; CHECK-NEXT: f32.gt $push1=, $pop4, $pop3
-; CHECK-NEXT: f32.select $push2=, $pop5, $pop0, $pop1
-; CHECK-NEXT: return $pop2
+; CHECK-NEXT: f32.max $push1=, $pop2, $pop0
+; CHECK-NEXT: return $pop1
%a = call nnan float @llvm.maxnum.f32(float %x, float 0.0)
ret float %a
}
diff --git a/llvm/test/CodeGen/WebAssembly/f64.ll b/llvm/test/CodeGen/WebAssembly/f64.ll
index d79f34185eb87..c5af777888d36 100644
--- a/llvm/test/CodeGen/WebAssembly/f64.ll
+++ b/llvm/test/CodeGen/WebAssembly/f64.ll
@@ -229,13 +229,10 @@ define double @fminnum64_intrinsic(double %x, double %y) {
; CHECK-LABEL: fminnum64_intrinsic:
; CHECK: .functype fminnum64_intrinsic (f64, f64) -> (f64)
; CHECK-NEXT: # %bb.0:
-; CHECK-NEXT: local.get $push5=, 0
-; CHECK-NEXT: local.get $push4=, 1
-; CHECK-NEXT: local.get $push3=, 0
-; CHECK-NEXT: local.get $push2=, 1
-; CHECK-NEXT: f64.lt $push0=, $pop3, $pop2
-; CHECK-NEXT: f64.select $push1=, $pop5, $pop4, $pop0
-; CHECK-NEXT: return $pop1
+; CHECK-NEXT: local.get $push2=, 0
+; CHECK-NEXT: local.get $push1=, 1
+; CHECK-NEXT: f64.min $push0=, $pop2, $pop1
+; CHECK-NEXT: return $pop0
%a = call nnan double @llvm.minnum.f64(double %x, double %y)
ret double %a
}
@@ -256,13 +253,10 @@ define double @fminnum64_zero_intrinsic(double %x) {
; CHECK-LABEL: fminnum64_zero_intrinsic:
; CHECK: .functype fminnum64_zero_intrinsic (f64) -> (f64)
; CHECK-NEXT: # %bb.0:
-; CHECK-NEXT: local.get $push5=, 0
+; CHECK-NEXT: local.get $push2=, 0
; CHECK-NEXT: f64.const $push0=, -0x0p0
-; CHECK-NEXT: local.get $push4=, 0
-; CHECK-NEXT: f64.const $push3=, -0x0p0
-; CHECK-NEXT: f64.lt $push1=, $pop4, $pop3
-; CHECK-NEXT: f64.select $push2=, $pop5, $pop0, $pop1
-; CHECK-NEXT: return $pop2
+; CHECK-NEXT: f64.min $push1=, $pop2, $pop0
+; CHECK-NEXT: return $pop1
%a = call nnan double @llvm.minnum.f64(double %x, double -0.0)
ret double %a
}
@@ -297,13 +291,10 @@ define double @fmaxnum64_intrinsic(double %x, double %y) {
; CHECK-LABEL: fmaxnum64_intrinsic:
; CHECK: .functype fmaxnum64_intrinsic (f64, f64) -> (f64)
; CHECK-NEXT: # %bb.0:
-; CHECK-NEXT: local.get $push5=, 0
-; CHECK-NEXT: local.get $push4=, 1
-; CHECK-NEXT: local.get $push3=, 0
-; CHECK-NEXT: local.get $push2=, 1
-; CHECK-NEXT: f64.gt $push0=, $pop3, $pop2
-; CHECK-NEXT: f64.select $push1=, $pop5, $pop4, $pop0
-; CHECK-NEXT: return $pop1
+; CHECK-NEXT: local.get $push2=, 0
+; CHECK-NEXT: local.get $push1=, 1
+; CHECK-NEXT: f64.max $push0=, $pop2, $pop1
+; CHECK-NEXT: return $pop0
%a = call nnan double @llvm.maxnum.f64(double %x, double %y)
ret double %a
}
@@ -324,13 +315,10 @@ define double @fmaxnum64_zero_intrinsic(double %x) {
; CHECK-LABEL: fmaxnum64_zero_intrinsic:
; CHECK: .functype fmaxnum64_zero_intrinsic (f64) -> (f64)
; CHECK-NEXT: # %bb.0:
-; CHECK-NEXT: local.get $push5=, 0
+; CHECK-NEXT: local.get $push2=, 0
; CHECK-NEXT: f64.const $push0=, 0x0p0
-; CHECK-NEXT: local.get $push4=, 0
-; CHECK-NEXT: f64.const $push3=, 0x0p0
-; CHECK-NEXT: f64.gt $push1=, $pop4, $pop3
-; CHECK-NEXT: f64.select $push2=, $pop5, $pop0, $pop1
-; CHECK-NEXT: return $pop2
+; CHECK-NEXT: f64.max $push1=, $pop2, $pop0
+; CHECK-NEXT: return $pop1
%a = call nnan double @llvm.maxnum.f64(double %x, double 0.0)
ret double %a
}
diff --git a/llvm/test/CodeGen/WebAssembly/simd-arith.ll b/llvm/test/CodeGen/WebAssembly/simd-arith.ll
index 60b4a837f7c31..cb06ee84ec99c 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-arith.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-arith.ll
@@ -12396,101 +12396,39 @@ define <4 x float> @minnum_intrinsic_v4f32(<4 x float> %x, <4 x float> %y) {
; SIMD128-LABEL: minnum_intrinsic_v4f32:
; SIMD128: .functype minnum_intrinsic_v4f32 (v128, v128) -> (v128)
; SIMD128-NEXT: # %bb.0:
-; SIMD128-NEXT: f32x4.extract_lane $push27=, $0, 0
-; SIMD128-NEXT: local.tee $push26=, $3=, $pop27
-; SIMD128-NEXT: f32x4.extract_lane $push25=, $1, 0
-; SIMD128-NEXT: local.tee $push24=, $2=, $pop25
-; SIMD128-NEXT: f32.lt $push2=, $3, $2
-; SIMD128-NEXT: f32.select $push3=, $pop26, $pop24, $pop2
-; SIMD128-NEXT: f32x4.splat $push4=, $pop3
-; SIMD128-NEXT: f32x4.extract_lane $push23=, $0, 1
-; SIMD128-NEXT: local.tee $push22=, $3=, $pop23
-; SIMD128-NEXT: f32x4.extract_lane $push21=, $1, 1
-; SIMD128-NEXT: local.tee $push20=, $2=, $pop21
-; SIMD128-NEXT: f32.lt $push0=, $3, $2
-; SIMD128-NEXT: f32.select $push1=, $pop22, $pop20, $pop0
-; SIMD128-NEXT: f32x4.replace_lane $push5=, $pop4, 1, $pop1
-; SIMD128-NEXT: f32x4.extract_lane $push19=, $0, 2
-; SIMD128-NEXT: local.tee $push18=, $3=, $pop19
-; SIMD128-NEXT: f32x4.extract_lane $push17=, $1, 2
-; SIMD128-NEXT: local.tee $push16=, $2=, $pop17
-; SIMD128-NEXT: f32.lt $push6=, $3, $2
-; SIMD128-NEXT: f32.select $push7=, $pop18, $pop16, $pop6
-; SIMD128-NEXT: f32x4.replace_lane $push8=, $pop5, 2, $pop7
-; SIMD128-NEXT: f32x4.extract_lane $push15=, $0, 3
-; SIMD128-NEXT: local.tee $push14=, $3=, $pop15
-; SIMD128-NEXT: f32x4.extract_lane $push13=, $1, 3
-; SIMD128-NEXT: local.tee $push12=, $2=, $pop13
-; SIMD128-NEXT: f32.lt $push9=, $3, $2
-; SIMD128-NEXT: f32.select $push10=, $pop14, $pop12, $pop9
-; SIMD128-NEXT: f32x4.replace_lane $push11=, $pop8, 3, $pop10
-; SIMD128-NEXT: return $pop11
+; SIMD128-NEXT: f32x4.min $push0=, $0, $1
+; SIMD128-NEXT: return $pop0
;
; SIMD128-FAST-LABEL: minnum_intrinsic_v4f32:
; SIMD128-FAST: .functype minnum_intrinsic_v4f32 (v128, v128) -> (v128)
; SIMD128-FAST-NEXT: # %bb.0:
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push27=, $0, 0
-; SIMD128-FAST-NEXT: local.tee $push26=, $3=, $pop27
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push25=, $1, 0
-; SIMD128-FAST-NEXT: local.tee $push24=, $2=, $pop25
-; SIMD128-FAST-NEXT: f32.lt $push3=, $3, $2
-; SIMD128-FAST-NEXT: f32.select $push4=, $pop26, $pop24, $pop3
-; SIMD128-FAST-NEXT: f32x4.splat $push5=, $pop4
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push23=, $0, 1
-; SIMD128-FAST-NEXT: local.tee $push22=, $3=, $pop23
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push21=, $1, 1
-; SIMD128-FAST-NEXT: local.tee $push20=, $2=, $pop21
-; SIMD128-FAST-NEXT: f32.lt $push1=, $3, $2
-; SIMD128-FAST-NEXT: f32.select $push2=, $pop22, $pop20, $pop1
-; SIMD128-FAST-NEXT: f32x4.replace_lane $push6=, $pop5, 1, $pop2
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push19=, $0, 2
-; SIMD128-FAST-NEXT: local.tee $push18=, $3=, $pop19
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push17=, $1, 2
-; SIMD128-FAST-NEXT: local.tee $push16=, $2=, $pop17
-; SIMD128-FAST-NEXT: f32.lt $push7=, $3, $2
-; SIMD128-FAST-NEXT: f32.select $push8=, $pop18, $pop16, $pop7
-; SIMD128-FAST-NEXT: f32x4.replace_lane $push9=, $pop6, 2, $pop8
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push15=, $0, 3
-; SIMD128-FAST-NEXT: local.tee $push14=, $3=, $pop15
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push13=, $1, 3
-; SIMD128-FAST-NEXT: local.tee $push12=, $2=, $pop13
-; SIMD128-FAST-NEXT: f32.lt $push10=, $3, $2
-; SIMD128-FAST-NEXT: f32.select $push11=, $pop14, $pop12, $pop10
-; SIMD128-FAST-NEXT: f32x4.replace_lane $push0=, $pop9, 3, $pop11
+; SIMD128-FAST-NEXT: f32x4.min $push0=, $0, $1
; SIMD128-FAST-NEXT: return $pop0
;
; NO-SIMD128-LABEL: minnum_intrinsic_v4f32:
; NO-SIMD128: .functype minnum_intrinsic_v4f32 (i32, f32, f32, f32, f32, f32, f32, f32, f32) -> ()
; NO-SIMD128-NEXT: # %bb.0:
-; NO-SIMD128-NEXT: f32.lt $push0=, $4, $8
-; NO-SIMD128-NEXT: f32.select $push1=, $4, $8, $pop0
-; NO-SIMD128-NEXT: f32.store 12($0), $pop1
-; NO-SIMD128-NEXT: f32.lt $push2=, $3, $7
-; NO-SIMD128-NEXT: f32.select $push3=, $3, $7, $pop2
-; NO-SIMD128-NEXT: f32.store 8($0), $pop3
-; NO-SIMD128-NEXT: f32.lt $push4=, $2, $6
-; NO-SIMD128-NEXT: f32.select $push5=, $2, $6, $pop4
-; NO-SIMD128-NEXT: f32.store 4($0), $pop5
-; NO-SIMD128-NEXT: f32.lt $push6=, $1, $5
-; NO-SIMD128-NEXT: f32.select $push7=, $1, $5, $pop6
-; NO-SIMD128-NEXT: f32.store 0($0), $pop7
+; NO-SIMD128-NEXT: f32.min $push0=, $4, $8
+; NO-SIMD128-NEXT: f32.store 12($0), $pop0
+; NO-SIMD128-NEXT: f32.min $push1=, $3, $7
+; NO-SIMD128-NEXT: f32.store 8($0), $pop1
+; NO-SIMD128-NEXT: f32.min $push2=, $2, $6
+; NO-SIMD128-NEXT: f32.store 4($0), $pop2
+; NO-SIMD128-NEXT: f32.min $push3=, $1, $5
+; NO-SIMD128-NEXT: f32.store 0($0), $pop3
; NO-SIMD128-NEXT: return
;
; NO-SIMD128-FAST-LABEL: minnum_intrinsic_v4f32:
; NO-SIMD128-FAST: .functype minnum_intrinsic_v4f32 (i32, f32, f32, f32, f32, f32, f32, f32, f32) -> ()
; NO-SIMD128-FAST-NEXT: # %bb.0:
-; NO-SIMD128-FAST-NEXT: f32.lt $push0=, $1, $5
-; NO-SIMD128-FAST-NEXT: f32.select $push1=, $1, $5, $pop0
-; NO-SIMD128-FAST-NEXT: f32.store 0($0), $pop1
-; NO-SIMD128-FAST-NEXT: f32.lt $push2=, $2, $6
-; NO-SIMD128-FAST-NEXT: f32.select $push3=, $2, $6, $pop2
-; NO-SIMD128-FAST-NEXT: f32.store 4($0), $pop3
-; NO-SIMD128-FAST-NEXT: f32.lt $push4=, $3, $7
-; NO-SIMD128-FAST-NEXT: f32.select $push5=, $3, $7, $pop4
-; NO-SIMD128-FAST-NEXT: f32.store 8($0), $pop5
-; NO-SIMD128-FAST-NEXT: f32.lt $push6=, $4, $8
-; NO-SIMD128-FAST-NEXT: f32.select $push7=, $4, $8, $pop6
-; NO-SIMD128-FAST-NEXT: f32.store 12($0), $pop7
+; NO-SIMD128-FAST-NEXT: f32.min $push0=, $1, $5
+; NO-SIMD128-FAST-NEXT: f32.store 0($0), $pop0
+; NO-SIMD128-FAST-NEXT: f32.min $push1=, $2, $6
+; NO-SIMD128-FAST-NEXT: f32.store 4($0), $pop1
+; NO-SIMD128-FAST-NEXT: f32.min $push2=, $3, $7
+; NO-SIMD128-FAST-NEXT: f32.store 8($0), $pop2
+; NO-SIMD128-FAST-NEXT: f32.min $push3=, $4, $8
+; NO-SIMD128-FAST-NEXT: f32.store 12($0), $pop3
; NO-SIMD128-FAST-NEXT: return
%a = call nnan <4 x float> @llvm.minnum.v4f32(<4 x float> %x, <4 x float> %y)
ret <4 x float> %a
@@ -12594,67 +12532,15 @@ define <4 x float> @fminnumv432_one_zero_intrinsic(<4 x float> %x) {
; SIMD128-LABEL: fminnumv432_one_zero_intrinsic:
; SIMD128: .functype fminnumv432_one_zero_intrinsic (v128) -> (v128)
; SIMD128-NEXT: # %bb.0:
-; SIMD128-NEXT: f32x4.extract_lane $push27=, $0, 0
-; SIMD128-NEXT: local.tee $push26=, $1=, $pop27
-; SIMD128-NEXT: f32.const $push3=, -0x1p0
-; SIMD128-NEXT: f32.const $push25=, -0x1p0
-; SIMD128-NEXT: f32.lt $push4=, $1, $pop25
-; SIMD128-NEXT: f32.select $push5=, $pop26, $pop3, $pop4
-; SIMD128-NEXT: f32x4.splat $push6=, $pop5
-; SIMD128-NEXT: f32x4.extract_lane $push24=, $0, 1
-; SIMD128-NEXT: local.tee $push23=, $1=, $pop24
-; SIMD128-NEXT: f32.const $push0=, 0x0p0
-; SIMD128-NEXT: f32.const $push22=, 0x0p0
-; SIMD128-NEXT: f32.lt $push1=, $1, $pop22
-; SIMD128-NEXT: f32.select $push2=, $pop23, $pop0, $pop1
-; SIMD128-NEXT: f32x4.replace_lane $push7=, $pop6, 1, $pop2
-; SIMD128-NEXT: f32x4.extract_lane $push21=, $0, 2
-; SIMD128-NEXT: local.tee $push20=, $1=, $pop21
-; SIMD128-NEXT: f32.const $push19=, -0x1p0
-; SIMD128-NEXT: f32.const $push18=, -0x1p0
-; SIMD128-NEXT: f32.lt $push8=, $1, $pop18
-; SIMD128-NEXT: f32.select $push9=, $pop20, $pop19, $pop8
-; SIMD128-NEXT: f32x4.replace_lane $push10=, $pop7, 2, $pop9
-; SIMD128-NEXT: f32x4.extract_lane $push17=, $0, 3
-; SIMD128-NEXT: local.tee $push16=, $1=, $pop17
-; SIMD128-NEXT: f32.const $push15=, -0x1p0
-; SIMD128-NEXT: f32.const $push14=, -0x1p0
-; SIMD128-NEXT: f32.lt $push11=, $1, $pop14
-; SIMD128-NEXT: f32.select $push12=, $pop16, $pop15, $pop11
-; SIMD128-NEXT: f32x4.replace_lane $push13=, $pop10, 3, $pop12
-; SIMD128-NEXT: return $pop13
+; SIMD128-NEXT: v128.const $push0=, -0x1p0, 0x0p0, -0x1p0, -0x1p0
+; SIMD128-NEXT: f32x4.min $push1=, $0, $pop0
+; SIMD128-NEXT: return $pop1
;
; SIMD128-FAST-LABEL: fminnumv432_one_zero_intrinsic:
; SIMD128-FAST: .functype fminnumv432_one_zero_intrinsic (v128) -> (v128)
; SIMD128-FAST-NEXT: # %bb.0:
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push27=, $0, 0
-; SIMD128-FAST-NEXT: local.tee $push26=, $1=, $pop27
-; SIMD128-FAST-NEXT: f32.const $push4=, -0x1p0
-; SIMD128-FAST-NEXT: f32.const $push25=, -0x1p0
-; SIMD128-FAST-NEXT: f32.lt $push5=, $1, $pop25
-; SIMD128-FAST-NEXT: f32.select $push6=, $pop26, $pop4, $pop5
-; SIMD128-FAST-NEXT: f32x4.splat $push7=, $pop6
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push24=, $0, 1
-; SIMD128-FAST-NEXT: local.tee $push23=, $1=, $pop24
-; SIMD128-FAST-NEXT: f32.const $push1=, 0x0p0
-; SIMD128-FAST-NEXT: f32.const $push22=, 0x0p0
-; SIMD128-FAST-NEXT: f32.lt $push2=, $1, $pop22
-; SIMD128-FAST-NEXT: f32.select $push3=, $pop23, $pop1, $pop2
-; SIMD128-FAST-NEXT: f32x4.replace_lane $push8=, $pop7, 1, $pop3
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push21=, $0, 2
-; SIMD128-FAST-NEXT: local.tee $push20=, $1=, $pop21
-; SIMD128-FAST-NEXT: f32.const $push19=, -0x1p0
-; SIMD128-FAST-NEXT: f32.const $push18=, -0x1p0
-; SIMD128-FAST-NEXT: f32.lt $push9=, $1, $pop18
-; SIMD128-FAST-NEXT: f32.select $push10=, $pop20, $pop19, $pop9
-; SIMD128-FAST-NEXT: f32x4.replace_lane $push11=, $pop8, 2, $pop10
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push17=, $0, 3
-; SIMD128-FAST-NEXT: local.tee $push16=, $1=, $pop17
-; SIMD128-FAST-NEXT: f32.const $push15=, -0x1p0
-; SIMD128-FAST-NEXT: f32.const $push14=, -0x1p0
-; SIMD128-FAST-NEXT: f32.lt $push12=, $1, $pop14
-; SIMD128-FAST-NEXT: f32.select $push13=, $pop16, $pop15, $pop12
-; SIMD128-FAST-NEXT: f32x4.replace_lane $push0=, $pop11, 3, $pop13
+; SIMD128-FAST-NEXT: v128.const $push1=, -0x1p0, 0x0p0, -0x1p0, -0x1p0
+; SIMD128-FAST-NEXT: f32x4.min $push0=, $0, $pop1
; SIMD128-FAST-NEXT: return $pop0
;
; NO-SIMD128-LABEL: fminnumv432_one_zero_intrinsic:
@@ -12663,17 +12549,15 @@ define <4 x float> @fminnumv432_one_zero_intrinsic(<4 x float> %x) {
; NO-SIMD128-NEXT: f32.const $push0=, -0x1p0
; NO-SIMD128-NEXT: f32.min $push1=, $4, $pop0
; NO-SIMD128-NEXT: f32.store 12($0), $pop1
-; NO-SIMD128-NEXT: f32.const $push9=, -0x1p0
-; NO-SIMD128-NEXT: f32.min $push2=, $3, $pop9
+; NO-SIMD128-NEXT: f32.const $push7=, -0x1p0
+; NO-SIMD128-NEXT: f32.min $push2=, $3, $pop7
; NO-SIMD128-NEXT: f32.store 8($0), $pop2
; NO-SIMD128-NEXT: f32.const $push3=, 0x0p0
-; NO-SIMD128-NEXT: f32.const $push8=, 0x0p0
-; NO-SIMD128-NEXT: f32.lt $push4=, $2, $pop8
-; NO-SIMD128-NEXT: f32.select $push5=, $2, $pop3, $pop4
-; NO-SIMD128-NEXT: f32.store 4($0), $pop5
-; NO-SIMD128-NEXT: f32.const $push7=, -0x1p0
-; NO-SIMD128-NEXT: f32.min $push6=, $1, $pop7
-; NO-SIMD128-NEXT: f32.store 0($0), $pop6
+; NO-SIMD128-NEXT: f32.min $push4=, $2, $pop3
+; NO-SIMD128-NEXT: f32.store 4($0), $pop4
+; NO-SIMD128-NEXT: f32.const $push6=, -0x1p0
+; NO-SIMD128-NEXT: f32.min $push5=, $1, $pop6
+; NO-SIMD128-NEXT: f32.store 0($0), $pop5
; NO-SIMD128-NEXT: return
;
; NO-SIMD128-FAST-LABEL: fminnumv432_one_zero_intrinsic:
@@ -12682,17 +12566,15 @@ define <4 x float> @fminnumv432_one_zero_intrinsic(<4 x float> %x) {
; NO-SIMD128-FAST-NEXT: f32.const $push0=, -0x1p0
; NO-SIMD128-FAST-NEXT: f32.min $push1=, $1, $pop0
; NO-SIMD128-FAST-NEXT: f32.store 0($0), $pop1
-; NO-SIMD128-FAST-NEXT: f32.const $push9=, -0x1p0
-; NO-SIMD128-FAST-NEXT: f32.min $push2=, $3, $pop9
-; NO-SIMD128-FAST-NEXT: f32.store 8($0), $pop2
-; NO-SIMD128-FAST-NEXT: f32.const $push3=, 0x0p0
-; NO-SIMD128-FAST-NEXT: f32.const $push8=, 0x0p0
-; NO-SIMD128-FAST-NEXT: f32.lt $push4=, $2, $pop8
-; NO-SIMD128-FAST-NEXT: f32.select $push5=, $2, $pop3, $pop4
-; NO-SIMD128-FAST-NEXT: f32.store 4($0), $pop5
+; NO-SIMD128-FAST-NEXT: f32.const $push2=, 0x0p0
+; NO-SIMD128-FAST-NEXT: f32.min $push3=, $2, $pop2
+; NO-SIMD128-FAST-NEXT: f32.store 4($0), $pop3
; NO-SIMD128-FAST-NEXT: f32.const $push7=, -0x1p0
-; NO-SIMD128-FAST-NEXT: f32.min $push6=, $4, $pop7
-; NO-SIMD128-FAST-NEXT: f32.store 12($0), $pop6
+; NO-SIMD128-FAST-NEXT: f32.min $push4=, $3, $pop7
+; NO-SIMD128-FAST-NEXT: f32.store 8($0), $pop4
+; NO-SIMD128-FAST-NEXT: f32.const $push6=, -0x1p0
+; NO-SIMD128-FAST-NEXT: f32.min $push5=, $4, $pop6
+; NO-SIMD128-FAST-NEXT: f32.store 12($0), $pop5
; NO-SIMD128-FAST-NEXT: return
%a = call nnan <4 x float> @llvm.minnum.v4f32(<4 x float> %x, <4 x float><float -1.0, float 0.0, float -1.0, float -1.0>)
ret <4 x float> %a
@@ -12746,101 +12628,39 @@ define <4 x float> @maxnum_intrinsic_v4f32(<4 x float> %x, <4 x float> %y) {
; SIMD128-LABEL: maxnum_intrinsic_v4f32:
; SIMD128: .functype maxnum_intrinsic_v4f32 (v128, v128) -> (v128)
; SIMD128-NEXT: # %bb.0:
-; SIMD128-NEXT: f32x4.extract_lane $push27=, $0, 0
-; SIMD128-NEXT: local.tee $push26=, $3=, $pop27
-; SIMD128-NEXT: f32x4.extract_lane $push25=, $1, 0
-; SIMD128-NEXT: local.tee $push24=, $2=, $pop25
-; SIMD128-NEXT: f32.gt $push2=, $3, $2
-; SIMD128-NEXT: f32.select $push3=, $pop26, $pop24, $pop2
-; SIMD128-NEXT: f32x4.splat $push4=, $pop3
-; SIMD128-NEXT: f32x4.extract_lane $push23=, $0, 1
-; SIMD128-NEXT: local.tee $push22=, $3=, $pop23
-; SIMD128-NEXT: f32x4.extract_lane $push21=, $1, 1
-; SIMD128-NEXT: local.tee $push20=, $2=, $pop21
-; SIMD128-NEXT: f32.gt $push0=, $3, $2
-; SIMD128-NEXT: f32.select $push1=, $pop22, $pop20, $pop0
-; SIMD128-NEXT: f32x4.replace_lane $push5=, $pop4, 1, $pop1
-; SIMD128-NEXT: f32x4.extract_lane $push19=, $0, 2
-; SIMD128-NEXT: local.tee $push18=, $3=, $pop19
-; SIMD128-NEXT: f32x4.extract_lane $push17=, $1, 2
-; SIMD128-NEXT: local.tee $push16=, $2=, $pop17
-; SIMD128-NEXT: f32.gt $push6=, $3, $2
-; SIMD128-NEXT: f32.select $push7=, $pop18, $pop16, $pop6
-; SIMD128-NEXT: f32x4.replace_lane $push8=, $pop5, 2, $pop7
-; SIMD128-NEXT: f32x4.extract_lane $push15=, $0, 3
-; SIMD128-NEXT: local.tee $push14=, $3=, $pop15
-; SIMD128-NEXT: f32x4.extract_lane $push13=, $1, 3
-; SIMD128-NEXT: local.tee $push12=, $2=, $pop13
-; SIMD128-NEXT: f32.gt $push9=, $3, $2
-; SIMD128-NEXT: f32.select $push10=, $pop14, $pop12, $pop9
-; SIMD128-NEXT: f32x4.replace_lane $push11=, $pop8, 3, $pop10
-; SIMD128-NEXT: return $pop11
+; SIMD128-NEXT: f32x4.max $push0=, $0, $1
+; SIMD128-NEXT: return $pop0
;
; SIMD128-FAST-LABEL: maxnum_intrinsic_v4f32:
; SIMD128-FAST: .functype maxnum_intrinsic_v4f32 (v128, v128) -> (v128)
; SIMD128-FAST-NEXT: # %bb.0:
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push27=, $0, 0
-; SIMD128-FAST-NEXT: local.tee $push26=, $3=, $pop27
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push25=, $1, 0
-; SIMD128-FAST-NEXT: local.tee $push24=, $2=, $pop25
-; SIMD128-FAST-NEXT: f32.gt $push3=, $3, $2
-; SIMD128-FAST-NEXT: f32.select $push4=, $pop26, $pop24, $pop3
-; SIMD128-FAST-NEXT: f32x4.splat $push5=, $pop4
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push23=, $0, 1
-; SIMD128-FAST-NEXT: local.tee $push22=, $3=, $pop23
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push21=, $1, 1
-; SIMD128-FAST-NEXT: local.tee $push20=, $2=, $pop21
-; SIMD128-FAST-NEXT: f32.gt $push1=, $3, $2
-; SIMD128-FAST-NEXT: f32.select $push2=, $pop22, $pop20, $pop1
-; SIMD128-FAST-NEXT: f32x4.replace_lane $push6=, $pop5, 1, $pop2
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push19=, $0, 2
-; SIMD128-FAST-NEXT: local.tee $push18=, $3=, $pop19
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push17=, $1, 2
-; SIMD128-FAST-NEXT: local.tee $push16=, $2=, $pop17
-; SIMD128-FAST-NEXT: f32.gt $push7=, $3, $2
-; SIMD128-FAST-NEXT: f32.select $push8=, $pop18, $pop16, $pop7
-; SIMD128-FAST-NEXT: f32x4.replace_lane $push9=, $pop6, 2, $pop8
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push15=, $0, 3
-; SIMD128-FAST-NEXT: local.tee $push14=, $3=, $pop15
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push13=, $1, 3
-; SIMD128-FAST-NEXT: local.tee $push12=, $2=, $pop13
-; SIMD128-FAST-NEXT: f32.gt $push10=, $3, $2
-; SIMD128-FAST-NEXT: f32.select $push11=, $pop14, $pop12, $pop10
-; SIMD128-FAST-NEXT: f32x4.replace_lane $push0=, $pop9, 3, $pop11
+; SIMD128-FAST-NEXT: f32x4.max $push0=, $0, $1
; SIMD128-FAST-NEXT: return $pop0
;
; NO-SIMD128-LABEL: maxnum_intrinsic_v4f32:
; NO-SIMD128: .functype maxnum_intrinsic_v4f32 (i32, f32, f32, f32, f32, f32, f32, f32, f32) -> ()
; NO-SIMD128-NEXT: # %bb.0:
-; NO-SIMD128-NEXT: f32.gt $push0=, $4, $8
-; NO-SIMD128-NEXT: f32.select $push1=, $4, $8, $pop0
-; NO-SIMD128-NEXT: f32.store 12($0), $pop1
-; NO-SIMD128-NEXT: f32.gt $push2=, $3, $7
-; NO-SIMD128-NEXT: f32.select $push3=, $3, $7, $pop2
-; NO-SIMD128-NEXT: f32.store 8($0), $pop3
-; NO-SIMD128-NEXT: f32.gt $push4=, $2, $6
-; NO-SIMD128-NEXT: f32.select $push5=, $2, $6, $pop4
-; NO-SIMD128-NEXT: f32.store 4($0), $pop5
-; NO-SIMD128-NEXT: f32.gt $push6=, $1, $5
-; NO-SIMD128-NEXT: f32.select $push7=, $1, $5, $pop6
-; NO-SIMD128-NEXT: f32.store 0($0), $pop7
+; NO-SIMD128-NEXT: f32.max $push0=, $4, $8
+; NO-SIMD128-NEXT: f32.store 12($0), $pop0
+; NO-SIMD128-NEXT: f32.max $push1=, $3, $7
+; NO-SIMD128-NEXT: f32.store 8($0), $pop1
+; NO-SIMD128-NEXT: f32.max $push2=, $2, $6
+; NO-SIMD128-NEXT: f32.store 4($0), $pop2
+; NO-SIMD128-NEXT: f32.max $push3=, $1, $5
+; NO-SIMD128-NEXT: f32.store 0($0), $pop3
; NO-SIMD128-NEXT: return
;
; NO-SIMD128-FAST-LABEL: maxnum_intrinsic_v4f32:
; NO-SIMD128-FAST: .functype maxnum_intrinsic_v4f32 (i32, f32, f32, f32, f32, f32, f32, f32, f32) -> ()
; NO-SIMD128-FAST-NEXT: # %bb.0:
-; NO-SIMD128-FAST-NEXT: f32.gt $push0=, $1, $5
-; NO-SIMD128-FAST-NEXT: f32.select $push1=, $1, $5, $pop0
-; NO-SIMD128-FAST-NEXT: f32.store 0($0), $pop1
-; NO-SIMD128-FAST-NEXT: f32.gt $push2=, $2, $6
-; NO-SIMD128-FAST-NEXT: f32.select $push3=, $2, $6, $pop2
-; NO-SIMD128-FAST-NEXT: f32.store 4($0), $pop3
-; NO-SIMD128-FAST-NEXT: f32.gt $push4=, $3, $7
-; NO-SIMD128-FAST-NEXT: f32.select $push5=, $3, $7, $pop4
-; NO-SIMD128-FAST-NEXT: f32.store 8($0), $pop5
-; NO-SIMD128-FAST-NEXT: f32.gt $push6=, $4, $8
-; NO-SIMD128-FAST-NEXT: f32.select $push7=, $4, $8, $pop6
-; NO-SIMD128-FAST-NEXT: f32.store 12($0), $pop7
+; NO-SIMD128-FAST-NEXT: f32.max $push0=, $1, $5
+; NO-SIMD128-FAST-NEXT: f32.store 0($0), $pop0
+; NO-SIMD128-FAST-NEXT: f32.max $push1=, $2, $6
+; NO-SIMD128-FAST-NEXT: f32.store 4($0), $pop1
+; NO-SIMD128-FAST-NEXT: f32.max $push2=, $3, $7
+; NO-SIMD128-FAST-NEXT: f32.store 8($0), $pop2
+; NO-SIMD128-FAST-NEXT: f32.max $push3=, $4, $8
+; NO-SIMD128-FAST-NEXT: f32.store 12($0), $pop3
; NO-SIMD128-FAST-NEXT: return
%a = call nnan <4 x float> @llvm.maxnum.v4f32(<4 x float> %x, <4 x float> %y)
ret <4 x float> %a
@@ -12892,67 +12712,15 @@ define <4 x float> @maxnum_one_zero_intrinsic_v4f32(<4 x float> %x, <4 x float>
; SIMD128-LABEL: maxnum_one_zero_intrinsic_v4f32:
; SIMD128: .functype maxnum_one_zero_intrinsic_v4f32 (v128, v128) -> (v128)
; SIMD128-NEXT: # %bb.0:
-; SIMD128-NEXT: f32x4.extract_lane $push27=, $0, 0
-; SIMD128-NEXT: local.tee $push26=, $2=, $pop27
-; SIMD128-NEXT: f32.const $push3=, -0x1p0
-; SIMD128-NEXT: f32.const $push25=, -0x1p0
-; SIMD128-NEXT: f32.gt $push4=, $2, $pop25
-; SIMD128-NEXT: f32.select $push5=, $pop26, $pop3, $pop4
-; SIMD128-NEXT: f32x4.splat $push6=, $pop5
-; SIMD128-NEXT: f32x4.extract_lane $push24=, $0, 1
-; SIMD128-NEXT: local.tee $push23=, $2=, $pop24
-; SIMD128-NEXT: f32.const $push0=, 0x0p0
-; SIMD128-NEXT: f32.const $push22=, 0x0p0
-; SIMD128-NEXT: f32.gt $push1=, $2, $pop22
-; SIMD128-NEXT: f32.select $push2=, $pop23, $pop0, $pop1
-; SIMD128-NEXT: f32x4.replace_lane $push7=, $pop6, 1, $pop2
-; SIMD128-NEXT: f32x4.extract_lane $push21=, $0, 2
-; SIMD128-NEXT: local.tee $push20=, $2=, $pop21
-; SIMD128-NEXT: f32.const $push19=, -0x1p0
-; SIMD128-NEXT: f32.const $push18=, -0x1p0
-; SIMD128-NEXT: f32.gt $push8=, $2, $pop18
-; SIMD128-NEXT: f32.select $push9=, $pop20, $pop19, $pop8
-; SIMD128-NEXT: f32x4.replace_lane $push10=, $pop7, 2, $pop9
-; SIMD128-NEXT: f32x4.extract_lane $push17=, $0, 3
-; SIMD128-NEXT: local.tee $push16=, $2=, $pop17
-; SIMD128-NEXT: f32.const $push15=, -0x1p0
-; SIMD128-NEXT: f32.const $push14=, -0x1p0
-; SIMD128-NEXT: f32.gt $push11=, $2, $pop14
-; SIMD128-NEXT: f32.select $push12=, $pop16, $pop15, $pop11
-; SIMD128-NEXT: f32x4.replace_lane $push13=, $pop10, 3, $pop12
-; SIMD128-NEXT: return $pop13
+; SIMD128-NEXT: v128.const $push0=, -0x1p0, 0x0p0, -0x1p0, -0x1p0
+; SIMD128-NEXT: f32x4.max $push1=, $0, $pop0
+; SIMD128-NEXT: return $pop1
;
; SIMD128-FAST-LABEL: maxnum_one_zero_intrinsic_v4f32:
; SIMD128-FAST: .functype maxnum_one_zero_intrinsic_v4f32 (v128, v128) -> (v128)
; SIMD128-FAST-NEXT: # %bb.0:
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push27=, $0, 0
-; SIMD128-FAST-NEXT: local.tee $push26=, $2=, $pop27
-; SIMD128-FAST-NEXT: f32.const $push4=, -0x1p0
-; SIMD128-FAST-NEXT: f32.const $push25=, -0x1p0
-; SIMD128-FAST-NEXT: f32.gt $push5=, $2, $pop25
-; SIMD128-FAST-NEXT: f32.select $push6=, $pop26, $pop4, $pop5
-; SIMD128-FAST-NEXT: f32x4.splat $push7=, $pop6
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push24=, $0, 1
-; SIMD128-FAST-NEXT: local.tee $push23=, $2=, $pop24
-; SIMD128-FAST-NEXT: f32.const $push1=, 0x0p0
-; SIMD128-FAST-NEXT: f32.const $push22=, 0x0p0
-; SIMD128-FAST-NEXT: f32.gt $push2=, $2, $pop22
-; SIMD128-FAST-NEXT: f32.select $push3=, $pop23, $pop1, $pop2
-; SIMD128-FAST-NEXT: f32x4.replace_lane $push8=, $pop7, 1, $pop3
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push21=, $0, 2
-; SIMD128-FAST-NEXT: local.tee $push20=, $2=, $pop21
-; SIMD128-FAST-NEXT: f32.const $push19=, -0x1p0
-; SIMD128-FAST-NEXT: f32.const $push18=, -0x1p0
-; SIMD128-FAST-NEXT: f32.gt $push9=, $2, $pop18
-; SIMD128-FAST-NEXT: f32.select $push10=, $pop20, $pop19, $pop9
-; SIMD128-FAST-NEXT: f32x4.replace_lane $push11=, $pop8, 2, $pop10
-; SIMD128-FAST-NEXT: f32x4.extract_lane $push17=, $0, 3
-; SIMD128-FAST-NEXT: local.tee $push16=, $2=, $pop17
-; SIMD128-FAST-NEXT: f32.const $push15=, -0x1p0
-; SIMD128-FAST-NEXT: f32.const $push14=, -0x1p0
-; SIMD128-FAST-NEXT: f32.gt $push12=, $2, $pop14
-; SIMD128-FAST-NEXT: f32.select $push13=, $pop16, $pop15, $pop12
-; SIMD128-FAST-NEXT: f32x4.replace_lane $push0=, $pop11, 3, $pop13
+; SIMD128-FAST-NEXT: v128.const $push1=, -0x1p0, 0x0p0, -0x1p0, -0x1p0
+; SIMD128-FAST-NEXT: f32x4.max $push0=, $0, $pop1
; SIMD128-FAST-NEXT: return $pop0
;
; NO-SIMD128-LABEL: maxnum_one_zero_intrinsic_v4f32:
@@ -12961,17 +12729,15 @@ define <4 x float> @maxnum_one_zero_intrinsic_v4f32(<4 x float> %x, <4 x float>
; NO-SIMD128-NEXT: f32.const $push0=, -0x1p0
; NO-SIMD128-NEXT: f32.max $push1=, $4, $pop0
; NO-SIMD128-NEXT: f32.store 12($0), $pop1
-; NO-SIMD128-NEXT: f32.const $push9=, -0x1p0
-; NO-SIMD128-NEXT: f32.max $push2=, $3, $pop9
+; NO-SIMD128-NEXT: f32.const $push7=, -0x1p0
+; NO-SIMD128-NEXT: f32.max $push2=, $3, $pop7
; NO-SIMD128-NEXT: f32.store 8($0), $pop2
; NO-SIMD128-NEXT: f32.const $push3=, 0x0p0
-; NO-SIMD128-NEXT: f32.const $push8=, 0x0p0
-; NO-SIMD128-NEXT: f32.gt $push4=, $2, $pop8
-; NO-SIMD128-NEXT: f32.select $push5=, $2, $pop3, $pop4
-; NO-SIMD128-NEXT: f32.store 4($0), $pop5
-; NO-SIMD128-NEXT: f32.const $push7=, -0x1p0
-; NO-SIMD128-NEXT: f32.max $push6=, $1, $pop7
-; NO-SIMD128-NEXT: f32.store 0($0), $pop6
+; NO-SIMD128-NEXT: f32.max $push4=, $2, $pop3
+; NO-SIMD128-NEXT: f32.store 4($0), $pop4
+; NO-SIMD128-NEXT: f32.const $push6=, -0x1p0
+; NO-SIMD128-NEXT: f32.max $push5=, $1, $pop6
+; NO-SIMD128-NEXT: f32.store 0($0), $pop5
; NO-SIMD128-NEXT: return
;
; NO-SIMD128-FAST-LABEL: maxnum_one_zero_intrinsic_v4f32:
@@ -12980,17 +12746,15 @@ define <4 x float> @maxnum_one_zero_intrinsic_v4f32(<4 x float> %x, <4 x float>
; NO-SIMD128-FAST-NEXT: f32.const $push0=, -0x1p0
; NO-SIMD128-FAST-NEXT: f32.max $push1=, $1, $pop0
; NO-SIMD128-FAST-NEXT: f32.store 0($0), $pop1
-; NO-SIMD128-FAST-NEXT: f32.const $push9=, -0x1p0
-; NO-SIMD128-FAST-NEXT: f32.max $push2=, $3, $pop9
-; NO-SIMD128-FAST-NEXT: f32.store 8($0), $pop2
-; NO-SIMD128-FAST-NEXT: f32.const $push3=, 0x0p0
-; NO-SIMD128-FAST-NEXT: f32.const $push8=, 0x0p0
-; NO-SIMD128-FAST-NEXT: f32.gt $push4=, $2, $pop8
-; NO-SIMD128-FAST-NEXT: f32.select $push5=, $2, $pop3, $pop4
-; NO-SIMD128-FAST-NEXT: f32.store 4($0), $pop5
+; NO-SIMD128-FAST-NEXT: f32.const $push2=, 0x0p0
+; NO-SIMD128-FAST-NEXT: f32.max $push3=, $2, $pop2
+; NO-SIMD128-FAST-NEXT: f32.store 4($0), $pop3
; NO-SIMD128-FAST-NEXT: f32.const $push7=, -0x1p0
-; NO-SIMD128-FAST-NEXT: f32.max $push6=, $4, $pop7
-; NO-SIMD128-FAST-NEXT: f32.store 12($0), $pop6
+; NO-SIMD128-FAST-NEXT: f32.max $push4=, $3, $pop7
+; NO-SIMD128-FAST-NEXT: f32.store 8($0), $pop4
+; NO-SIMD128-FAST-NEXT: f32.const $push6=, -0x1p0
+; NO-SIMD128-FAST-NEXT: f32.max $push5=, $4, $pop6
+; NO-SIMD128-FAST-NEXT: f32.store 12($0), $pop5
; NO-SIMD128-FAST-NEXT: return
%a = call nnan <4 x float> @llvm.maxnum.v4f32(<4 x float> %x, <4 x float><float -1.0, float 0.0, float -1.0, float -1.0>)
ret <4 x float> %a
More information about the llvm-commits
mailing list