[llvm] AArch64: Select FCANONICALIZE (PR #104429)
YunQiang Su via llvm-commits
llvm-commits at lists.llvm.org
Thu Oct 10 02:19:50 PDT 2024
https://github.com/wzssyqa updated https://github.com/llvm/llvm-project/pull/104429
From cfc50bd7a82f1b88540bdb96dfc1ce005f910710 Mon Sep 17 00:00:00 2001
From: YunQiang Su <yunqiang at isrc.iscas.ac.cn>
Date: Thu, 15 Aug 2024 16:40:00 +0800
Subject: [PATCH 1/2] AArch64: Add FCANONICALIZE
The FMINNM/FMAXNM instructions of AArch64 follow IEEE 754-2008 semantics.
We can therefore use them, with both operands set to the same register, to canonicalize a
floating-point number.
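As a minimal sketch of the intended lowering (the expected assembly is for a target with
+fullfp16 and matches the checks in the new test file; the function name is illustrative):

    declare float @llvm.canonicalize.f32(float)

    define float @canon_f32(float %x) {
      %r = call float @llvm.canonicalize.f32(float %x)
      ret float %r
    }
    ; expected codegen: fminnm s0, s0, s0
    ;                   ret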
---
llvm/lib/Target/AArch64/AArch64InstrInfo.td | 17 +
llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll | 168 ++++++
.../AArch64/fp-maximumnum-minimumnum.ll | 560 ++++++++++++++++++
3 files changed, 745 insertions(+)
create mode 100644 llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 33d05d6039b096..9f9311a545117a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -5063,6 +5063,12 @@ def : Pat<(fmaxnum_ieee (f32 FPR32:$a), (f32 FPR32:$b)),
def : Pat<(fmaxnum_ieee (f16 FPR16:$a), (f16 FPR16:$b)),
(FMAXNMHrr FPR16:$a, FPR16:$b)>;
+def : Pat<(f16 (fcanonicalize f16:$a)),
+ (FMINNMHrr f16:$a, f16:$a)>;
+def : Pat<(f32 (fcanonicalize f32:$a)),
+ (FMINNMSrr f32:$a, f32:$a)>;
+def : Pat<(f64 (fcanonicalize f64:$a)),
+ (FMINNMDrr f64:$a, f64:$a)>;
//===----------------------------------------------------------------------===//
// Floating point three operand instructions.
//===----------------------------------------------------------------------===//
@@ -5588,6 +5594,17 @@ def : Pat<(v2f32 (fmaxnum_ieee (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
def : Pat<(v4f16 (fmaxnum_ieee (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
(v4f16 (FMAXNMv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm)))>;
+def : Pat<(v2f64 (fcanonicalize (v2f64 V128:$Rn))),
+ (v2f64 (FMINNMv2f64 (v2f64 V128:$Rn), (v2f64 V128:$Rn)))>;
+def : Pat<(v4f32 (fcanonicalize (v4f32 V128:$Rn))),
+ (v4f32 (FMINNMv4f32 (v4f32 V128:$Rn), (v4f32 V128:$Rn)))>;
+def : Pat<(v8f16 (fcanonicalize (v8f16 V128:$Rn))),
+ (v8f16 (FMINNMv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rn)))>;
+def : Pat<(v2f32 (fcanonicalize (v2f32 V64:$Rn))),
+ (v2f32 (FMINNMv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rn)))>;
+def : Pat<(v4f16 (fcanonicalize (v4f16 V64:$Rn))),
+ (v4f16 (FMINNMv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rn)))>;
+
// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
// instruction expects the addend first, while the fma intrinsic puts it last.
defm FMLA : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
diff --git a/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll b/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll
new file mode 100644
index 00000000000000..2df798bef9e434
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll
@@ -0,0 +1,168 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=aarch64 --mattr=+fullfp16 < %s | FileCheck %s --check-prefix=AARCH64
+
+declare half @llvm.canonicalize.f16(half)
+declare float @llvm.canonicalize.f32(float)
+declare double @llvm.canonicalize.f64(double)
+
+define half @fcanonicalize_f16(half %x) {
+; AARCH64-LABEL: fcanonicalize_f16:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm h0, h0, h0
+; AARCH64-NEXT: ret
+ %z = call half @llvm.canonicalize.f16(half %x)
+ ret half %z
+}
+
+define half @fcanonicalize_f16_nnan(half %x) {
+; AARCH64-LABEL: fcanonicalize_f16_nnan:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm h0, h0, h0
+; AARCH64-NEXT: ret
+ %z = call nnan half @llvm.canonicalize.f16(half %x)
+ ret half %z
+}
+
+define <2 x half> @fcanonicalize_v2f16(<2 x half> %x) {
+; AARCH64-LABEL: fcanonicalize_v2f16:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT: ret
+ %z = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %x)
+ ret <2 x half> %z
+}
+
+define <2 x half> @fcanonicalize_v2f16_nnan(<2 x half> %x) {
+; AARCH64-LABEL: fcanonicalize_v2f16_nnan:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT: ret
+ %z = call nnan <2 x half> @llvm.canonicalize.v2f16(<2 x half> %x)
+ ret <2 x half> %z
+}
+
+define <4 x half> @fcanonicalize_v4f16(<4 x half> %x) {
+; AARCH64-LABEL: fcanonicalize_v4f16:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT: ret
+ %z = call <4 x half> @llvm.canonicalize.v4f16(<4 x half> %x)
+ ret <4 x half> %z
+}
+
+define <4 x half> @fcanonicalize_v4f16_nnan(<4 x half> %x) {
+; AARCH64-LABEL: fcanonicalize_v4f16_nnan:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT: ret
+ %z = call nnan <4 x half> @llvm.canonicalize.v4f16(<4 x half> %x)
+ ret <4 x half> %z
+}
+
+define <8 x half> @fcanonicalize_v8f16(<8 x half> %x) {
+; AARCH64-LABEL: fcanonicalize_v8f16:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT: ret
+ %z = call <8 x half> @llvm.canonicalize.v8f16(<8 x half> %x)
+ ret <8 x half> %z
+}
+
+define <8 x half> @fcanonicalize_v8f16_nnan(<8 x half> %x) {
+; AARCH64-LABEL: fcanonicalize_v8f16_nnan:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT: ret
+ %z = call nnan <8 x half> @llvm.canonicalize.v8f16(<8 x half> %x)
+ ret <8 x half> %z
+}
+
+define float @fcanonicalize_f32(float %x) {
+; AARCH64-LABEL: fcanonicalize_f32:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm s0, s0, s0
+; AARCH64-NEXT: ret
+ %z = call float @llvm.canonicalize.f32(float %x)
+ ret float %z
+}
+
+define float @fcanonicalize_f32_nnan(float %x) {
+; AARCH64-LABEL: fcanonicalize_f32_nnan:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm s0, s0, s0
+; AARCH64-NEXT: ret
+ %z = call nnan float @llvm.canonicalize.f32(float %x)
+ ret float %z
+}
+
+define <2 x float> @fcanonicalize_v2f32(<2 x float> %x) {
+; AARCH64-LABEL: fcanonicalize_v2f32:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm v0.2s, v0.2s, v0.2s
+; AARCH64-NEXT: ret
+ %z = call <2 x float> @llvm.canonicalize.v2f32(<2 x float> %x)
+ ret <2 x float> %z
+}
+
+define <2 x float> @fcanonicalize_v2f32_nnan(<2 x float> %x) {
+; AARCH64-LABEL: fcanonicalize_v2f32_nnan:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm v0.2s, v0.2s, v0.2s
+; AARCH64-NEXT: ret
+ %z = call nnan <2 x float> @llvm.canonicalize.v2f32(<2 x float> %x)
+ ret <2 x float> %z
+}
+
+define <4 x float> @fcanonicalize_v4f32(<4 x float> %x) {
+; AARCH64-LABEL: fcanonicalize_v4f32:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT: ret
+ %z = call <4 x float> @llvm.canonicalize.v4f32(<4 x float> %x)
+ ret <4 x float> %z
+}
+
+define <4 x float> @fcanonicalize_v4f32_nnan(<4 x float> %x) {
+; AARCH64-LABEL: fcanonicalize_v4f32_nnan:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT: ret
+ %z = call nnan <4 x float> @llvm.canonicalize.v4f32(<4 x float> %x)
+ ret <4 x float> %z
+}
+
+define double @fcanonicalize_f64(double %x) {
+; AARCH64-LABEL: fcanonicalize_f64:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm d0, d0, d0
+; AARCH64-NEXT: ret
+ %z = call double @llvm.canonicalize.f64(double %x)
+ ret double %z
+}
+
+define double @fcanonicalize_f64_nnan(double %x) {
+; AARCH64-LABEL: fcanonicalize_f64_nnan:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm d0, d0, d0
+; AARCH64-NEXT: ret
+ %z = call nnan double @llvm.canonicalize.f64(double %x)
+ ret double %z
+}
+
+define <2 x double> @fcanonicalize_v2f64(<2 x double> %x) {
+; AARCH64-LABEL: fcanonicalize_v2f64:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT: ret
+ %z = call <2 x double> @llvm.canonicalize.v2f64(<2 x double> %x)
+ ret <2 x double> %z
+}
+
+define <2 x double> @fcanonicalize_v2f64_nnan(<2 x double> %x) {
+; AARCH64-LABEL: fcanonicalize_v2f64_nnan:
+; AARCH64: // %bb.0:
+; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT: ret
+ %z = call nnan <2 x double> @llvm.canonicalize.v2f64(<2 x double> %x)
+ ret <2 x double> %z
+}
diff --git a/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll b/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll
index b8406179f3cb32..bb3f9a3e52a16b 100644
--- a/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll
+++ b/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll
@@ -472,3 +472,563 @@ entry:
%c = call nnan <16 x half> @llvm.minimumnum.v16f16(<16 x half> %a, <16 x half> %b)
ret <16 x half> %c
}
+
+;;;;;;;;;;;;;;;; max_f64
+define double @max_f64(double %a, double %b) {
+; AARCH64-LABEL: max_f64:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm d1, d1, d1
+; AARCH64-NEXT: fminnm d0, d0, d0
+; AARCH64-NEXT: fmaxnm d0, d0, d1
+; AARCH64-NEXT: ret
+entry:
+ %c = call double @llvm.maximumnum.f64(double %a, double %b)
+ ret double %c
+}
+
+define <2 x double> @max_v2f64(<2 x double> %a, <2 x double> %b) {
+; AARCH64-LABEL: max_v2f64:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v1.2d, v1.2d, v1.2d
+; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT: fmaxnm v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT: ret
+entry:
+ %c = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> %a, <2 x double> %b)
+ ret <2 x double> %c
+}
+
+define <3 x double> @max_v3f64(<3 x double> %a, <3 x double> %b) {
+; AARCH64-LABEL: max_v3f64:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: // kill: def $d3 killed $d3 def $q3
+; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT: // kill: def $d4 killed $d4 def $q4
+; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT: // kill: def $d2 killed $d2 def $q2
+; AARCH64-NEXT: // kill: def $d5 killed $d5 def $q5
+; AARCH64-NEXT: mov v0.d[1], v1.d[0]
+; AARCH64-NEXT: mov v3.d[1], v4.d[0]
+; AARCH64-NEXT: fminnm v2.2d, v2.2d, v2.2d
+; AARCH64-NEXT: fminnm v1.2d, v3.2d, v3.2d
+; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT: fmaxnm v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT: fminnm v1.2d, v5.2d, v5.2d
+; AARCH64-NEXT: fmaxnm v2.2d, v2.2d, v1.2d
+; AARCH64-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; AARCH64-NEXT: // kill: def $d0 killed $d0 killed $q0
+; AARCH64-NEXT: // kill: def $d1 killed $d1 killed $q1
+; AARCH64-NEXT: // kill: def $d2 killed $d2 killed $q2
+; AARCH64-NEXT: ret
+entry:
+ %c = call <3 x double> @llvm.maximumnum.v3f64(<3 x double> %a, <3 x double> %b)
+ ret <3 x double> %c
+}
+
+define <4 x double> @max_v4f64(<4 x double> %a, <4 x double> %b) {
+; AARCH64-LABEL: max_v4f64:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v2.2d, v2.2d, v2.2d
+; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT: fminnm v3.2d, v3.2d, v3.2d
+; AARCH64-NEXT: fminnm v1.2d, v1.2d, v1.2d
+; AARCH64-NEXT: fmaxnm v0.2d, v0.2d, v2.2d
+; AARCH64-NEXT: fmaxnm v1.2d, v1.2d, v3.2d
+; AARCH64-NEXT: ret
+entry:
+ %c = call <4 x double> @llvm.maximumnum.v4f64(<4 x double> %a, <4 x double> %b)
+ ret <4 x double> %c
+}
+
+;;;;;;;;;;;;;;;;;; max_f32
+define float @max_f32(float %a, float %b) {
+; AARCH64-LABEL: max_f32:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm s1, s1, s1
+; AARCH64-NEXT: fminnm s0, s0, s0
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: ret
+entry:
+ %c = call float @llvm.maximumnum.f32(float %a, float %b)
+ ret float %c
+}
+
+define <2 x float> @max_v2f32(<2 x float> %a, <2 x float> %b) {
+; AARCH64-LABEL: max_v2f32:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v1.2s, v1.2s, v1.2s
+; AARCH64-NEXT: fminnm v0.2s, v0.2s, v0.2s
+; AARCH64-NEXT: fmaxnm v0.2s, v0.2s, v1.2s
+; AARCH64-NEXT: ret
+entry:
+ %c = call <2 x float> @llvm.maximumnum.v2f32(<2 x float> %a, <2 x float> %b)
+ ret <2 x float> %c
+}
+
+define <3 x float> @max_v3f32(<3 x float> %a, <3 x float> %b) {
+; AARCH64-LABEL: max_v3f32:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v1.4s, v1.4s, v1.4s
+; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT: fmaxnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT: ret
+entry:
+ %c = call <3 x float> @llvm.maximumnum.v3f32(<3 x float> %a, <3 x float> %b)
+ ret <3 x float> %c
+}
+
+define <4 x float> @max_v4f32(<4 x float> %a, <4 x float> %b) {
+; AARCH64-LABEL: max_v4f32:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v1.4s, v1.4s, v1.4s
+; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT: fmaxnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT: ret
+entry:
+ %c = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %a, <4 x float> %b)
+ ret <4 x float> %c
+}
+
+define <5 x float> @max_v5f32(<5 x float> %a, <5 x float> %b) {
+; AARCH64-LABEL: max_v5f32:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: // kill: def $s0 killed $s0 def $q0
+; AARCH64-NEXT: // kill: def $s5 killed $s5 def $q5
+; AARCH64-NEXT: // kill: def $s1 killed $s1 def $q1
+; AARCH64-NEXT: // kill: def $s6 killed $s6 def $q6
+; AARCH64-NEXT: // kill: def $s2 killed $s2 def $q2
+; AARCH64-NEXT: // kill: def $s7 killed $s7 def $q7
+; AARCH64-NEXT: // kill: def $s3 killed $s3 def $q3
+; AARCH64-NEXT: mov x8, sp
+; AARCH64-NEXT: // kill: def $s4 killed $s4 def $q4
+; AARCH64-NEXT: mov v0.s[1], v1.s[0]
+; AARCH64-NEXT: mov v5.s[1], v6.s[0]
+; AARCH64-NEXT: mov v0.s[2], v2.s[0]
+; AARCH64-NEXT: mov v5.s[2], v7.s[0]
+; AARCH64-NEXT: ldr s2, [sp, #8]
+; AARCH64-NEXT: fminnm v2.4s, v2.4s, v2.4s
+; AARCH64-NEXT: mov v0.s[3], v3.s[0]
+; AARCH64-NEXT: ld1 { v5.s }[3], [x8]
+; AARCH64-NEXT: fminnm v3.4s, v4.4s, v4.4s
+; AARCH64-NEXT: fminnm v1.4s, v5.4s, v5.4s
+; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT: fmaxnm v4.4s, v3.4s, v2.4s
+; AARCH64-NEXT: // kill: def $s4 killed $s4 killed $q4
+; AARCH64-NEXT: fmaxnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT: mov s1, v0.s[1]
+; AARCH64-NEXT: mov s2, v0.s[2]
+; AARCH64-NEXT: mov s3, v0.s[3]
+; AARCH64-NEXT: // kill: def $s0 killed $s0 killed $q0
+; AARCH64-NEXT: ret
+entry:
+ %c = call <5 x float> @llvm.maximumnum.v5f32(<5 x float> %a, <5 x float> %b)
+ ret <5 x float> %c
+}
+
+define <8 x float> @max_v8f32(<8 x float> %a, <8 x float> %b) {
+; AARCH64-LABEL: max_v8f32:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v2.4s, v2.4s, v2.4s
+; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT: fminnm v3.4s, v3.4s, v3.4s
+; AARCH64-NEXT: fminnm v1.4s, v1.4s, v1.4s
+; AARCH64-NEXT: fmaxnm v0.4s, v0.4s, v2.4s
+; AARCH64-NEXT: fmaxnm v1.4s, v1.4s, v3.4s
+; AARCH64-NEXT: ret
+entry:
+ %c = call <8 x float> @llvm.maximumnum.v8f32(<8 x float> %a, <8 x float> %b)
+ ret <8 x float> %c
+}
+
+;;;;;;;;;;;;;;;;;; max_f16
+define half @max_f16(half %a, half %b) {
+; AARCH64-LABEL: max_f16:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm h1, h1, h1
+; AARCH64-NEXT: fminnm h0, h0, h0
+; AARCH64-NEXT: fmaxnm h0, h0, h1
+; AARCH64-NEXT: ret
+entry:
+ %c = call half @llvm.maximumnum.f16(half %a, half %b)
+ ret half %c
+}
+
+define <2 x half> @max_v2f16(<2 x half> %a, <2 x half> %b) {
+; AARCH64-LABEL: max_v2f16:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v1.4h, v1.4h, v1.4h
+; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT: fmaxnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT: ret
+entry:
+ %c = call <2 x half> @llvm.maximumnum.v2f16(<2 x half> %a, <2 x half> %b)
+ ret <2 x half> %c
+}
+
+define <4 x half> @max_v4f16(<4 x half> %a, <4 x half> %b) {
+; AARCH64-LABEL: max_v4f16:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v1.4h, v1.4h, v1.4h
+; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT: fmaxnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT: ret
+entry:
+ %c = call <4 x half> @llvm.maximumnum.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x half> %c
+}
+
+define <8 x half> @max_v8f16(<8 x half> %a, <8 x half> %b) {
+; AARCH64-LABEL: max_v8f16:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v1.8h, v1.8h, v1.8h
+; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT: fmaxnm v0.8h, v0.8h, v1.8h
+; AARCH64-NEXT: ret
+entry:
+ %c = call <8 x half> @llvm.maximumnum.v8f16(<8 x half> %a, <8 x half> %b)
+ ret <8 x half> %c
+}
+
+define <9 x half> @max_v9f16(<9 x half> %a, <9 x half> %b) {
+; AARCH64-LABEL: max_v9f16:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: // kill: def $h0 killed $h0 def $q0
+; AARCH64-NEXT: // kill: def $h1 killed $h1 def $q1
+; AARCH64-NEXT: // kill: def $h2 killed $h2 def $q2
+; AARCH64-NEXT: add x9, sp, #16
+; AARCH64-NEXT: // kill: def $h3 killed $h3 def $q3
+; AARCH64-NEXT: // kill: def $h4 killed $h4 def $q4
+; AARCH64-NEXT: // kill: def $h5 killed $h5 def $q5
+; AARCH64-NEXT: // kill: def $h6 killed $h6 def $q6
+; AARCH64-NEXT: // kill: def $h7 killed $h7 def $q7
+; AARCH64-NEXT: mov v0.h[1], v1.h[0]
+; AARCH64-NEXT: ldr h1, [sp, #8]
+; AARCH64-NEXT: ld1 { v1.h }[1], [x9]
+; AARCH64-NEXT: add x9, sp, #24
+; AARCH64-NEXT: mov v0.h[2], v2.h[0]
+; AARCH64-NEXT: ldr h2, [sp]
+; AARCH64-NEXT: ld1 { v1.h }[2], [x9]
+; AARCH64-NEXT: add x9, sp, #32
+; AARCH64-NEXT: fminnm v2.8h, v2.8h, v2.8h
+; AARCH64-NEXT: mov v0.h[3], v3.h[0]
+; AARCH64-NEXT: ld1 { v1.h }[3], [x9]
+; AARCH64-NEXT: add x9, sp, #40
+; AARCH64-NEXT: ldr h3, [sp, #72]
+; AARCH64-NEXT: ld1 { v1.h }[4], [x9]
+; AARCH64-NEXT: add x9, sp, #48
+; AARCH64-NEXT: fminnm v3.8h, v3.8h, v3.8h
+; AARCH64-NEXT: mov v0.h[4], v4.h[0]
+; AARCH64-NEXT: ld1 { v1.h }[5], [x9]
+; AARCH64-NEXT: add x9, sp, #56
+; AARCH64-NEXT: fmaxnm v2.8h, v2.8h, v3.8h
+; AARCH64-NEXT: mov v0.h[5], v5.h[0]
+; AARCH64-NEXT: ld1 { v1.h }[6], [x9]
+; AARCH64-NEXT: add x9, sp, #64
+; AARCH64-NEXT: str h2, [x8, #16]
+; AARCH64-NEXT: mov v0.h[6], v6.h[0]
+; AARCH64-NEXT: ld1 { v1.h }[7], [x9]
+; AARCH64-NEXT: fminnm v1.8h, v1.8h, v1.8h
+; AARCH64-NEXT: mov v0.h[7], v7.h[0]
+; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT: fmaxnm v0.8h, v0.8h, v1.8h
+; AARCH64-NEXT: str q0, [x8]
+; AARCH64-NEXT: ret
+entry:
+ %c = call <9 x half> @llvm.maximumnum.v9f16(<9 x half> %a, <9 x half> %b)
+ ret <9 x half> %c
+}
+
+define <16 x half> @max_v16f16(<16 x half> %a, <16 x half> %b) {
+; AARCH64-LABEL: max_v16f16:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v2.8h, v2.8h, v2.8h
+; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT: fminnm v3.8h, v3.8h, v3.8h
+; AARCH64-NEXT: fminnm v1.8h, v1.8h, v1.8h
+; AARCH64-NEXT: fmaxnm v0.8h, v0.8h, v2.8h
+; AARCH64-NEXT: fmaxnm v1.8h, v1.8h, v3.8h
+; AARCH64-NEXT: ret
+entry:
+ %c = call <16 x half> @llvm.maximumnum.v16f16(<16 x half> %a, <16 x half> %b)
+ ret <16 x half> %c
+}
+
+;;;;;;;;;;;;;;;; min_f64
+define double @min_f64(double %a, double %b) {
+; AARCH64-LABEL: min_f64:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm d1, d1, d1
+; AARCH64-NEXT: fminnm d0, d0, d0
+; AARCH64-NEXT: fminnm d0, d0, d1
+; AARCH64-NEXT: ret
+entry:
+ %c = call double @llvm.minimumnum.f64(double %a, double %b)
+ ret double %c
+}
+
+define <2 x double> @min_v2f64(<2 x double> %a, <2 x double> %b) {
+; AARCH64-LABEL: min_v2f64:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v1.2d, v1.2d, v1.2d
+; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT: fminnm v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT: ret
+entry:
+ %c = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %a, <2 x double> %b)
+ ret <2 x double> %c
+}
+
+define <3 x double> @min_v3f64(<3 x double> %a, <3 x double> %b) {
+; AARCH64-LABEL: min_v3f64:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: // kill: def $d3 killed $d3 def $q3
+; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT: // kill: def $d4 killed $d4 def $q4
+; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT: // kill: def $d2 killed $d2 def $q2
+; AARCH64-NEXT: // kill: def $d5 killed $d5 def $q5
+; AARCH64-NEXT: mov v0.d[1], v1.d[0]
+; AARCH64-NEXT: mov v3.d[1], v4.d[0]
+; AARCH64-NEXT: fminnm v2.2d, v2.2d, v2.2d
+; AARCH64-NEXT: fminnm v1.2d, v3.2d, v3.2d
+; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT: fminnm v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT: fminnm v1.2d, v5.2d, v5.2d
+; AARCH64-NEXT: fminnm v2.2d, v2.2d, v1.2d
+; AARCH64-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; AARCH64-NEXT: // kill: def $d0 killed $d0 killed $q0
+; AARCH64-NEXT: // kill: def $d1 killed $d1 killed $q1
+; AARCH64-NEXT: // kill: def $d2 killed $d2 killed $q2
+; AARCH64-NEXT: ret
+entry:
+ %c = call <3 x double> @llvm.minimumnum.v3f64(<3 x double> %a, <3 x double> %b)
+ ret <3 x double> %c
+}
+
+define <4 x double> @min_v4f64(<4 x double> %a, <4 x double> %b) {
+; AARCH64-LABEL: min_v4f64:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v2.2d, v2.2d, v2.2d
+; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT: fminnm v3.2d, v3.2d, v3.2d
+; AARCH64-NEXT: fminnm v1.2d, v1.2d, v1.2d
+; AARCH64-NEXT: fminnm v0.2d, v0.2d, v2.2d
+; AARCH64-NEXT: fminnm v1.2d, v1.2d, v3.2d
+; AARCH64-NEXT: ret
+entry:
+ %c = call <4 x double> @llvm.minimumnum.v4f64(<4 x double> %a, <4 x double> %b)
+ ret <4 x double> %c
+}
+
+;;;;;;;;;;;;;;;;;; min_f32
+define float @min_f32(float %a, float %b) {
+; AARCH64-LABEL: min_f32:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm s1, s1, s1
+; AARCH64-NEXT: fminnm s0, s0, s0
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: ret
+entry:
+ %c = call float @llvm.minimumnum.f32(float %a, float %b)
+ ret float %c
+}
+
+define <2 x float> @min_v2f32(<2 x float> %a, <2 x float> %b) {
+; AARCH64-LABEL: min_v2f32:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v1.2s, v1.2s, v1.2s
+; AARCH64-NEXT: fminnm v0.2s, v0.2s, v0.2s
+; AARCH64-NEXT: fminnm v0.2s, v0.2s, v1.2s
+; AARCH64-NEXT: ret
+entry:
+ %c = call <2 x float> @llvm.minimumnum.v2f32(<2 x float> %a, <2 x float> %b)
+ ret <2 x float> %c
+}
+
+define <3 x float> @min_v3f32(<3 x float> %a, <3 x float> %b) {
+; AARCH64-LABEL: min_v3f32:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v1.4s, v1.4s, v1.4s
+; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT: fminnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT: ret
+entry:
+ %c = call <3 x float> @llvm.minimumnum.v3f32(<3 x float> %a, <3 x float> %b)
+ ret <3 x float> %c
+}
+
+define <4 x float> @min_v4f32(<4 x float> %a, <4 x float> %b) {
+; AARCH64-LABEL: min_v4f32:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v1.4s, v1.4s, v1.4s
+; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT: fminnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT: ret
+entry:
+ %c = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> %a, <4 x float> %b)
+ ret <4 x float> %c
+}
+
+define <5 x float> @min_v5f32(<5 x float> %a, <5 x float> %b) {
+; AARCH64-LABEL: min_v5f32:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: // kill: def $s0 killed $s0 def $q0
+; AARCH64-NEXT: // kill: def $s5 killed $s5 def $q5
+; AARCH64-NEXT: // kill: def $s1 killed $s1 def $q1
+; AARCH64-NEXT: // kill: def $s6 killed $s6 def $q6
+; AARCH64-NEXT: // kill: def $s2 killed $s2 def $q2
+; AARCH64-NEXT: // kill: def $s7 killed $s7 def $q7
+; AARCH64-NEXT: // kill: def $s3 killed $s3 def $q3
+; AARCH64-NEXT: mov x8, sp
+; AARCH64-NEXT: // kill: def $s4 killed $s4 def $q4
+; AARCH64-NEXT: mov v0.s[1], v1.s[0]
+; AARCH64-NEXT: mov v5.s[1], v6.s[0]
+; AARCH64-NEXT: mov v0.s[2], v2.s[0]
+; AARCH64-NEXT: mov v5.s[2], v7.s[0]
+; AARCH64-NEXT: ldr s2, [sp, #8]
+; AARCH64-NEXT: fminnm v2.4s, v2.4s, v2.4s
+; AARCH64-NEXT: mov v0.s[3], v3.s[0]
+; AARCH64-NEXT: ld1 { v5.s }[3], [x8]
+; AARCH64-NEXT: fminnm v3.4s, v4.4s, v4.4s
+; AARCH64-NEXT: fminnm v1.4s, v5.4s, v5.4s
+; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT: fminnm v4.4s, v3.4s, v2.4s
+; AARCH64-NEXT: // kill: def $s4 killed $s4 killed $q4
+; AARCH64-NEXT: fminnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT: mov s1, v0.s[1]
+; AARCH64-NEXT: mov s2, v0.s[2]
+; AARCH64-NEXT: mov s3, v0.s[3]
+; AARCH64-NEXT: // kill: def $s0 killed $s0 killed $q0
+; AARCH64-NEXT: ret
+entry:
+ %c = call <5 x float> @llvm.minimumnum.v5f32(<5 x float> %a, <5 x float> %b)
+ ret <5 x float> %c
+}
+
+define <8 x float> @min_v8f32(<8 x float> %a, <8 x float> %b) {
+; AARCH64-LABEL: min_v8f32:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v2.4s, v2.4s, v2.4s
+; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT: fminnm v3.4s, v3.4s, v3.4s
+; AARCH64-NEXT: fminnm v1.4s, v1.4s, v1.4s
+; AARCH64-NEXT: fminnm v0.4s, v0.4s, v2.4s
+; AARCH64-NEXT: fminnm v1.4s, v1.4s, v3.4s
+; AARCH64-NEXT: ret
+entry:
+ %c = call <8 x float> @llvm.minimumnum.v8f32(<8 x float> %a, <8 x float> %b)
+ ret <8 x float> %c
+}
+
+;;;;;;;;;;;;;;;;;; min_f16
+define half @min_f16(half %a, half %b) {
+; AARCH64-LABEL: min_f16:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm h1, h1, h1
+; AARCH64-NEXT: fminnm h0, h0, h0
+; AARCH64-NEXT: fminnm h0, h0, h1
+; AARCH64-NEXT: ret
+entry:
+ %c = call half @llvm.minimumnum.f16(half %a, half %b)
+ ret half %c
+}
+
+define <2 x half> @min_v2f16(<2 x half> %a, <2 x half> %b) {
+; AARCH64-LABEL: min_v2f16:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v1.4h, v1.4h, v1.4h
+; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT: fminnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT: ret
+entry:
+ %c = call <2 x half> @llvm.minimumnum.v2f16(<2 x half> %a, <2 x half> %b)
+ ret <2 x half> %c
+}
+
+define <4 x half> @min_v4f16(<4 x half> %a, <4 x half> %b) {
+; AARCH64-LABEL: min_v4f16:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v1.4h, v1.4h, v1.4h
+; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT: fminnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT: ret
+entry:
+ %c = call <4 x half> @llvm.minimumnum.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x half> %c
+}
+
+define <8 x half> @min_v8f16(<8 x half> %a, <8 x half> %b) {
+; AARCH64-LABEL: min_v8f16:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v1.8h, v1.8h, v1.8h
+; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT: fminnm v0.8h, v0.8h, v1.8h
+; AARCH64-NEXT: ret
+entry:
+ %c = call <8 x half> @llvm.minimumnum.v8f16(<8 x half> %a, <8 x half> %b)
+ ret <8 x half> %c
+}
+
+define <9 x half> @min_v9f16(<9 x half> %a, <9 x half> %b) {
+; AARCH64-LABEL: min_v9f16:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: // kill: def $h0 killed $h0 def $q0
+; AARCH64-NEXT: // kill: def $h1 killed $h1 def $q1
+; AARCH64-NEXT: // kill: def $h2 killed $h2 def $q2
+; AARCH64-NEXT: add x9, sp, #16
+; AARCH64-NEXT: // kill: def $h3 killed $h3 def $q3
+; AARCH64-NEXT: // kill: def $h4 killed $h4 def $q4
+; AARCH64-NEXT: // kill: def $h5 killed $h5 def $q5
+; AARCH64-NEXT: // kill: def $h6 killed $h6 def $q6
+; AARCH64-NEXT: // kill: def $h7 killed $h7 def $q7
+; AARCH64-NEXT: mov v0.h[1], v1.h[0]
+; AARCH64-NEXT: ldr h1, [sp, #8]
+; AARCH64-NEXT: ld1 { v1.h }[1], [x9]
+; AARCH64-NEXT: add x9, sp, #24
+; AARCH64-NEXT: mov v0.h[2], v2.h[0]
+; AARCH64-NEXT: ldr h2, [sp]
+; AARCH64-NEXT: ld1 { v1.h }[2], [x9]
+; AARCH64-NEXT: add x9, sp, #32
+; AARCH64-NEXT: fminnm v2.8h, v2.8h, v2.8h
+; AARCH64-NEXT: mov v0.h[3], v3.h[0]
+; AARCH64-NEXT: ld1 { v1.h }[3], [x9]
+; AARCH64-NEXT: add x9, sp, #40
+; AARCH64-NEXT: ldr h3, [sp, #72]
+; AARCH64-NEXT: ld1 { v1.h }[4], [x9]
+; AARCH64-NEXT: add x9, sp, #48
+; AARCH64-NEXT: fminnm v3.8h, v3.8h, v3.8h
+; AARCH64-NEXT: mov v0.h[4], v4.h[0]
+; AARCH64-NEXT: ld1 { v1.h }[5], [x9]
+; AARCH64-NEXT: add x9, sp, #56
+; AARCH64-NEXT: fminnm v2.8h, v2.8h, v3.8h
+; AARCH64-NEXT: mov v0.h[5], v5.h[0]
+; AARCH64-NEXT: ld1 { v1.h }[6], [x9]
+; AARCH64-NEXT: add x9, sp, #64
+; AARCH64-NEXT: str h2, [x8, #16]
+; AARCH64-NEXT: mov v0.h[6], v6.h[0]
+; AARCH64-NEXT: ld1 { v1.h }[7], [x9]
+; AARCH64-NEXT: fminnm v1.8h, v1.8h, v1.8h
+; AARCH64-NEXT: mov v0.h[7], v7.h[0]
+; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT: fminnm v0.8h, v0.8h, v1.8h
+; AARCH64-NEXT: str q0, [x8]
+; AARCH64-NEXT: ret
+entry:
+ %c = call <9 x half> @llvm.minimumnum.v9f16(<9 x half> %a, <9 x half> %b)
+ ret <9 x half> %c
+}
+
+define <16 x half> @min_v16f16(<16 x half> %a, <16 x half> %b) {
+; AARCH64-LABEL: min_v16f16:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm v2.8h, v2.8h, v2.8h
+; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT: fminnm v3.8h, v3.8h, v3.8h
+; AARCH64-NEXT: fminnm v1.8h, v1.8h, v1.8h
+; AARCH64-NEXT: fminnm v0.8h, v0.8h, v2.8h
+; AARCH64-NEXT: fminnm v1.8h, v1.8h, v3.8h
+; AARCH64-NEXT: ret
+entry:
+ %c = call <16 x half> @llvm.minimumnum.v16f16(<16 x half> %a, <16 x half> %b)
+ ret <16 x half> %c
+}
From 8798df358211674dc2e170e80c9b0e4c9973bac6 Mon Sep 17 00:00:00 2001
From: YunQiang Su <yunqiang at isrc.iscas.ac.cn>
Date: Thu, 10 Oct 2024 17:12:22 +0800
Subject: [PATCH 2/2] AArch64: handle targets where NEON or fullfp16 is disabled
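When fullfp16 is not available, FCANONICALIZE on f16 is promoted to f32. A rough sketch of
the resulting scalar code, matching the CHECK-NOFP16 lines added below (the function name is
illustrative):

    declare half @llvm.canonicalize.f16(half)

    define half @canon_f16(half %x) {
      %r = call half @llvm.canonicalize.f16(half %x)
      ret half %r
    }
    ; expected codegen without +fullfp16 (promotion via f32):
    ;   fcvt   s0, h0
    ;   fminnm s0, s0, s0
    ;   fcvt   h0, s0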
---
.../Target/AArch64/AArch64ISelLowering.cpp | 6 +
llvm/lib/Target/AArch64/AArch64InstrInfo.td | 60 +-
llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll | 565 +++++++++++++++---
3 files changed, 531 insertions(+), 100 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c1aefee3793c96..b00f5ee418f1f5 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -880,6 +880,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(Op, MVT::f16, Legal);
}
// clang-format on
+ if (!Subtarget->hasFullFP16())
+ setOperationPromotedToType(ISD::FCANONICALIZE, MVT::f16, MVT::f32);
// Basic strict FP operations are legal
for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
@@ -1362,6 +1364,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
for (MVT Ty : {MVT::v4f16, MVT::v8f16})
setOperationAction(Op, Ty, Legal);
}
+ if (!Subtarget->hasFullFP16()) {
+ setOperationPromotedToType(ISD::FCANONICALIZE, MVT::v4f16, MVT::v4f32);
+ setOperationPromotedToType(ISD::FCANONICALIZE, MVT::v8f16, MVT::v8f32);
+ }
// LRINT and LLRINT.
for (auto Op : {ISD::LRINT, ISD::LLRINT}) {
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 9f9311a545117a..325508b62a9f14 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -5052,23 +5052,25 @@ def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
def : Pat<(fminnum_ieee (f64 FPR64:$a), (f64 FPR64:$b)),
(FMINNMDrr FPR64:$a, FPR64:$b)>;
-def : Pat<(fminnum_ieee (f32 FPR32:$a), (f32 FPR32:$b)),
- (FMINNMSrr FPR32:$a, FPR32:$b)>;
-def : Pat<(fminnum_ieee (f16 FPR16:$a), (f16 FPR16:$b)),
- (FMINNMHrr FPR16:$a, FPR16:$b)>;
def : Pat<(fmaxnum_ieee (f64 FPR64:$a), (f64 FPR64:$b)),
(FMAXNMDrr FPR64:$a, FPR64:$b)>;
+def : Pat<(f64 (fcanonicalize f64:$a)),
+ (FMINNMDrr f64:$a, f64:$a)>;
+def : Pat<(fminnum_ieee (f32 FPR32:$a), (f32 FPR32:$b)),
+ (FMINNMSrr FPR32:$a, FPR32:$b)>;
def : Pat<(fmaxnum_ieee (f32 FPR32:$a), (f32 FPR32:$b)),
(FMAXNMSrr FPR32:$a, FPR32:$b)>;
+def : Pat<(f32 (fcanonicalize f32:$a)),
+ (FMINNMSrr f32:$a, f32:$a)>;
+
+let Predicates = [HasFullFP16] in {
+def : Pat<(fminnum_ieee (f16 FPR16:$a), (f16 FPR16:$b)),
+ (FMINNMHrr FPR16:$a, FPR16:$b)>;
def : Pat<(fmaxnum_ieee (f16 FPR16:$a), (f16 FPR16:$b)),
(FMAXNMHrr FPR16:$a, FPR16:$b)>;
-
def : Pat<(f16 (fcanonicalize f16:$a)),
(FMINNMHrr f16:$a, f16:$a)>;
-def : Pat<(f32 (fcanonicalize f32:$a)),
- (FMINNMSrr f32:$a, f32:$a)>;
-def : Pat<(f64 (fcanonicalize f64:$a)),
- (FMINNMDrr f64:$a, f64:$a)>;
+}
//===----------------------------------------------------------------------===//
// Floating point three operand instructions.
//===----------------------------------------------------------------------===//
@@ -5573,37 +5575,41 @@ defm FMINNM : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", any_fminnum>;
defm FMINP : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
defm FMIN : SIMDThreeSameVectorFP<0,1,0b110,"fmin", any_fminimum>;
+let Predicates = [HasNEON] in {
def : Pat<(v2f64 (fminnum_ieee (v2f64 V128:$Rn), (v2f64 V128:$Rm))),
(v2f64 (FMINNMv2f64 (v2f64 V128:$Rn), (v2f64 V128:$Rm)))>;
-def : Pat<(v4f32 (fminnum_ieee (v4f32 V128:$Rn), (v4f32 V128:$Rm))),
- (v4f32 (FMINNMv4f32 (v4f32 V128:$Rn), (v4f32 V128:$Rm)))>;
-def : Pat<(v8f16 (fminnum_ieee (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
- (v8f16 (FMINNMv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm)))>;
-def : Pat<(v2f32 (fminnum_ieee (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
- (v2f32 (FMINNMv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm)))>;
-def : Pat<(v4f16 (fminnum_ieee (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
- (v4f16 (FMINNMv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm)))>;
def : Pat<(v2f64 (fmaxnum_ieee (v2f64 V128:$Rn), (v2f64 V128:$Rm))),
(v2f64 (FMAXNMv2f64 (v2f64 V128:$Rn), (v2f64 V128:$Rm)))>;
+def : Pat<(v2f64 (fcanonicalize (v2f64 V128:$Rn))),
+ (v2f64 (FMINNMv2f64 (v2f64 V128:$Rn), (v2f64 V128:$Rn)))>;
+def : Pat<(v4f32 (fminnum_ieee (v4f32 V128:$Rn), (v4f32 V128:$Rm))),
+ (v4f32 (FMINNMv4f32 (v4f32 V128:$Rn), (v4f32 V128:$Rm)))>;
def : Pat<(v4f32 (fmaxnum_ieee (v4f32 V128:$Rn), (v4f32 V128:$Rm))),
(v4f32 (FMAXNMv4f32 (v4f32 V128:$Rn), (v4f32 V128:$Rm)))>;
-def : Pat<(v8f16 (fmaxnum_ieee (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
- (v8f16 (FMAXNMv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm)))>;
+def : Pat<(v4f32 (fcanonicalize (v4f32 V128:$Rn))),
+ (v4f32 (FMINNMv4f32 (v4f32 V128:$Rn), (v4f32 V128:$Rn)))>;
+def : Pat<(v2f32 (fminnum_ieee (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
+ (v2f32 (FMINNMv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm)))>;
def : Pat<(v2f32 (fmaxnum_ieee (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
(v2f32 (FMAXNMv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm)))>;
-def : Pat<(v4f16 (fmaxnum_ieee (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
- (v4f16 (FMAXNMv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm)))>;
+def : Pat<(v2f32 (fcanonicalize (v2f32 V64:$Rn))),
+ (v2f32 (FMINNMv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rn)))>;
+}
-def : Pat<(v2f64 (fcanonicalize (v2f64 V128:$Rn))),
- (v2f64 (FMINNMv2f64 (v2f64 V128:$Rn), (v2f64 V128:$Rn)))>;
-def : Pat<(v4f32 (fcanonicalize (v4f32 V128:$Rn))),
- (v4f32 (FMINNMv4f32 (v4f32 V128:$Rn), (v4f32 V128:$Rn)))>;
+let Predicates = [HasNEON, HasFullFP16] in {
+def : Pat<(v8f16 (fminnum_ieee (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
+ (v8f16 (FMINNMv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm)))>;
+def : Pat<(v8f16 (fmaxnum_ieee (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
+ (v8f16 (FMAXNMv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm)))>;
def : Pat<(v8f16 (fcanonicalize (v8f16 V128:$Rn))),
(v8f16 (FMINNMv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rn)))>;
-def : Pat<(v2f32 (fcanonicalize (v2f32 V64:$Rn))),
- (v2f32 (FMINNMv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rn)))>;
+def : Pat<(v4f16 (fminnum_ieee (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
+ (v4f16 (FMINNMv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm)))>;
+def : Pat<(v4f16 (fmaxnum_ieee (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
+ (v4f16 (FMAXNMv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm)))>;
def : Pat<(v4f16 (fcanonicalize (v4f16 V64:$Rn))),
(v4f16 (FMINNMv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rn)))>;
+}
// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
// instruction expects the addend first, while the fma intrinsic puts it last.
diff --git a/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll b/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll
index 2df798bef9e434..753e2b73433994 100644
--- a/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll
+++ b/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll
@@ -1,168 +1,587 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc --mtriple=aarch64 --mattr=+fullfp16 < %s | FileCheck %s --check-prefix=AARCH64
+; RUN: llc --mtriple=aarch64 --mattr=-fullfp16,-neon < %s | FileCheck %s --check-prefix=CHECK-NOFP16-NONEON
+; RUN: llc --mtriple=aarch64 --mattr=+fullfp16,-neon < %s | FileCheck %s --check-prefix=CHECK-FP16-NONEON
+; RUN: llc --mtriple=aarch64 --mattr=-fullfp16,+neon < %s | FileCheck %s --check-prefix=CHECK-NOFP16-NEON
+; RUN: llc --mtriple=aarch64 --mattr=+fullfp16,+neon < %s | FileCheck %s --check-prefix=CHECK-FP16-NEON
declare half @llvm.canonicalize.f16(half)
declare float @llvm.canonicalize.f32(float)
declare double @llvm.canonicalize.f64(double)
define half @fcanonicalize_f16(half %x) {
-; AARCH64-LABEL: fcanonicalize_f16:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm h0, h0, h0
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_f16:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fcvt s0, h0
+; CHECK-NOFP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NONEON-NEXT: fcvt h0, s0
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_f16:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm h0, h0, h0
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_f16:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fcvt s0, h0
+; CHECK-NOFP16-NEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NEON-NEXT: fcvt h0, s0
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_f16:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm h0, h0, h0
+; CHECK-FP16-NEON-NEXT: ret
%z = call half @llvm.canonicalize.f16(half %x)
ret half %z
}
define half @fcanonicalize_f16_nnan(half %x) {
-; AARCH64-LABEL: fcanonicalize_f16_nnan:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm h0, h0, h0
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_f16_nnan:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fcvt s0, h0
+; CHECK-NOFP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NONEON-NEXT: fcvt h0, s0
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_f16_nnan:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm h0, h0, h0
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_f16_nnan:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fcvt s0, h0
+; CHECK-NOFP16-NEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NEON-NEXT: fcvt h0, s0
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_f16_nnan:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm h0, h0, h0
+; CHECK-FP16-NEON-NEXT: ret
%z = call nnan half @llvm.canonicalize.f16(half %x)
ret half %z
}
define <2 x half> @fcanonicalize_v2f16(<2 x half> %x) {
-; AARCH64-LABEL: fcanonicalize_v2f16:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_v2f16:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fcvt s0, h0
+; CHECK-NOFP16-NONEON-NEXT: fcvt s1, h1
+; CHECK-NOFP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NONEON-NEXT: fminnm s1, s1, s1
+; CHECK-NOFP16-NONEON-NEXT: fcvt h0, s0
+; CHECK-NOFP16-NONEON-NEXT: fcvt h1, s1
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_v2f16:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm h0, h0, h0
+; CHECK-FP16-NONEON-NEXT: fminnm h1, h1, h1
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_v2f16:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-NOFP16-NEON-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; CHECK-NOFP16-NEON-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_v2f16:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm v0.4h, v0.4h, v0.4h
+; CHECK-FP16-NEON-NEXT: ret
%z = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %x)
ret <2 x half> %z
}
define <2 x half> @fcanonicalize_v2f16_nnan(<2 x half> %x) {
-; AARCH64-LABEL: fcanonicalize_v2f16_nnan:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_v2f16_nnan:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fcvt s0, h0
+; CHECK-NOFP16-NONEON-NEXT: fcvt s1, h1
+; CHECK-NOFP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NONEON-NEXT: fminnm s1, s1, s1
+; CHECK-NOFP16-NONEON-NEXT: fcvt h0, s0
+; CHECK-NOFP16-NONEON-NEXT: fcvt h1, s1
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_v2f16_nnan:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm h0, h0, h0
+; CHECK-FP16-NONEON-NEXT: fminnm h1, h1, h1
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_v2f16_nnan:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-NOFP16-NEON-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; CHECK-NOFP16-NEON-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_v2f16_nnan:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm v0.4h, v0.4h, v0.4h
+; CHECK-FP16-NEON-NEXT: ret
%z = call nnan <2 x half> @llvm.canonicalize.v2f16(<2 x half> %x)
ret <2 x half> %z
}
define <4 x half> @fcanonicalize_v4f16(<4 x half> %x) {
-; AARCH64-LABEL: fcanonicalize_v4f16:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_v4f16:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fcvt s0, h0
+; CHECK-NOFP16-NONEON-NEXT: fcvt s1, h1
+; CHECK-NOFP16-NONEON-NEXT: fcvt s2, h2
+; CHECK-NOFP16-NONEON-NEXT: fcvt s3, h3
+; CHECK-NOFP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NONEON-NEXT: fminnm s1, s1, s1
+; CHECK-NOFP16-NONEON-NEXT: fminnm s2, s2, s2
+; CHECK-NOFP16-NONEON-NEXT: fminnm s3, s3, s3
+; CHECK-NOFP16-NONEON-NEXT: fcvt h0, s0
+; CHECK-NOFP16-NONEON-NEXT: fcvt h1, s1
+; CHECK-NOFP16-NONEON-NEXT: fcvt h2, s2
+; CHECK-NOFP16-NONEON-NEXT: fcvt h3, s3
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_v4f16:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm h0, h0, h0
+; CHECK-FP16-NONEON-NEXT: fminnm h1, h1, h1
+; CHECK-FP16-NONEON-NEXT: fminnm h2, h2, h2
+; CHECK-FP16-NONEON-NEXT: fminnm h3, h3, h3
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_v4f16:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-NOFP16-NEON-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; CHECK-NOFP16-NEON-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_v4f16:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm v0.4h, v0.4h, v0.4h
+; CHECK-FP16-NEON-NEXT: ret
%z = call <4 x half> @llvm.canonicalize.v4f16(<4 x half> %x)
ret <4 x half> %z
}
define <4 x half> @fcanonicalize_v4f16_nnan(<4 x half> %x) {
-; AARCH64-LABEL: fcanonicalize_v4f16_nnan:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_v4f16_nnan:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fcvt s0, h0
+; CHECK-NOFP16-NONEON-NEXT: fcvt s1, h1
+; CHECK-NOFP16-NONEON-NEXT: fcvt s2, h2
+; CHECK-NOFP16-NONEON-NEXT: fcvt s3, h3
+; CHECK-NOFP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NONEON-NEXT: fminnm s1, s1, s1
+; CHECK-NOFP16-NONEON-NEXT: fminnm s2, s2, s2
+; CHECK-NOFP16-NONEON-NEXT: fminnm s3, s3, s3
+; CHECK-NOFP16-NONEON-NEXT: fcvt h0, s0
+; CHECK-NOFP16-NONEON-NEXT: fcvt h1, s1
+; CHECK-NOFP16-NONEON-NEXT: fcvt h2, s2
+; CHECK-NOFP16-NONEON-NEXT: fcvt h3, s3
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_v4f16_nnan:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm h0, h0, h0
+; CHECK-FP16-NONEON-NEXT: fminnm h1, h1, h1
+; CHECK-FP16-NONEON-NEXT: fminnm h2, h2, h2
+; CHECK-FP16-NONEON-NEXT: fminnm h3, h3, h3
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_v4f16_nnan:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fcvtl v0.4s, v0.4h
+; CHECK-NOFP16-NEON-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; CHECK-NOFP16-NEON-NEXT: fcvtn v0.4h, v0.4s
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_v4f16_nnan:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm v0.4h, v0.4h, v0.4h
+; CHECK-FP16-NEON-NEXT: ret
%z = call nnan <4 x half> @llvm.canonicalize.v4f16(<4 x half> %x)
ret <4 x half> %z
}
define <8 x half> @fcanonicalize_v8f16(<8 x half> %x) {
-; AARCH64-LABEL: fcanonicalize_v8f16:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_v8f16:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fcvt s0, h0
+; CHECK-NOFP16-NONEON-NEXT: fcvt s1, h1
+; CHECK-NOFP16-NONEON-NEXT: fcvt s2, h2
+; CHECK-NOFP16-NONEON-NEXT: fcvt s3, h3
+; CHECK-NOFP16-NONEON-NEXT: fcvt s4, h4
+; CHECK-NOFP16-NONEON-NEXT: fcvt s5, h5
+; CHECK-NOFP16-NONEON-NEXT: fcvt s6, h6
+; CHECK-NOFP16-NONEON-NEXT: fcvt s7, h7
+; CHECK-NOFP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NONEON-NEXT: fminnm s1, s1, s1
+; CHECK-NOFP16-NONEON-NEXT: fminnm s2, s2, s2
+; CHECK-NOFP16-NONEON-NEXT: fminnm s3, s3, s3
+; CHECK-NOFP16-NONEON-NEXT: fminnm s4, s4, s4
+; CHECK-NOFP16-NONEON-NEXT: fminnm s5, s5, s5
+; CHECK-NOFP16-NONEON-NEXT: fminnm s6, s6, s6
+; CHECK-NOFP16-NONEON-NEXT: fminnm s7, s7, s7
+; CHECK-NOFP16-NONEON-NEXT: fcvt h0, s0
+; CHECK-NOFP16-NONEON-NEXT: fcvt h1, s1
+; CHECK-NOFP16-NONEON-NEXT: fcvt h2, s2
+; CHECK-NOFP16-NONEON-NEXT: fcvt h3, s3
+; CHECK-NOFP16-NONEON-NEXT: fcvt h4, s4
+; CHECK-NOFP16-NONEON-NEXT: fcvt h5, s5
+; CHECK-NOFP16-NONEON-NEXT: fcvt h6, s6
+; CHECK-NOFP16-NONEON-NEXT: fcvt h7, s7
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_v8f16:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm h0, h0, h0
+; CHECK-FP16-NONEON-NEXT: fminnm h1, h1, h1
+; CHECK-FP16-NONEON-NEXT: fminnm h2, h2, h2
+; CHECK-FP16-NONEON-NEXT: fminnm h3, h3, h3
+; CHECK-FP16-NONEON-NEXT: fminnm h4, h4, h4
+; CHECK-FP16-NONEON-NEXT: fminnm h5, h5, h5
+; CHECK-FP16-NONEON-NEXT: fminnm h6, h6, h6
+; CHECK-FP16-NONEON-NEXT: fminnm h7, h7, h7
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_v8f16:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fcvtl v1.4s, v0.4h
+; CHECK-NOFP16-NEON-NEXT: fcvtl2 v2.4s, v0.8h
+; CHECK-NOFP16-NEON-NEXT: fminnm v1.4s, v1.4s, v1.4s
+; CHECK-NOFP16-NEON-NEXT: fcvtn v0.4h, v1.4s
+; CHECK-NOFP16-NEON-NEXT: fminnm v1.4s, v2.4s, v2.4s
+; CHECK-NOFP16-NEON-NEXT: fcvtn2 v0.8h, v1.4s
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_v8f16:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm v0.8h, v0.8h, v0.8h
+; CHECK-FP16-NEON-NEXT: ret
%z = call <8 x half> @llvm.canonicalize.v8f16(<8 x half> %x)
ret <8 x half> %z
}
define <8 x half> @fcanonicalize_v8f16_nnan(<8 x half> %x) {
-; AARCH64-LABEL: fcanonicalize_v8f16_nnan:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_v8f16_nnan:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fcvt s0, h0
+; CHECK-NOFP16-NONEON-NEXT: fcvt s1, h1
+; CHECK-NOFP16-NONEON-NEXT: fcvt s2, h2
+; CHECK-NOFP16-NONEON-NEXT: fcvt s3, h3
+; CHECK-NOFP16-NONEON-NEXT: fcvt s4, h4
+; CHECK-NOFP16-NONEON-NEXT: fcvt s5, h5
+; CHECK-NOFP16-NONEON-NEXT: fcvt s6, h6
+; CHECK-NOFP16-NONEON-NEXT: fcvt s7, h7
+; CHECK-NOFP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NONEON-NEXT: fminnm s1, s1, s1
+; CHECK-NOFP16-NONEON-NEXT: fminnm s2, s2, s2
+; CHECK-NOFP16-NONEON-NEXT: fminnm s3, s3, s3
+; CHECK-NOFP16-NONEON-NEXT: fminnm s4, s4, s4
+; CHECK-NOFP16-NONEON-NEXT: fminnm s5, s5, s5
+; CHECK-NOFP16-NONEON-NEXT: fminnm s6, s6, s6
+; CHECK-NOFP16-NONEON-NEXT: fminnm s7, s7, s7
+; CHECK-NOFP16-NONEON-NEXT: fcvt h0, s0
+; CHECK-NOFP16-NONEON-NEXT: fcvt h1, s1
+; CHECK-NOFP16-NONEON-NEXT: fcvt h2, s2
+; CHECK-NOFP16-NONEON-NEXT: fcvt h3, s3
+; CHECK-NOFP16-NONEON-NEXT: fcvt h4, s4
+; CHECK-NOFP16-NONEON-NEXT: fcvt h5, s5
+; CHECK-NOFP16-NONEON-NEXT: fcvt h6, s6
+; CHECK-NOFP16-NONEON-NEXT: fcvt h7, s7
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_v8f16_nnan:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm h0, h0, h0
+; CHECK-FP16-NONEON-NEXT: fminnm h1, h1, h1
+; CHECK-FP16-NONEON-NEXT: fminnm h2, h2, h2
+; CHECK-FP16-NONEON-NEXT: fminnm h3, h3, h3
+; CHECK-FP16-NONEON-NEXT: fminnm h4, h4, h4
+; CHECK-FP16-NONEON-NEXT: fminnm h5, h5, h5
+; CHECK-FP16-NONEON-NEXT: fminnm h6, h6, h6
+; CHECK-FP16-NONEON-NEXT: fminnm h7, h7, h7
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_v8f16_nnan:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fcvtl v1.4s, v0.4h
+; CHECK-NOFP16-NEON-NEXT: fcvtl2 v2.4s, v0.8h
+; CHECK-NOFP16-NEON-NEXT: fminnm v1.4s, v1.4s, v1.4s
+; CHECK-NOFP16-NEON-NEXT: fcvtn v0.4h, v1.4s
+; CHECK-NOFP16-NEON-NEXT: fminnm v1.4s, v2.4s, v2.4s
+; CHECK-NOFP16-NEON-NEXT: fcvtn2 v0.8h, v1.4s
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_v8f16_nnan:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm v0.8h, v0.8h, v0.8h
+; CHECK-FP16-NEON-NEXT: ret
%z = call nnan <8 x half> @llvm.canonicalize.v8f16(<8 x half> %x)
ret <8 x half> %z
}
define float @fcanonicalize_f32(float %x) {
-; AARCH64-LABEL: fcanonicalize_f32:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm s0, s0, s0
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_f32:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_f32:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_f32:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_f32:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm s0, s0, s0
+; CHECK-FP16-NEON-NEXT: ret
%z = call float @llvm.canonicalize.f32(float %x)
ret float %z
}
define float @fcanonicalize_f32_nnan(float %x) {
-; AARCH64-LABEL: fcanonicalize_f32_nnan:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm s0, s0, s0
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_f32_nnan:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_f32_nnan:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_f32_nnan:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_f32_nnan:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm s0, s0, s0
+; CHECK-FP16-NEON-NEXT: ret
%z = call nnan float @llvm.canonicalize.f32(float %x)
ret float %z
}
define <2 x float> @fcanonicalize_v2f32(<2 x float> %x) {
-; AARCH64-LABEL: fcanonicalize_v2f32:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm v0.2s, v0.2s, v0.2s
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_v2f32:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NONEON-NEXT: fminnm s1, s1, s1
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_v2f32:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-FP16-NONEON-NEXT: fminnm s1, s1, s1
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_v2f32:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fminnm v0.2s, v0.2s, v0.2s
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_v2f32:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm v0.2s, v0.2s, v0.2s
+; CHECK-FP16-NEON-NEXT: ret
%z = call <2 x float> @llvm.canonicalize.v2f32(<2 x float> %x)
ret <2 x float> %z
}
define <2 x float> @fcanonicalize_v2f32_nnan(<2 x float> %x) {
-; AARCH64-LABEL: fcanonicalize_v2f32_nnan:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm v0.2s, v0.2s, v0.2s
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_v2f32_nnan:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NONEON-NEXT: fminnm s1, s1, s1
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_v2f32_nnan:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-FP16-NONEON-NEXT: fminnm s1, s1, s1
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_v2f32_nnan:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fminnm v0.2s, v0.2s, v0.2s
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_v2f32_nnan:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm v0.2s, v0.2s, v0.2s
+; CHECK-FP16-NEON-NEXT: ret
%z = call nnan <2 x float> @llvm.canonicalize.v2f32(<2 x float> %x)
ret <2 x float> %z
}
define <4 x float> @fcanonicalize_v4f32(<4 x float> %x) {
-; AARCH64-LABEL: fcanonicalize_v4f32:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_v4f32:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NONEON-NEXT: fminnm s1, s1, s1
+; CHECK-NOFP16-NONEON-NEXT: fminnm s2, s2, s2
+; CHECK-NOFP16-NONEON-NEXT: fminnm s3, s3, s3
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_v4f32:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-FP16-NONEON-NEXT: fminnm s1, s1, s1
+; CHECK-FP16-NONEON-NEXT: fminnm s2, s2, s2
+; CHECK-FP16-NONEON-NEXT: fminnm s3, s3, s3
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_v4f32:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_v4f32:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; CHECK-FP16-NEON-NEXT: ret
%z = call <4 x float> @llvm.canonicalize.v4f32(<4 x float> %x)
ret <4 x float> %z
}
define <4 x float> @fcanonicalize_v4f32_nnan(<4 x float> %x) {
-; AARCH64-LABEL: fcanonicalize_v4f32_nnan:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_v4f32_nnan:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-NOFP16-NONEON-NEXT: fminnm s1, s1, s1
+; CHECK-NOFP16-NONEON-NEXT: fminnm s2, s2, s2
+; CHECK-NOFP16-NONEON-NEXT: fminnm s3, s3, s3
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_v4f32_nnan:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm s0, s0, s0
+; CHECK-FP16-NONEON-NEXT: fminnm s1, s1, s1
+; CHECK-FP16-NONEON-NEXT: fminnm s2, s2, s2
+; CHECK-FP16-NONEON-NEXT: fminnm s3, s3, s3
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_v4f32_nnan:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_v4f32_nnan:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm v0.4s, v0.4s, v0.4s
+; CHECK-FP16-NEON-NEXT: ret
%z = call nnan <4 x float> @llvm.canonicalize.v4f32(<4 x float> %x)
ret <4 x float> %z
}
define double @fcanonicalize_f64(double %x) {
-; AARCH64-LABEL: fcanonicalize_f64:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm d0, d0, d0
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_f64:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fminnm d0, d0, d0
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_f64:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm d0, d0, d0
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_f64:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fminnm d0, d0, d0
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_f64:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm d0, d0, d0
+; CHECK-FP16-NEON-NEXT: ret
%z = call double @llvm.canonicalize.f64(double %x)
ret double %z
}
define double @fcanonicalize_f64_nnan(double %x) {
-; AARCH64-LABEL: fcanonicalize_f64_nnan:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm d0, d0, d0
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_f64_nnan:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fminnm d0, d0, d0
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_f64_nnan:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm d0, d0, d0
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_f64_nnan:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fminnm d0, d0, d0
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_f64_nnan:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm d0, d0, d0
+; CHECK-FP16-NEON-NEXT: ret
%z = call nnan double @llvm.canonicalize.f64(double %x)
ret double %z
}
define <2 x double> @fcanonicalize_v2f64(<2 x double> %x) {
-; AARCH64-LABEL: fcanonicalize_v2f64:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_v2f64:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fminnm d0, d0, d0
+; CHECK-NOFP16-NONEON-NEXT: fminnm d1, d1, d1
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_v2f64:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm d0, d0, d0
+; CHECK-FP16-NONEON-NEXT: fminnm d1, d1, d1
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_v2f64:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fminnm v0.2d, v0.2d, v0.2d
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_v2f64:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm v0.2d, v0.2d, v0.2d
+; CHECK-FP16-NEON-NEXT: ret
%z = call <2 x double> @llvm.canonicalize.v2f64(<2 x double> %x)
ret <2 x double> %z
}
define <2 x double> @fcanonicalize_v2f64_nnan(<2 x double> %x) {
-; AARCH64-LABEL: fcanonicalize_v2f64_nnan:
-; AARCH64: // %bb.0:
-; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
-; AARCH64-NEXT: ret
+; CHECK-NOFP16-NONEON-LABEL: fcanonicalize_v2f64_nnan:
+; CHECK-NOFP16-NONEON: // %bb.0:
+; CHECK-NOFP16-NONEON-NEXT: fminnm d0, d0, d0
+; CHECK-NOFP16-NONEON-NEXT: fminnm d1, d1, d1
+; CHECK-NOFP16-NONEON-NEXT: ret
+;
+; CHECK-FP16-NONEON-LABEL: fcanonicalize_v2f64_nnan:
+; CHECK-FP16-NONEON: // %bb.0:
+; CHECK-FP16-NONEON-NEXT: fminnm d0, d0, d0
+; CHECK-FP16-NONEON-NEXT: fminnm d1, d1, d1
+; CHECK-FP16-NONEON-NEXT: ret
+;
+; CHECK-NOFP16-NEON-LABEL: fcanonicalize_v2f64_nnan:
+; CHECK-NOFP16-NEON: // %bb.0:
+; CHECK-NOFP16-NEON-NEXT: fminnm v0.2d, v0.2d, v0.2d
+; CHECK-NOFP16-NEON-NEXT: ret
+;
+; CHECK-FP16-NEON-LABEL: fcanonicalize_v2f64_nnan:
+; CHECK-FP16-NEON: // %bb.0:
+; CHECK-FP16-NEON-NEXT: fminnm v0.2d, v0.2d, v0.2d
+; CHECK-FP16-NEON-NEXT: ret
%z = call nnan <2 x double> @llvm.canonicalize.v2f64(<2 x double> %x)
ret <2 x double> %z
}