[llvm] AArch64: Select FCANONICALIZE (PR #104429)

YunQiang Su via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 10 01:24:16 PDT 2024


https://github.com/wzssyqa updated https://github.com/llvm/llvm-project/pull/104429

>From edd7b19ba1bf0e15a1aba9f427e26a3d70825865 Mon Sep 17 00:00:00 2001
From: YunQiang Su <yunqiang at isrc.iscas.ac.cn>
Date: Thu, 15 Aug 2024 16:40:00 +0800
Subject: [PATCH 1/3] AArch64: Add FCANONICALIZE

FMINNM/FMAXNM instructions of AArch64 follow IEEE754-2008.
We can use them to canonicalize a floating point number.
---
 llvm/lib/Target/AArch64/AArch64InstrInfo.td   |  17 +
 llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll | 168 ++++++
 .../AArch64/fp-maximumnum-minimumnum.ll       | 560 ++++++++++++++++++
 3 files changed, 745 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 33d05d6039b096..9f9311a545117a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -5063,6 +5063,12 @@ def : Pat<(fmaxnum_ieee (f32 FPR32:$a), (f32 FPR32:$b)),
 def : Pat<(fmaxnum_ieee (f16 FPR16:$a), (f16 FPR16:$b)),
           (FMAXNMHrr FPR16:$a, FPR16:$b)>;
 
+def : Pat<(f16 (fcanonicalize f16:$a)),
+          (FMINNMHrr   f16:$a, f16:$a)>;
+def : Pat<(f32 (fcanonicalize f32:$a)),
+          (FMINNMSrr   f32:$a, f32:$a)>;
+def : Pat<(f64 (fcanonicalize f64:$a)),
+          (FMINNMDrr   f64:$a, f64:$a)>;
 //===----------------------------------------------------------------------===//
 // Floating point three operand instructions.
 //===----------------------------------------------------------------------===//
@@ -5588,6 +5594,17 @@ def : Pat<(v2f32 (fmaxnum_ieee (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
 def : Pat<(v4f16 (fmaxnum_ieee (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
           (v4f16 (FMAXNMv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm)))>;
 
+def : Pat<(v2f64 (fcanonicalize (v2f64 V128:$Rn))),
+          (v2f64 (FMINNMv2f64 (v2f64 V128:$Rn), (v2f64 V128:$Rn)))>;
+def : Pat<(v4f32 (fcanonicalize (v4f32 V128:$Rn))),
+          (v4f32 (FMINNMv4f32 (v4f32 V128:$Rn), (v4f32 V128:$Rn)))>;
+def : Pat<(v8f16 (fcanonicalize (v8f16 V128:$Rn))),
+          (v8f16 (FMINNMv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rn)))>;
+def : Pat<(v2f32 (fcanonicalize (v2f32 V64:$Rn))),
+          (v2f32 (FMINNMv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rn)))>;
+def : Pat<(v4f16 (fcanonicalize (v4f16 V64:$Rn))),
+          (v4f16 (FMINNMv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rn)))>;
+
 // NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
 // instruction expects the addend first, while the fma intrinsic puts it last.
 defm FMLA     : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
diff --git a/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll b/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll
new file mode 100644
index 00000000000000..2b442dff2ca805
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll
@@ -0,0 +1,168 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=aarch64 --mattr=+fullfp16 < %s | FileCheck %s --check-prefix=AARCH64
+
+declare half @llvm.canonicalize.f16(half)
+declare float @llvm.canonicalize.f32(float)
+declare double @llvm.canonicalize.f64(double)
+
+define half @fcanonicalize_half(half %x) {
+; AARCH64-LABEL: fcanonicalize_half:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm h0, h0, h0
+; AARCH64-NEXT:    ret
+  %z = call half @llvm.canonicalize.f16(half %x)
+  ret half %z
+}
+
+define half @fcanonicalize_half_nnan(half %x) {
+; AARCH64-LABEL: fcanonicalize_half_nnan:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm h0, h0, h0
+; AARCH64-NEXT:    ret
+  %z = call nnan half @llvm.canonicalize.f16(half %x)
+  ret half %z
+}
+
+define <2 x half> @fcanonicalize_v2f16(<2 x half> %x) {
+; AARCH64-LABEL: fcanonicalize_v2f16:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT:    ret
+  %z = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %x)
+  ret <2 x half> %z
+}
+
+define <2 x half> @fcanonicalize_v2f16_nnan(<2 x half> %x) {
+; AARCH64-LABEL: fcanonicalize_v2f16_nnan:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT:    ret
+  %z = call nnan <2 x half> @llvm.canonicalize.v2f16(<2 x half> %x)
+  ret <2 x half> %z
+}
+
+define <4 x half> @fcanonicalize_v4f16(<4 x half> %x) {
+; AARCH64-LABEL: fcanonicalize_v4f16:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT:    ret
+  %z = call <4 x half> @llvm.canonicalize.v4f16(<4 x half> %x)
+  ret <4 x half> %z
+}
+
+define <4 x half> @fcanonicalize_v4f16_nnan(<4 x half> %x) {
+; AARCH64-LABEL: fcanonicalize_v4f16_nnan:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT:    ret
+  %z = call nnan <4 x half> @llvm.canonicalize.v4f16(<4 x half> %x)
+  ret <4 x half> %z
+}
+
+define <8 x half> @fcanonicalize_v8f16(<8 x half> %x) {
+; AARCH64-LABEL: fcanonicalize_v8f16:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT:    ret
+  %z = call <8 x half> @llvm.canonicalize.v8f16(<8 x half> %x)
+  ret <8 x half> %z
+}
+
+define <8 x half> @fcanonicalize_v8f16_nnan(<8 x half> %x) {
+; AARCH64-LABEL: fcanonicalize_v8f16_nnan:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT:    ret
+  %z = call nnan <8 x half> @llvm.canonicalize.v8f16(<8 x half> %x)
+  ret <8 x half> %z
+}
+
+define float @fcanonicalize_float(float %x) {
+; AARCH64-LABEL: fcanonicalize_float:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm s0, s0, s0
+; AARCH64-NEXT:    ret
+  %z = call float @llvm.canonicalize.f32(float %x)
+  ret float %z
+}
+
+define float @fcanonicalize_float_nnan(float %x) {
+; AARCH64-LABEL: fcanonicalize_float_nnan:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm s0, s0, s0
+; AARCH64-NEXT:    ret
+  %z = call nnan float @llvm.canonicalize.f32(float %x)
+  ret float %z
+}
+
+define <2 x float> @fcanonicalize_v2f32(<2 x float> %x) {
+; AARCH64-LABEL: fcanonicalize_v2f32:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm v0.2s, v0.2s, v0.2s
+; AARCH64-NEXT:    ret
+  %z = call <2 x float> @llvm.canonicalize.v2f32(<2 x float> %x)
+  ret <2 x float> %z
+}
+
+define <2 x float> @fcanonicalize_v2f32_nnan(<2 x float> %x) {
+; AARCH64-LABEL: fcanonicalize_v2f32_nnan:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm v0.2s, v0.2s, v0.2s
+; AARCH64-NEXT:    ret
+  %z = call nnan <2 x float> @llvm.canonicalize.v2f32(<2 x float> %x)
+  ret <2 x float> %z
+}
+
+define <4 x float> @fcanonicalize_v4f32(<4 x float> %x) {
+; AARCH64-LABEL: fcanonicalize_v4f32:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT:    ret
+  %z = call <4 x float> @llvm.canonicalize.v4f32(<4 x float> %x)
+  ret <4 x float> %z
+}
+
+define <4 x float> @fcanonicalize_v4f32_nnan(<4 x float> %x) {
+; AARCH64-LABEL: fcanonicalize_v4f32_nnan:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT:    ret
+  %z = call nnan <4 x float> @llvm.canonicalize.v4f32(<4 x float> %x)
+  ret <4 x float> %z
+}
+
+define double @fcanonicalize_double(double %x) {
+; AARCH64-LABEL: fcanonicalize_double:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm d0, d0, d0
+; AARCH64-NEXT:    ret
+  %z = call double @llvm.canonicalize.f64(double %x)
+  ret double %z
+}
+
+define double @fcanonicalize_double_nnan(double %x) {
+; AARCH64-LABEL: fcanonicalize_double_nnan:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm d0, d0, d0
+; AARCH64-NEXT:    ret
+  %z = call nnan double @llvm.canonicalize.f64(double %x)
+  ret double %z
+}
+
+define <2 x double> @fcanonicalize_v2f64(<2 x double> %x) {
+; AARCH64-LABEL: fcanonicalize_v2f64:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT:    ret
+  %z = call <2 x double> @llvm.canonicalize.v2f64(<2 x double> %x)
+  ret <2 x double> %z
+}
+
+define <2 x double> @fcanonicalize_v2f64_nnan(<2 x double> %x) {
+; AARCH64-LABEL: fcanonicalize_v2f64_nnan:
+; AARCH64:       // %bb.0:
+; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT:    ret
+  %z = call nnan <2 x double> @llvm.canonicalize.v2f64(<2 x double> %x)
+  ret <2 x double> %z
+}
diff --git a/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll b/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll
index b8406179f3cb32..bb3f9a3e52a16b 100644
--- a/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll
+++ b/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll
@@ -472,3 +472,563 @@ entry:
   %c = call nnan <16 x half> @llvm.minimumnum.v16f16(<16 x half> %a, <16 x half> %b)
   ret <16 x half> %c
 }
+
+;;;;;;;;;;;;;;;;  max_f64
+define double @max_f64(double %a, double %b) {
+; AARCH64-LABEL: max_f64:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm d1, d1, d1
+; AARCH64-NEXT:    fminnm d0, d0, d0
+; AARCH64-NEXT:    fmaxnm d0, d0, d1
+; AARCH64-NEXT:    ret
+entry:
+  %c = call double @llvm.maximumnum.f64(double %a, double %b)
+  ret double %c
+}
+
+define <2 x double> @max_v2f64(<2 x double> %a, <2 x double> %b) {
+; AARCH64-LABEL: max_v2f64:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.2d, v1.2d, v1.2d
+; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT:    fmaxnm v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> %a, <2 x double> %b)
+  ret <2 x double> %c
+}
+
+define <3 x double> @max_v3f64(<3 x double> %a, <3 x double> %b) {
+; AARCH64-LABEL: max_v3f64:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    // kill: def $d3 killed $d3 def $q3
+; AARCH64-NEXT:    // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT:    // kill: def $d4 killed $d4 def $q4
+; AARCH64-NEXT:    // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT:    // kill: def $d2 killed $d2 def $q2
+; AARCH64-NEXT:    // kill: def $d5 killed $d5 def $q5
+; AARCH64-NEXT:    mov v0.d[1], v1.d[0]
+; AARCH64-NEXT:    mov v3.d[1], v4.d[0]
+; AARCH64-NEXT:    fminnm v2.2d, v2.2d, v2.2d
+; AARCH64-NEXT:    fminnm v1.2d, v3.2d, v3.2d
+; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT:    fmaxnm v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT:    fminnm v1.2d, v5.2d, v5.2d
+; AARCH64-NEXT:    fmaxnm v2.2d, v2.2d, v1.2d
+; AARCH64-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; AARCH64-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; AARCH64-NEXT:    // kill: def $d1 killed $d1 killed $q1
+; AARCH64-NEXT:    // kill: def $d2 killed $d2 killed $q2
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <3 x double> @llvm.maximumnum.v3f64(<3 x double> %a, <3 x double> %b)
+  ret <3 x double> %c
+}
+
+define <4 x double> @max_v4f64(<4 x double> %a, <4 x double> %b) {
+; AARCH64-LABEL: max_v4f64:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v2.2d, v2.2d, v2.2d
+; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT:    fminnm v3.2d, v3.2d, v3.2d
+; AARCH64-NEXT:    fminnm v1.2d, v1.2d, v1.2d
+; AARCH64-NEXT:    fmaxnm v0.2d, v0.2d, v2.2d
+; AARCH64-NEXT:    fmaxnm v1.2d, v1.2d, v3.2d
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <4 x double> @llvm.maximumnum.v4f64(<4 x double> %a, <4 x double> %b)
+  ret <4 x double> %c
+}
+
+;;;;;;;;;;;;;;;;;; max_f32
+define float @max_f32(float %a, float %b) {
+; AARCH64-LABEL: max_f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s1, s1, s1
+; AARCH64-NEXT:    fminnm s0, s0, s0
+; AARCH64-NEXT:    fmaxnm s0, s0, s1
+; AARCH64-NEXT:    ret
+entry:
+  %c = call float @llvm.maximumnum.f32(float %a, float %b)
+  ret float %c
+}
+
+define <2 x float> @max_v2f32(<2 x float> %a, <2 x float> %b) {
+; AARCH64-LABEL: max_v2f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.2s, v1.2s, v1.2s
+; AARCH64-NEXT:    fminnm v0.2s, v0.2s, v0.2s
+; AARCH64-NEXT:    fmaxnm v0.2s, v0.2s, v1.2s
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <2 x float> @llvm.maximumnum.v2f32(<2 x float> %a, <2 x float> %b)
+  ret <2 x float> %c
+}
+
+define <3 x float> @max_v3f32(<3 x float> %a, <3 x float> %b) {
+; AARCH64-LABEL: max_v3f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.4s, v1.4s, v1.4s
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT:    fmaxnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <3 x float> @llvm.maximumnum.v3f32(<3 x float> %a, <3 x float> %b)
+  ret <3 x float> %c
+}
+
+define <4 x float> @max_v4f32(<4 x float> %a, <4 x float> %b) {
+; AARCH64-LABEL: max_v4f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.4s, v1.4s, v1.4s
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT:    fmaxnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %a, <4 x float> %b)
+  ret <4 x float> %c
+}
+
+define <5 x float> @max_v5f32(<5 x float> %a, <5 x float> %b) {
+; AARCH64-LABEL: max_v5f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    // kill: def $s0 killed $s0 def $q0
+; AARCH64-NEXT:    // kill: def $s5 killed $s5 def $q5
+; AARCH64-NEXT:    // kill: def $s1 killed $s1 def $q1
+; AARCH64-NEXT:    // kill: def $s6 killed $s6 def $q6
+; AARCH64-NEXT:    // kill: def $s2 killed $s2 def $q2
+; AARCH64-NEXT:    // kill: def $s7 killed $s7 def $q7
+; AARCH64-NEXT:    // kill: def $s3 killed $s3 def $q3
+; AARCH64-NEXT:    mov x8, sp
+; AARCH64-NEXT:    // kill: def $s4 killed $s4 def $q4
+; AARCH64-NEXT:    mov v0.s[1], v1.s[0]
+; AARCH64-NEXT:    mov v5.s[1], v6.s[0]
+; AARCH64-NEXT:    mov v0.s[2], v2.s[0]
+; AARCH64-NEXT:    mov v5.s[2], v7.s[0]
+; AARCH64-NEXT:    ldr s2, [sp, #8]
+; AARCH64-NEXT:    fminnm v2.4s, v2.4s, v2.4s
+; AARCH64-NEXT:    mov v0.s[3], v3.s[0]
+; AARCH64-NEXT:    ld1 { v5.s }[3], [x8]
+; AARCH64-NEXT:    fminnm v3.4s, v4.4s, v4.4s
+; AARCH64-NEXT:    fminnm v1.4s, v5.4s, v5.4s
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT:    fmaxnm v4.4s, v3.4s, v2.4s
+; AARCH64-NEXT:    // kill: def $s4 killed $s4 killed $q4
+; AARCH64-NEXT:    fmaxnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT:    mov s1, v0.s[1]
+; AARCH64-NEXT:    mov s2, v0.s[2]
+; AARCH64-NEXT:    mov s3, v0.s[3]
+; AARCH64-NEXT:    // kill: def $s0 killed $s0 killed $q0
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <5 x float> @llvm.maximumnum.v5f32(<5 x float> %a, <5 x float> %b)
+  ret <5 x float> %c
+}
+
+define <8 x float> @max_v8f32(<8 x float> %a, <8 x float> %b) {
+; AARCH64-LABEL: max_v8f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v2.4s, v2.4s, v2.4s
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT:    fminnm v3.4s, v3.4s, v3.4s
+; AARCH64-NEXT:    fminnm v1.4s, v1.4s, v1.4s
+; AARCH64-NEXT:    fmaxnm v0.4s, v0.4s, v2.4s
+; AARCH64-NEXT:    fmaxnm v1.4s, v1.4s, v3.4s
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <8 x float> @llvm.maximumnum.v8f32(<8 x float> %a, <8 x float> %b)
+  ret <8 x float> %c
+}
+
+;;;;;;;;;;;;;;;;;; max_f16
+define half @max_f16(half %a, half %b) {
+; AARCH64-LABEL: max_f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm h1, h1, h1
+; AARCH64-NEXT:    fminnm h0, h0, h0
+; AARCH64-NEXT:    fmaxnm h0, h0, h1
+; AARCH64-NEXT:    ret
+entry:
+  %c = call half @llvm.maximumnum.f16(half %a, half %b)
+  ret half %c
+}
+
+define <2 x half> @max_v2f16(<2 x half> %a, <2 x half> %b) {
+; AARCH64-LABEL: max_v2f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.4h, v1.4h, v1.4h
+; AARCH64-NEXT:    fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT:    fmaxnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <2 x half> @llvm.maximumnum.v2f16(<2 x half> %a, <2 x half> %b)
+  ret <2 x half> %c
+}
+
+define <4 x half> @max_v4f16(<4 x half> %a, <4 x half> %b) {
+; AARCH64-LABEL: max_v4f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.4h, v1.4h, v1.4h
+; AARCH64-NEXT:    fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT:    fmaxnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <4 x half> @llvm.maximumnum.v4f16(<4 x half> %a, <4 x half> %b)
+  ret <4 x half> %c
+}
+
+define <8 x half> @max_v8f16(<8 x half> %a, <8 x half> %b) {
+; AARCH64-LABEL: max_v8f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.8h, v1.8h, v1.8h
+; AARCH64-NEXT:    fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT:    fmaxnm v0.8h, v0.8h, v1.8h
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <8 x half> @llvm.maximumnum.v8f16(<8 x half> %a, <8 x half> %b)
+  ret <8 x half> %c
+}
+
+define <9 x half> @max_v9f16(<9 x half> %a, <9 x half> %b) {
+; AARCH64-LABEL: max_v9f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    // kill: def $h0 killed $h0 def $q0
+; AARCH64-NEXT:    // kill: def $h1 killed $h1 def $q1
+; AARCH64-NEXT:    // kill: def $h2 killed $h2 def $q2
+; AARCH64-NEXT:    add x9, sp, #16
+; AARCH64-NEXT:    // kill: def $h3 killed $h3 def $q3
+; AARCH64-NEXT:    // kill: def $h4 killed $h4 def $q4
+; AARCH64-NEXT:    // kill: def $h5 killed $h5 def $q5
+; AARCH64-NEXT:    // kill: def $h6 killed $h6 def $q6
+; AARCH64-NEXT:    // kill: def $h7 killed $h7 def $q7
+; AARCH64-NEXT:    mov v0.h[1], v1.h[0]
+; AARCH64-NEXT:    ldr h1, [sp, #8]
+; AARCH64-NEXT:    ld1 { v1.h }[1], [x9]
+; AARCH64-NEXT:    add x9, sp, #24
+; AARCH64-NEXT:    mov v0.h[2], v2.h[0]
+; AARCH64-NEXT:    ldr h2, [sp]
+; AARCH64-NEXT:    ld1 { v1.h }[2], [x9]
+; AARCH64-NEXT:    add x9, sp, #32
+; AARCH64-NEXT:    fminnm v2.8h, v2.8h, v2.8h
+; AARCH64-NEXT:    mov v0.h[3], v3.h[0]
+; AARCH64-NEXT:    ld1 { v1.h }[3], [x9]
+; AARCH64-NEXT:    add x9, sp, #40
+; AARCH64-NEXT:    ldr h3, [sp, #72]
+; AARCH64-NEXT:    ld1 { v1.h }[4], [x9]
+; AARCH64-NEXT:    add x9, sp, #48
+; AARCH64-NEXT:    fminnm v3.8h, v3.8h, v3.8h
+; AARCH64-NEXT:    mov v0.h[4], v4.h[0]
+; AARCH64-NEXT:    ld1 { v1.h }[5], [x9]
+; AARCH64-NEXT:    add x9, sp, #56
+; AARCH64-NEXT:    fmaxnm v2.8h, v2.8h, v3.8h
+; AARCH64-NEXT:    mov v0.h[5], v5.h[0]
+; AARCH64-NEXT:    ld1 { v1.h }[6], [x9]
+; AARCH64-NEXT:    add x9, sp, #64
+; AARCH64-NEXT:    str h2, [x8, #16]
+; AARCH64-NEXT:    mov v0.h[6], v6.h[0]
+; AARCH64-NEXT:    ld1 { v1.h }[7], [x9]
+; AARCH64-NEXT:    fminnm v1.8h, v1.8h, v1.8h
+; AARCH64-NEXT:    mov v0.h[7], v7.h[0]
+; AARCH64-NEXT:    fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT:    fmaxnm v0.8h, v0.8h, v1.8h
+; AARCH64-NEXT:    str q0, [x8]
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <9 x half> @llvm.maximumnum.v9f16(<9 x half> %a, <9 x half> %b)
+  ret <9 x half> %c
+}
+
+define <16 x half> @max_v16f16(<16 x half> %a, <16 x half> %b) {
+; AARCH64-LABEL: max_v16f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v2.8h, v2.8h, v2.8h
+; AARCH64-NEXT:    fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT:    fminnm v3.8h, v3.8h, v3.8h
+; AARCH64-NEXT:    fminnm v1.8h, v1.8h, v1.8h
+; AARCH64-NEXT:    fmaxnm v0.8h, v0.8h, v2.8h
+; AARCH64-NEXT:    fmaxnm v1.8h, v1.8h, v3.8h
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <16 x half> @llvm.maximumnum.v16f16(<16 x half> %a, <16 x half> %b)
+  ret <16 x half> %c
+}
+
+;;;;;;;;;;;;;;;;  min_f64
+define double @min_f64(double %a, double %b) {
+; AARCH64-LABEL: min_f64:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm d1, d1, d1
+; AARCH64-NEXT:    fminnm d0, d0, d0
+; AARCH64-NEXT:    fminnm d0, d0, d1
+; AARCH64-NEXT:    ret
+entry:
+  %c = call double @llvm.minimumnum.f64(double %a, double %b)
+  ret double %c
+}
+
+define <2 x double> @min_v2f64(<2 x double> %a, <2 x double> %b) {
+; AARCH64-LABEL: min_v2f64:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.2d, v1.2d, v1.2d
+; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %a, <2 x double> %b)
+  ret <2 x double> %c
+}
+
+define <3 x double> @min_v3f64(<3 x double> %a, <3 x double> %b) {
+; AARCH64-LABEL: min_v3f64:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    // kill: def $d3 killed $d3 def $q3
+; AARCH64-NEXT:    // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT:    // kill: def $d4 killed $d4 def $q4
+; AARCH64-NEXT:    // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT:    // kill: def $d2 killed $d2 def $q2
+; AARCH64-NEXT:    // kill: def $d5 killed $d5 def $q5
+; AARCH64-NEXT:    mov v0.d[1], v1.d[0]
+; AARCH64-NEXT:    mov v3.d[1], v4.d[0]
+; AARCH64-NEXT:    fminnm v2.2d, v2.2d, v2.2d
+; AARCH64-NEXT:    fminnm v1.2d, v3.2d, v3.2d
+; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT:    fminnm v1.2d, v5.2d, v5.2d
+; AARCH64-NEXT:    fminnm v2.2d, v2.2d, v1.2d
+; AARCH64-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; AARCH64-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; AARCH64-NEXT:    // kill: def $d1 killed $d1 killed $q1
+; AARCH64-NEXT:    // kill: def $d2 killed $d2 killed $q2
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <3 x double> @llvm.minimumnum.v3f64(<3 x double> %a, <3 x double> %b)
+  ret <3 x double> %c
+}
+
+define <4 x double> @min_v4f64(<4 x double> %a, <4 x double> %b) {
+; AARCH64-LABEL: min_v4f64:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v2.2d, v2.2d, v2.2d
+; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v0.2d
+; AARCH64-NEXT:    fminnm v3.2d, v3.2d, v3.2d
+; AARCH64-NEXT:    fminnm v1.2d, v1.2d, v1.2d
+; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v2.2d
+; AARCH64-NEXT:    fminnm v1.2d, v1.2d, v3.2d
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <4 x double> @llvm.minimumnum.v4f64(<4 x double> %a, <4 x double> %b)
+  ret <4 x double> %c
+}
+
+;;;;;;;;;;;;;;;;;; min_f32
+define float @min_f32(float %a, float %b) {
+; AARCH64-LABEL: min_f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s1, s1, s1
+; AARCH64-NEXT:    fminnm s0, s0, s0
+; AARCH64-NEXT:    fminnm s0, s0, s1
+; AARCH64-NEXT:    ret
+entry:
+  %c = call float @llvm.minimumnum.f32(float %a, float %b)
+  ret float %c
+}
+
+define <2 x float> @min_v2f32(<2 x float> %a, <2 x float> %b) {
+; AARCH64-LABEL: min_v2f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.2s, v1.2s, v1.2s
+; AARCH64-NEXT:    fminnm v0.2s, v0.2s, v0.2s
+; AARCH64-NEXT:    fminnm v0.2s, v0.2s, v1.2s
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <2 x float> @llvm.minimumnum.v2f32(<2 x float> %a, <2 x float> %b)
+  ret <2 x float> %c
+}
+
+define <3 x float> @min_v3f32(<3 x float> %a, <3 x float> %b) {
+; AARCH64-LABEL: min_v3f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.4s, v1.4s, v1.4s
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <3 x float> @llvm.minimumnum.v3f32(<3 x float> %a, <3 x float> %b)
+  ret <3 x float> %c
+}
+
+define <4 x float> @min_v4f32(<4 x float> %a, <4 x float> %b) {
+; AARCH64-LABEL: min_v4f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.4s, v1.4s, v1.4s
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> %a, <4 x float> %b)
+  ret <4 x float> %c
+}
+
+define <5 x float> @min_v5f32(<5 x float> %a, <5 x float> %b) {
+; AARCH64-LABEL: min_v5f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    // kill: def $s0 killed $s0 def $q0
+; AARCH64-NEXT:    // kill: def $s5 killed $s5 def $q5
+; AARCH64-NEXT:    // kill: def $s1 killed $s1 def $q1
+; AARCH64-NEXT:    // kill: def $s6 killed $s6 def $q6
+; AARCH64-NEXT:    // kill: def $s2 killed $s2 def $q2
+; AARCH64-NEXT:    // kill: def $s7 killed $s7 def $q7
+; AARCH64-NEXT:    // kill: def $s3 killed $s3 def $q3
+; AARCH64-NEXT:    mov x8, sp
+; AARCH64-NEXT:    // kill: def $s4 killed $s4 def $q4
+; AARCH64-NEXT:    mov v0.s[1], v1.s[0]
+; AARCH64-NEXT:    mov v5.s[1], v6.s[0]
+; AARCH64-NEXT:    mov v0.s[2], v2.s[0]
+; AARCH64-NEXT:    mov v5.s[2], v7.s[0]
+; AARCH64-NEXT:    ldr s2, [sp, #8]
+; AARCH64-NEXT:    fminnm v2.4s, v2.4s, v2.4s
+; AARCH64-NEXT:    mov v0.s[3], v3.s[0]
+; AARCH64-NEXT:    ld1 { v5.s }[3], [x8]
+; AARCH64-NEXT:    fminnm v3.4s, v4.4s, v4.4s
+; AARCH64-NEXT:    fminnm v1.4s, v5.4s, v5.4s
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT:    fminnm v4.4s, v3.4s, v2.4s
+; AARCH64-NEXT:    // kill: def $s4 killed $s4 killed $q4
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT:    mov s1, v0.s[1]
+; AARCH64-NEXT:    mov s2, v0.s[2]
+; AARCH64-NEXT:    mov s3, v0.s[3]
+; AARCH64-NEXT:    // kill: def $s0 killed $s0 killed $q0
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <5 x float> @llvm.minimumnum.v5f32(<5 x float> %a, <5 x float> %b)
+  ret <5 x float> %c
+}
+
+define <8 x float> @min_v8f32(<8 x float> %a, <8 x float> %b) {
+; AARCH64-LABEL: min_v8f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v2.4s, v2.4s, v2.4s
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v0.4s
+; AARCH64-NEXT:    fminnm v3.4s, v3.4s, v3.4s
+; AARCH64-NEXT:    fminnm v1.4s, v1.4s, v1.4s
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v2.4s
+; AARCH64-NEXT:    fminnm v1.4s, v1.4s, v3.4s
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <8 x float> @llvm.minimumnum.v8f32(<8 x float> %a, <8 x float> %b)
+  ret <8 x float> %c
+}
+
+;;;;;;;;;;;;;;;;;; min_f16
+define half @min_f16(half %a, half %b) {
+; AARCH64-LABEL: min_f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm h1, h1, h1
+; AARCH64-NEXT:    fminnm h0, h0, h0
+; AARCH64-NEXT:    fminnm h0, h0, h1
+; AARCH64-NEXT:    ret
+entry:
+  %c = call half @llvm.minimumnum.f16(half %a, half %b)
+  ret half %c
+}
+
+define <2 x half> @min_v2f16(<2 x half> %a, <2 x half> %b) {
+; AARCH64-LABEL: min_v2f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.4h, v1.4h, v1.4h
+; AARCH64-NEXT:    fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT:    fminnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <2 x half> @llvm.minimumnum.v2f16(<2 x half> %a, <2 x half> %b)
+  ret <2 x half> %c
+}
+
+define <4 x half> @min_v4f16(<4 x half> %a, <4 x half> %b) {
+; AARCH64-LABEL: min_v4f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.4h, v1.4h, v1.4h
+; AARCH64-NEXT:    fminnm v0.4h, v0.4h, v0.4h
+; AARCH64-NEXT:    fminnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <4 x half> @llvm.minimumnum.v4f16(<4 x half> %a, <4 x half> %b)
+  ret <4 x half> %c
+}
+
+define <8 x half> @min_v8f16(<8 x half> %a, <8 x half> %b) {
+; AARCH64-LABEL: min_v8f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.8h, v1.8h, v1.8h
+; AARCH64-NEXT:    fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT:    fminnm v0.8h, v0.8h, v1.8h
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <8 x half> @llvm.minimumnum.v8f16(<8 x half> %a, <8 x half> %b)
+  ret <8 x half> %c
+}
+
+define <9 x half> @min_v9f16(<9 x half> %a, <9 x half> %b) {
+; AARCH64-LABEL: min_v9f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    // kill: def $h0 killed $h0 def $q0
+; AARCH64-NEXT:    // kill: def $h1 killed $h1 def $q1
+; AARCH64-NEXT:    // kill: def $h2 killed $h2 def $q2
+; AARCH64-NEXT:    add x9, sp, #16
+; AARCH64-NEXT:    // kill: def $h3 killed $h3 def $q3
+; AARCH64-NEXT:    // kill: def $h4 killed $h4 def $q4
+; AARCH64-NEXT:    // kill: def $h5 killed $h5 def $q5
+; AARCH64-NEXT:    // kill: def $h6 killed $h6 def $q6
+; AARCH64-NEXT:    // kill: def $h7 killed $h7 def $q7
+; AARCH64-NEXT:    mov v0.h[1], v1.h[0]
+; AARCH64-NEXT:    ldr h1, [sp, #8]
+; AARCH64-NEXT:    ld1 { v1.h }[1], [x9]
+; AARCH64-NEXT:    add x9, sp, #24
+; AARCH64-NEXT:    mov v0.h[2], v2.h[0]
+; AARCH64-NEXT:    ldr h2, [sp]
+; AARCH64-NEXT:    ld1 { v1.h }[2], [x9]
+; AARCH64-NEXT:    add x9, sp, #32
+; AARCH64-NEXT:    fminnm v2.8h, v2.8h, v2.8h
+; AARCH64-NEXT:    mov v0.h[3], v3.h[0]
+; AARCH64-NEXT:    ld1 { v1.h }[3], [x9]
+; AARCH64-NEXT:    add x9, sp, #40
+; AARCH64-NEXT:    ldr h3, [sp, #72]
+; AARCH64-NEXT:    ld1 { v1.h }[4], [x9]
+; AARCH64-NEXT:    add x9, sp, #48
+; AARCH64-NEXT:    fminnm v3.8h, v3.8h, v3.8h
+; AARCH64-NEXT:    mov v0.h[4], v4.h[0]
+; AARCH64-NEXT:    ld1 { v1.h }[5], [x9]
+; AARCH64-NEXT:    add x9, sp, #56
+; AARCH64-NEXT:    fminnm v2.8h, v2.8h, v3.8h
+; AARCH64-NEXT:    mov v0.h[5], v5.h[0]
+; AARCH64-NEXT:    ld1 { v1.h }[6], [x9]
+; AARCH64-NEXT:    add x9, sp, #64
+; AARCH64-NEXT:    str h2, [x8, #16]
+; AARCH64-NEXT:    mov v0.h[6], v6.h[0]
+; AARCH64-NEXT:    ld1 { v1.h }[7], [x9]
+; AARCH64-NEXT:    fminnm v1.8h, v1.8h, v1.8h
+; AARCH64-NEXT:    mov v0.h[7], v7.h[0]
+; AARCH64-NEXT:    fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT:    fminnm v0.8h, v0.8h, v1.8h
+; AARCH64-NEXT:    str q0, [x8]
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <9 x half> @llvm.minimumnum.v9f16(<9 x half> %a, <9 x half> %b)
+  ret <9 x half> %c
+}
+
+define <16 x half> @min_v16f16(<16 x half> %a, <16 x half> %b) {
+; AARCH64-LABEL: min_v16f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v2.8h, v2.8h, v2.8h
+; AARCH64-NEXT:    fminnm v0.8h, v0.8h, v0.8h
+; AARCH64-NEXT:    fminnm v3.8h, v3.8h, v3.8h
+; AARCH64-NEXT:    fminnm v1.8h, v1.8h, v1.8h
+; AARCH64-NEXT:    fminnm v0.8h, v0.8h, v2.8h
+; AARCH64-NEXT:    fminnm v1.8h, v1.8h, v3.8h
+; AARCH64-NEXT:    ret
+entry:
+  %c = call <16 x half> @llvm.minimumnum.v16f16(<16 x half> %a, <16 x half> %b)
+  ret <16 x half> %c
+}

>From d314717bc2663cbe44339a0a5538d59e6cf11e8c Mon Sep 17 00:00:00 2001
From: YunQiang Su <wzssyqa at gmail.com>
Date: Thu, 10 Oct 2024 16:23:49 +0800
Subject: [PATCH 2/3] Update llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll

Co-authored-by: Matt Arsenault <arsenm2 at gmail.com>
---
 llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll b/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll
index 2b442dff2ca805..a64ebf8ff0387b 100644
--- a/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll
+++ b/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll
@@ -14,7 +14,7 @@ define half @fcanonicalize_half(half %x) {
   ret half %z
 }
 
-define half @fcanonicalize_half_nnan(half %x) {
-; AARCH64-LABEL: fcanonicalize_half_nnan:
+define half @fcanonicalize_f16_nnan(half %x) {
+; AARCH64-LABEL: fcanonicalize_f16_nnan:
 ; AARCH64:       // %bb.0:
 ; AARCH64-NEXT:    fminnm h0, h0, h0

>From 254e55dcc750051a664ee1bb0d0dccf1e535638f Mon Sep 17 00:00:00 2001
From: YunQiang Su <wzssyqa at gmail.com>
Date: Thu, 10 Oct 2024 16:24:01 +0800
Subject: [PATCH 3/3] Update llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll

Co-authored-by: Matt Arsenault <arsenm2 at gmail.com>
---
 llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll b/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll
index a64ebf8ff0387b..9eba7e5fa647a5 100644
--- a/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll
+++ b/llvm/test/CodeGen/AArch64/fp-fcanonicalize.ll
@@ -5,7 +5,7 @@ declare half @llvm.fcanonicalize.f16(half)
 declare float @llvm.fcanonicalize.f32(float)
 declare double @llvm.fcanonicalize.f64(double)
 
-define half @fcanonicalize_half(half %x) {
-; AARCH64-LABEL: fcanonicalize_half:
+define half @fcanonicalize_f16(half %x) {
+; AARCH64-LABEL: fcanonicalize_f16:
 ; AARCH64:       // %bb.0:
 ; AARCH64-NEXT:    fminnm h0, h0, h0



More information about the llvm-commits mailing list