[llvm] AArch64: Add FMINNUM_IEEE and FMAXNUM_IEEE support (PR #107855)

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 30 09:56:52 PDT 2024


================
@@ -0,0 +1,190 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=aarch64 --mattr=+fullfp16 < %s | FileCheck %s --check-prefix=AARCH64
+
+define <2 x double> @max_nnan_v2f64(<2 x double> %a, <2 x double> %b) {
+; AARCH64-LABEL: max_nnan_v2f64:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan <2 x double> @llvm.maximumnum.v2f64(<2 x double> %a, <2 x double> %b)
+  ret <2 x double> %c
+}
+
+define <4 x float> @max_nnan_v4f32(<4 x float> %a, <4 x float> %b) {
+; AARCH64-LABEL: max_nnan_v4f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan <4 x float> @llvm.maximumnum.v4f32(<4 x float> %a, <4 x float> %b)
+  ret <4 x float> %c
+}
+
+define <8 x half> @max_nnan_v8f16(<8 x half> %a, <8 x half> %b) {
+; AARCH64-LABEL: max_nnan_v8f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm v0.8h, v0.8h, v1.8h
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan <8 x half> @llvm.maximumnum.v8f16(<8 x half> %a, <8 x half> %b)
+  ret <8 x half> %c
+}
+
+define <4 x double> @max_nnan_v4f64(<4 x double> %a, <4 x double> %b) {
+; AARCH64-LABEL: max_nnan_v4f64:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm v1.2d, v1.2d, v3.2d
+; AARCH64-NEXT:    fmaxnm v0.2d, v0.2d, v2.2d
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan <4 x double> @llvm.maximumnum.v4f64(<4 x double> %a, <4 x double> %b)
+  ret <4 x double> %c
+}
+
+define <8 x float> @max_nnan_v8f32(<8 x float> %a, <8 x float> %b) {
+; AARCH64-LABEL: max_nnan_v8f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm v1.4s, v1.4s, v3.4s
+; AARCH64-NEXT:    fmaxnm v0.4s, v0.4s, v2.4s
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan <8 x float> @llvm.maximumnum.v8f32(<8 x float> %a, <8 x float> %b)
+  ret <8 x float> %c
+}
+
+define <16 x half> @max_nnan_v16f16(<16 x half> %a, <16 x half> %b) {
+; AARCH64-LABEL: max_nnan_v16f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm v1.8h, v1.8h, v3.8h
+; AARCH64-NEXT:    fmaxnm v0.8h, v0.8h, v2.8h
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan <16 x half> @llvm.maximumnum.v16f16(<16 x half> %a, <16 x half> %b)
+  ret <16 x half> %c
+}
+
+
+define double @max_nnan_f64(double %a, double %b) {
+; AARCH64-LABEL: max_nnan_f64:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm d0, d0, d1
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan double @llvm.maximumnum.f64(double %a, double %b)
+  ret double %c
+}
+
+define float @max_nnan_f32(float %a, float %b) {
+; AARCH64-LABEL: max_nnan_f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm s0, s0, s1
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan float @llvm.maximumnum.f32(float %a, float %b)
+  ret float %c
+}
+
+define half @max_nnan_f16(half %a, half %b) {
+; AARCH64-LABEL: max_nnan_f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm h0, h0, h1
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan half @llvm.maximumnum.f16(half %a, half %b)
+  ret half %c
+}
+
+define <2 x double> @min_nnan_v2f64(<2 x double> %a, <2 x double> %b) {
+; AARCH64-LABEL: min_nnan_v2f64:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan <2 x double> @llvm.minimumnum.v2f64(<2 x double> %a, <2 x double> %b)
+  ret <2 x double> %c
+}
+
+define <4 x float> @min_nnan_v4f32(<4 x float> %a, <4 x float> %b) {
+; AARCH64-LABEL: min_nnan_v4f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan <4 x float> @llvm.minimumnum.v4f32(<4 x float> %a, <4 x float> %b)
+  ret <4 x float> %c
+}
+
+define <8 x half> @min_nnan_v8f16(<8 x half> %a, <8 x half> %b) {
+; AARCH64-LABEL: min_nnan_v8f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v0.8h, v0.8h, v1.8h
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan <8 x half> @llvm.minimumnum.v8f16(<8 x half> %a, <8 x half> %b)
+  ret <8 x half> %c
+}
+
+define <4 x double> @min_nnan_v4f64(<4 x double> %a, <4 x double> %b) {
+; AARCH64-LABEL: min_nnan_v4f64:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.2d, v1.2d, v3.2d
+; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v2.2d
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan <4 x double> @llvm.minimumnum.v4f64(<4 x double> %a, <4 x double> %b)
+  ret <4 x double> %c
+}
+
+define <8 x float> @min_nnan_v8f32(<8 x float> %a, <8 x float> %b) {
+; AARCH64-LABEL: min_nnan_v8f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.4s, v1.4s, v3.4s
+; AARCH64-NEXT:    fminnm v0.4s, v0.4s, v2.4s
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan <8 x float> @llvm.minimumnum.v8f32(<8 x float> %a, <8 x float> %b)
+  ret <8 x float> %c
+}
+
+define <16 x half> @min_nnan_v16f16(<16 x half> %a, <16 x half> %b) {
+; AARCH64-LABEL: min_nnan_v16f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm v1.8h, v1.8h, v3.8h
+; AARCH64-NEXT:    fminnm v0.8h, v0.8h, v2.8h
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan <16 x half> @llvm.minimumnum.v16f16(<16 x half> %a, <16 x half> %b)
+  ret <16 x half> %c
+}
+
+define double @min_nnan_f64(double %a, double %b) {
+; AARCH64-LABEL: min_nnan_f64:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm d0, d0, d1
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan double @llvm.minimumnum.f64(double %a, double %b)
+  ret double %c
+}
+
+define float @min_nnan_f32(float %a, float %b) {
+; AARCH64-LABEL: min_nnan_f32:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s0, s0, s1
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan float @llvm.minimumnum.f32(float %a, float %b)
+  ret float %c
+}
+
+define half @min_nnan_f16(half %a, half %b) {
+; AARCH64-LABEL: min_nnan_f16:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm h0, h0, h1
+; AARCH64-NEXT:    ret
+entry:
+  %c = call nnan half @llvm.minimumnum.f16(half %a, half %b)
----------------
arsenm wrote:

2 x half and 4 x half aren't tested, plus other types as well

https://github.com/llvm/llvm-project/pull/107855


More information about the llvm-commits mailing list