[llvm] [NVPTX] Add support for f16 fabs (PR #116107)
Alex MacLean via llvm-commits
llvm-commits at lists.llvm.org
Wed Nov 13 13:07:12 PST 2024
https://github.com/AlexMaclean created https://github.com/llvm/llvm-project/pull/116107
Add support for the f16 and f16x2 variants of abs. See PTX ISA 9.7.4.6, "Half Precision Floating Point Instructions: abs": https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#half-precision-floating-point-instructions-abs
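
For context, the patch below keys off the existing setFP16OperationAction helper in NVPTXISelLowering.cpp, which picks between two legalize actions depending on whether the subtarget allows f16 math (it does not on pre-sm_53 targets or under --nvptx-no-f16-math). A rough sketch of that behavior, assuming the helper is the thin wrapper it appears to be (not quoted verbatim from the source):

  // Sketch only: use Action when the subtarget allows f16 math,
  // otherwise fall back to NoF16Action.
  void NVPTXTargetLowering::setFP16OperationAction(unsigned Op, MVT VT,
                                                   LegalizeAction Action,
                                                   LegalizeAction NoF16Action) {
    setOperationAction(Op, VT, STI.allowFP16Math() ? Action : NoF16Action);
  }

With the change, FABS on f16/v2f16 is marked Legal only when the PTX version is at least 6.5 (where abs.f16 and abs.f16x2 first appear) and f16 math is allowed; otherwise the previous Promote/Expand lowering is kept.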
From ae0a7634fb95127bfa49f8a223b1c2d5120fe2a0 Mon Sep 17 00:00:00 2001
From: Alex Maclean <amaclean at nvidia.com>
Date: Mon, 11 Nov 2024 04:56:46 +0000
Subject: [PATCH] [NVPTX] Add support for f16 fabs
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 9 +-
llvm/test/CodeGen/NVPTX/f16-abs.ll | 99 +++++++++++++++++++++
2 files changed, 106 insertions(+), 2 deletions(-)
create mode 100644 llvm/test/CodeGen/NVPTX/f16-abs.ll
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 18b05b23da220b..b57af4518b2fc6 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -864,10 +864,15 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
AddPromotedToType(Op, MVT::bf16, MVT::f32);
}
for (const auto &Op : {ISD::FABS}) {
- setOperationAction(Op, MVT::f16, Promote);
setOperationAction(Op, MVT::f32, Legal);
setOperationAction(Op, MVT::f64, Legal);
- setOperationAction(Op, MVT::v2f16, Expand);
+ if (STI.getPTXVersion() >= 65) {
+ setFP16OperationAction(Op, MVT::f16, Legal, Promote);
+ setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
+ } else {
+ setOperationAction(Op, MVT::f16, Promote);
+ setOperationAction(Op, MVT::v2f16, Expand);
+ }
setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
if (getOperationAction(Op, MVT::bf16) == Promote)
diff --git a/llvm/test/CodeGen/NVPTX/f16-abs.ll b/llvm/test/CodeGen/NVPTX/f16-abs.ll
new file mode 100644
index 00000000000000..38cb8ce7ae93aa
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/f16-abs.ll
@@ -0,0 +1,99 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; ## Some FP16 support but not for abs
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 \
+; RUN: -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
+; RUN:   | FileCheck -allow-deprecated-dag-overlap -check-prefixes CHECK-NOF16,CHECK-F16 %s
+; RUN: %if ptxas %{ \
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 \
+; RUN: -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
+; RUN: | %ptxas-verify -arch=sm_53 \
+; RUN: %}
+
+; ## FP16 support explicitly disabled.
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 \
+; RUN: -O0 -disable-post-ra -frame-pointer=all --nvptx-no-f16-math \
+; RUN: -verify-machineinstrs \
+; RUN: | FileCheck -allow-deprecated-dag-overlap -check-prefixes CHECK-NOF16 %s
+; RUN: %if ptxas %{ \
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 \
+; RUN: -O0 -disable-post-ra -frame-pointer=all --nvptx-no-f16-math \
+; RUN: -verify-machineinstrs \
+; RUN: | %ptxas-verify -arch=sm_53 \
+; RUN: %}
+
+; ## FP16 is not supported by hardware.
+; RUN: llc < %s -O0 -mtriple=nvptx64-nvidia-cuda -mcpu=sm_52 \
+; RUN: -disable-post-ra -frame-pointer=all -verify-machineinstrs \
+; RUN: | FileCheck -allow-deprecated-dag-overlap -check-prefixes CHECK-NOF16 %s
+; RUN: %if ptxas %{ \
+; RUN: llc < %s -O0 -mtriple=nvptx64-nvidia-cuda -mcpu=sm_52 \
+; RUN: -disable-post-ra -frame-pointer=all -verify-machineinstrs \
+; RUN: | %ptxas-verify -arch=sm_52 \
+; RUN: %}
+
+; ## Full FP16 support.
+; RUN: llc < %s -O0 -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -mattr=+ptx70 \
+; RUN: -disable-post-ra -frame-pointer=all -verify-machineinstrs \
+; RUN: | FileCheck -allow-deprecated-dag-overlap -check-prefixes CHECK-F16-ABS %s
+; RUN: %if ptxas %{ \
+; RUN: llc < %s -O0 -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -mattr=+ptx70 \
+; RUN: -disable-post-ra -frame-pointer=all -verify-machineinstrs \
+; RUN: | %ptxas-verify -arch=sm_53 \
+; RUN: %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+declare half @llvm.fabs.f16(half %a)
+declare <2 x half> @llvm.fabs.v2f16(<2 x half> %a)
+
+define half @test_fabs(half %a) {
+; CHECK-NOF16-LABEL: test_fabs(
+; CHECK-NOF16: {
+; CHECK-NOF16-NEXT: .reg .b16 %rs<3>;
+; CHECK-NOF16-NEXT: .reg .f32 %f<3>;
+; CHECK-NOF16-EMPTY:
+; CHECK-NOF16-NEXT: // %bb.0:
+; CHECK-NOF16-NEXT: ld.param.b16 %rs1, [test_fabs_param_0];
+; CHECK-NOF16-NEXT: cvt.f32.f16 %f1, %rs1;
+; CHECK-NOF16-NEXT: abs.f32 %f2, %f1;
+; CHECK-NOF16-NEXT: cvt.rn.f16.f32 %rs2, %f2;
+; CHECK-NOF16-NEXT: st.param.b16 [func_retval0], %rs2;
+; CHECK-NOF16-NEXT: ret;
+;
+; CHECK-F16-ABS-LABEL: test_fabs(
+; CHECK-F16-ABS: {
+; CHECK-F16-ABS-NEXT: .reg .b16 %rs<3>;
+; CHECK-F16-ABS-EMPTY:
+; CHECK-F16-ABS-NEXT: // %bb.0:
+; CHECK-F16-ABS-NEXT: ld.param.b16 %rs1, [test_fabs_param_0];
+; CHECK-F16-ABS-NEXT: abs.f16 %rs2, %rs1;
+; CHECK-F16-ABS-NEXT: st.param.b16 [func_retval0], %rs2;
+; CHECK-F16-ABS-NEXT: ret;
+ %r = call half @llvm.fabs.f16(half %a)
+ ret half %r
+}
+
+define <2 x half> @test_fabs_2(<2 x half> %a) {
+; CHECK-F16-LABEL: test_fabs_2(
+; CHECK-F16: {
+; CHECK-F16-NEXT: .reg .b32 %r<5>;
+; CHECK-F16-EMPTY:
+; CHECK-F16-NEXT: // %bb.0:
+; CHECK-F16-NEXT: ld.param.b32 %r1, [test_fabs_2_param_0];
+; CHECK-F16-NEXT: and.b32 %r3, %r1, 2147450879;
+; CHECK-F16-NEXT: st.param.b32 [func_retval0], %r3;
+; CHECK-F16-NEXT: ret;
+;
+; CHECK-F16-ABS-LABEL: test_fabs_2(
+; CHECK-F16-ABS: {
+; CHECK-F16-ABS-NEXT: .reg .b32 %r<3>;
+; CHECK-F16-ABS-EMPTY:
+; CHECK-F16-ABS-NEXT: // %bb.0:
+; CHECK-F16-ABS-NEXT: ld.param.b32 %r1, [test_fabs_2_param_0];
+; CHECK-F16-ABS-NEXT: abs.f16x2 %r2, %r1;
+; CHECK-F16-ABS-NEXT: st.param.b32 [func_retval0], %r2;
+; CHECK-F16-ABS-NEXT: ret;
+ %r = call <2 x half> @llvm.fabs.v2f16(<2 x half> %a)
+ ret <2 x half> %r
+}
+
More information about the llvm-commits
mailing list