[llvm] 9f27493 - AMDGPU: Handle the easy parts of strict fptrunc
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Sun Jun 25 16:26:32 PDT 2023
Author: Matt Arsenault
Date: 2023-06-25T19:26:25-04:00
New Revision: 9f274939db08b2dbd68cde130c99c71423fed28c
URL: https://github.com/llvm/llvm-project/commit/9f274939db08b2dbd68cde130c99c71423fed28c
DIFF: https://github.com/llvm/llvm-project/commit/9f274939db08b2dbd68cde130c99c71423fed28c.diff
LOG: AMDGPU: Handle the easy parts of strict fptrunc
f64->f16 is hard. The expansion is all integer, but we need
to raise exceptions. This also doesn't handle targets where f16 is illegal.
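For reference, the case this change now selects directly is the strict f32 -> f16
truncation. A minimal IR sketch mirroring the first case in the new test (the
function name here is illustrative; the f64 -> f16 form remains a FIXME in the test):

    define half @trunc_f32_to_f16_strict(float %arg) strictfp {
      ; on f16-legal targets this now lowers to a single v_cvt_f16_f32
      %val = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
      ret half %val
    }

    declare half @llvm.experimental.constrained.fptrunc.f16.f32(float, metadata, metadata)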
Added:
llvm/test/CodeGen/AMDGPU/strict_fptrunc.ll
Modified:
llvm/lib/Target/AMDGPU/SIISelLowering.cpp
llvm/lib/Target/AMDGPU/VOP1Instructions.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 5a35cb41fbaf9..c6a1523ea3eff 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -520,9 +520,9 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);
// F16 - VOP1 Actions.
- setOperationAction(
- {ISD::FP_ROUND, ISD::FCOS, ISD::FSIN, ISD::FROUND, ISD::FPTRUNC_ROUND},
- MVT::f16, Custom);
+ setOperationAction({ISD::FP_ROUND, ISD::STRICT_FP_ROUND, ISD::FCOS,
+ ISD::FSIN, ISD::FROUND, ISD::FPTRUNC_ROUND},
+ MVT::f16, Custom);
setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP}, MVT::i16, Custom);
@@ -4846,6 +4846,7 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::BUILD_VECTOR:
return lowerBUILD_VECTOR(Op, DAG);
case ISD::FP_ROUND:
+ case ISD::STRICT_FP_ROUND:
return lowerFP_ROUND(Op, DAG);
case ISD::FPTRUNC_ROUND: {
unsigned Opc;
@@ -5476,6 +5477,10 @@ SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
if (SrcVT != MVT::f64)
return Op;
+ // TODO: Handle strictfp
+ if (Op.getOpcode() != ISD::FP_ROUND)
+ return Op;
+
SDLoc DL(Op);
SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 200de4e39c020..ec38d22670567 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -290,9 +290,9 @@ defm V_CVT_U32_F32 : VOP1Inst <"v_cvt_u32_f32", VOP_I32_F32_SPECIAL_OMOD, fp_to_
defm V_CVT_I32_F32 : VOP1Inst <"v_cvt_i32_f32", VOP_I32_F32_SPECIAL_OMOD, fp_to_sint>;
let FPDPRounding = 1, isReMaterializable = 0 in {
let OtherPredicates = [NotHasTrue16BitInsts] in
- defm V_CVT_F16_F32 : VOP1Inst <"v_cvt_f16_f32", VOP_F16_F32, fpround>;
+ defm V_CVT_F16_F32 : VOP1Inst <"v_cvt_f16_f32", VOP_F16_F32, any_fpround>;
let OtherPredicates = [HasTrue16BitInsts] in
- defm V_CVT_F16_F32_t16 : VOP1Inst <"v_cvt_f16_f32_t16", VOPProfile_True16<VOP_F16_F32>, fpround>;
+ defm V_CVT_F16_F32_t16 : VOP1Inst <"v_cvt_f16_f32_t16", VOPProfile_True16<VOP_F16_F32>, any_fpround>;
} // End FPDPRounding = 1, isReMaterializable = 0
let OtherPredicates = [NotHasTrue16BitInsts] in
diff --git a/llvm/test/CodeGen/AMDGPU/strict_fptrunc.ll b/llvm/test/CodeGen/AMDGPU/strict_fptrunc.ll
new file mode 100644
index 0000000000000..ef1d497ce26a8
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/strict_fptrunc.ll
@@ -0,0 +1,254 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; XUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti < %s | FileCheck -check-prefixes=GCN,SI %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx803 < %s | FileCheck -check-prefixes=GCN,GFX89,GFX8 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX89,GFX9 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GCN,GFX1011,GFX10 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GCN,GFX1011,GFX11 %s
+
+define half @v_constrained_fptrunc_f32_to_f16_fpexcept_strict(float %arg) #0 {
+; GFX89-LABEL: v_constrained_fptrunc_f32_to_f16_fpexcept_strict:
+; GFX89: ; %bb.0:
+; GFX89-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX89-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX89-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1011-LABEL: v_constrained_fptrunc_f32_to_f16_fpexcept_strict:
+; GFX1011: ; %bb.0:
+; GFX1011-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1011-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX1011-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1011-NEXT: s_setpc_b64 s[30:31]
+ %val = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret half %val
+}
+
+define <2 x half> @v_constrained_fptrunc_v2f32_to_v2f16_fpexcept_strict(<2 x float> %arg) #0 {
+; GFX8-LABEL: v_constrained_fptrunc_v2f32_to_v2f16_fpexcept_strict:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX8-NEXT: v_cvt_f16_f32_sdwa v1, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_constrained_fptrunc_v2f32_to_v2f16_fpexcept_strict:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX9-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v1, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1011-LABEL: v_constrained_fptrunc_v2f32_to_v2f16_fpexcept_strict:
+; GFX1011: ; %bb.0:
+; GFX1011-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1011-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX1011-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1011-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX1011-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX1011-NEXT: s_setpc_b64 s[30:31]
+ %val = call <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f32(<2 x float> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret <2 x half> %val
+}
+
+define <3 x half> @v_constrained_fptrunc_v3f32_to_v3f16_fpexcept_strict(<3 x float> %arg) #0 {
+; GFX8-LABEL: v_constrained_fptrunc_v3f32_to_v3f16_fpexcept_strict:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cvt_f16_f32_sdwa v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX8-NEXT: v_cvt_f16_f32_e32 v1, v2
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_constrained_fptrunc_v3f32_to_v3f16_fpexcept_strict:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX9-NEXT: v_cvt_f16_f32_e32 v3, v1
+; GFX9-NEXT: v_cvt_f16_f32_e32 v1, v2
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v3, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1011-LABEL: v_constrained_fptrunc_v3f32_to_v3f16_fpexcept_strict:
+; GFX1011: ; %bb.0:
+; GFX1011-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1011-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX1011-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1011-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX1011-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX1011-NEXT: v_cvt_f16_f32_e32 v1, v2
+; GFX1011-NEXT: s_setpc_b64 s[30:31]
+ %val = call <3 x half> @llvm.experimental.constrained.fptrunc.v3f16.v3f32(<3 x float> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret <3 x half> %val
+}
+
+define float @v_constrained_fptrunc_f64_to_f32_fpexcept_strict(double %arg) #0 {
+; GFX89-LABEL: v_constrained_fptrunc_f64_to_f32_fpexcept_strict:
+; GFX89: ; %bb.0:
+; GFX89-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX89-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX89-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1011-LABEL: v_constrained_fptrunc_f64_to_f32_fpexcept_strict:
+; GFX1011: ; %bb.0:
+; GFX1011-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1011-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX1011-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX1011-NEXT: s_setpc_b64 s[30:31]
+ %val = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret float %val
+}
+
+define <2 x float> @v_constrained_fptrunc_v2f64_to_v2f32_fpexcept_strict(<2 x double> %arg) #0 {
+; GFX89-LABEL: v_constrained_fptrunc_v2f64_to_v2f32_fpexcept_strict:
+; GFX89: ; %bb.0:
+; GFX89-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX89-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX89-NEXT: v_cvt_f32_f64_e32 v1, v[2:3]
+; GFX89-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1011-LABEL: v_constrained_fptrunc_v2f64_to_v2f32_fpexcept_strict:
+; GFX1011: ; %bb.0:
+; GFX1011-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1011-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX1011-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX1011-NEXT: v_cvt_f32_f64_e32 v1, v[2:3]
+; GFX1011-NEXT: s_setpc_b64 s[30:31]
+ %val = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret <2 x float> %val
+}
+
+define <3 x float> @v_constrained_fptrunc_v3f64_to_v3f32_fpexcept_strict(<3 x double> %arg) #0 {
+; GFX89-LABEL: v_constrained_fptrunc_v3f64_to_v3f32_fpexcept_strict:
+; GFX89: ; %bb.0:
+; GFX89-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX89-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX89-NEXT: v_cvt_f32_f64_e32 v1, v[2:3]
+; GFX89-NEXT: v_cvt_f32_f64_e32 v2, v[4:5]
+; GFX89-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1011-LABEL: v_constrained_fptrunc_v3f64_to_v3f32_fpexcept_strict:
+; GFX1011: ; %bb.0:
+; GFX1011-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1011-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX1011-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX1011-NEXT: v_cvt_f32_f64_e32 v1, v[2:3]
+; GFX1011-NEXT: v_cvt_f32_f64_e32 v2, v[4:5]
+; GFX1011-NEXT: s_setpc_b64 s[30:31]
+ %val = call <3 x float> @llvm.experimental.constrained.fptrunc.v3f32.v3f64(<3 x double> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret <3 x float> %val
+}
+
+; FIXME:
+; define half @v_constrained_fptrunc_f64_to_f16_fpexcept_strict(double %arg) #0 {
+; %val = call half @llvm.experimental.constrained.fptrunc.f16.f64(double %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+; ret half %val
+; }
+
+; define <2 x half> @v_constrained_fptrunc_v2f64_to_v2f16_fpexcept_strict(<2 x double> %arg) #0 {
+; %val = call <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f64(<2 x double> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+; ret <2 x half> %val
+; }
+
+; define <3 x half> @v_constrained_fptrunc_v3f64_to_v3f16_fpexcept_strict(<3 x double> %arg) #0 {
+; %val = call <3 x half> @llvm.experimental.constrained.fptrunc.v3f16.v3f64(<3 x double> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+; ret <3 x half> %val
+; }
+
+define half @v_constrained_fneg_fptrunc_f32_to_f16_fpexcept_strict(float %arg) #0 {
+; GFX89-LABEL: v_constrained_fneg_fptrunc_f32_to_f16_fpexcept_strict:
+; GFX89: ; %bb.0:
+; GFX89-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX89-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX89-NEXT: v_xor_b32_e32 v0, 0x8000, v0
+; GFX89-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1011-LABEL: v_constrained_fneg_fptrunc_f32_to_f16_fpexcept_strict:
+; GFX1011: ; %bb.0:
+; GFX1011-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1011-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX1011-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1011-NEXT: v_xor_b32_e32 v0, 0x8000, v0
+; GFX1011-NEXT: s_setpc_b64 s[30:31]
+ %val = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ %neg.val = fneg half %val
+ ret half %neg.val
+}
+
+define half @v_constrained_fptrunc_fneg_f32_to_f16_fpexcept_strict(float %arg) #0 {
+; GFX89-LABEL: v_constrained_fptrunc_fneg_f32_to_f16_fpexcept_strict:
+; GFX89: ; %bb.0:
+; GFX89-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX89-NEXT: v_cvt_f16_f32_e64 v0, -v0
+; GFX89-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1011-LABEL: v_constrained_fptrunc_fneg_f32_to_f16_fpexcept_strict:
+; GFX1011: ; %bb.0:
+; GFX1011-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1011-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX1011-NEXT: v_cvt_f16_f32_e64 v0, -v0
+; GFX1011-NEXT: s_setpc_b64 s[30:31]
+ %neg.arg = fneg float %arg
+ %val = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %neg.arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret half %val
+}
+
+define float @v_constrained_fneg_fptrunc_f64_to_f32_fpexcept_strict(double %arg) #0 {
+; GFX89-LABEL: v_constrained_fneg_fptrunc_f64_to_f32_fpexcept_strict:
+; GFX89: ; %bb.0:
+; GFX89-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX89-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX89-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; GFX89-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1011-LABEL: v_constrained_fneg_fptrunc_f64_to_f32_fpexcept_strict:
+; GFX1011: ; %bb.0:
+; GFX1011-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1011-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX1011-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX1011-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; GFX1011-NEXT: s_setpc_b64 s[30:31]
+ %val = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ %neg.val = fneg float %val
+ ret float %neg.val
+}
+
+define float @v_constrained_fptrunc_fneg_f64_to_f32_fpexcept_strict(double %arg) #0 {
+; GFX89-LABEL: v_constrained_fptrunc_fneg_f64_to_f32_fpexcept_strict:
+; GFX89: ; %bb.0:
+; GFX89-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX89-NEXT: v_cvt_f32_f64_e64 v0, -v[0:1]
+; GFX89-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1011-LABEL: v_constrained_fptrunc_fneg_f64_to_f32_fpexcept_strict:
+; GFX1011: ; %bb.0:
+; GFX1011-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1011-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX1011-NEXT: v_cvt_f32_f64_e64 v0, -v[0:1]
+; GFX1011-NEXT: s_setpc_b64 s[30:31]
+ %neg.arg = fneg double %arg
+ %val = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %neg.arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret float %val
+}
+
+declare half @llvm.experimental.constrained.fptrunc.f16.f32(float, metadata, metadata) #1
+declare <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f32(<2 x float>, metadata, metadata) #1
+declare <3 x half> @llvm.experimental.constrained.fptrunc.v3f16.v3f32(<3 x float>, metadata, metadata) #1
+
+declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata) #1
+declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata) #1
+declare <3 x float> @llvm.experimental.constrained.fptrunc.v3f32.v3f64(<3 x double>, metadata, metadata) #1
+
+declare half @llvm.experimental.constrained.fptrunc.f16.f64(double, metadata, metadata) #1
+declare <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f64(<2 x double>, metadata, metadata) #1
+declare <3 x half> @llvm.experimental.constrained.fptrunc.v3f16.v3f64(<3 x double>, metadata, metadata) #1
+
+attributes #0 = { strictfp }
+attributes #1 = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GCN: {{.*}}
+; GFX10: {{.*}}
+; GFX11: {{.*}}