[llvm] 483d4da - AMDGPU: Select strict_fma

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 4 14:49:11 PDT 2020


Author: Matt Arsenault
Date: 2020-06-04T17:49:00-04:00
New Revision: 483d4daa5e9b22b553a45eb91fe735fe81f6f067

URL: https://github.com/llvm/llvm-project/commit/483d4daa5e9b22b553a45eb91fe735fe81f6f067
DIFF: https://github.com/llvm/llvm-project/commit/483d4daa5e9b22b553a45eb91fe735fe81f6f067.diff

LOG: AMDGPU: Select strict_fma

As with strict_fadd, legalization currently scalarizes the v4f16 case when
it should split it into two v2f16 halves.
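
For context, any_fma is a PatFrags wrapper (defined in
llvm/include/llvm/Target/TargetSelectionDAG.td, not in this patch) that
matches both the strict and non-strict nodes, so the same VOP3 patterns now
select ISD::STRICT_FMA as well as ISD::FMA. A rough sketch of that
definition, paraphrased from the in-tree one:

    def any_fma : PatFrags<(ops node:$src1, node:$src2, node:$src3),
                           [(strict_fma node:$src1, node:$src2, node:$src3),
                            (fma node:$src1, node:$src2, node:$src3)]>;

The scalarization issue noted above is visible in the GFX9 checks of the
v4f16 test below, which emit four v_fma_f16 instead of two v_pk_fma_f16.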

Added: 
    llvm/test/CodeGen/AMDGPU/strict_fma.f16.ll
    llvm/test/CodeGen/AMDGPU/strict_fma.f32.ll
    llvm/test/CodeGen/AMDGPU/strict_fma.f64.ll

Modified: 
    llvm/lib/Target/AMDGPU/VOP3Instructions.td
    llvm/lib/Target/AMDGPU/VOP3PInstructions.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 36c68c70169e..00da0e277a2e 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -298,12 +298,12 @@ def V_MAD_F32 : VOP3Inst <"v_mad_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, fmad>;
 
 def V_MAD_I32_I24 : VOP3Inst <"v_mad_i32_i24", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
 def V_MAD_U32_U24 : VOP3Inst <"v_mad_u32_u24", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
-def V_FMA_F32 : VOP3Inst <"v_fma_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, fma>;
+def V_FMA_F32 : VOP3Inst <"v_fma_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, any_fma>;
 def V_LERP_U8 : VOP3Inst <"v_lerp_u8", VOP3_Profile<VOP_I32_I32_I32_I32>, int_amdgcn_lerp>;
 
 let SchedRW = [WriteDoubleAdd] in {
 let FPDPRounding = 1 in {
-def V_FMA_F64 : VOP3Inst <"v_fma_f64", VOP3_Profile<VOP_F64_F64_F64_F64>, fma>;
+def V_FMA_F64 : VOP3Inst <"v_fma_f64", VOP3_Profile<VOP_F64_F64_F64_F64>, any_fma>;
 def V_ADD_F64 : VOP3Inst <"v_add_f64", VOP3_Profile<VOP_F64_F64_F64>, any_fadd, 1>;
 def V_MUL_F64 : VOP3Inst <"v_mul_f64", VOP3_Profile<VOP_F64_F64_F64>, fmul, 1>;
 } // End FPDPRounding = 1
@@ -468,11 +468,11 @@ def V_DIV_FIXUP_F16_gfx9 : VOP3Inst <"v_div_fixup_f16_gfx9",
   let FPDPRounding = 1;
 }
 
-def V_FMA_F16 : VOP3Inst <"v_fma_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, fma> {
+def V_FMA_F16 : VOP3Inst <"v_fma_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, any_fma> {
   let Predicates = [Has16BitInsts, isGFX8Only];
   let FPDPRounding = 1;
 }
-def V_FMA_F16_gfx9 : VOP3Inst <"v_fma_f16_gfx9", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, fma> {
+def V_FMA_F16_gfx9 : VOP3Inst <"v_fma_f16_gfx9", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, any_fma> {
   let renamedInGFX9 = 1;
   let Predicates = [Has16BitInsts, isGFX9Plus];
   let FPDPRounding = 1;

diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index ad41586b54ee..225be74a3450 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -52,7 +52,7 @@ def V_PK_MAD_I16 : VOP3PInst<"v_pk_mad_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16_
 def V_PK_MAD_U16 : VOP3PInst<"v_pk_mad_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16_V2I16>>;
 
 let FPDPRounding = 1 in {
-def V_PK_FMA_F16 : VOP3PInst<"v_pk_fma_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16_V2F16>, fma>;
+def V_PK_FMA_F16 : VOP3PInst<"v_pk_fma_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16_V2F16>, any_fma>;
 def V_PK_ADD_F16 : VOP3PInst<"v_pk_add_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16>, any_fadd>;
 def V_PK_MUL_F16 : VOP3PInst<"v_pk_mul_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16>, fmul>;
 } // End FPDPRounding = 1

diff --git a/llvm/test/CodeGen/AMDGPU/strict_fma.f16.ll b/llvm/test/CodeGen/AMDGPU/strict_fma.f16.ll
new file mode 100644
index 000000000000..44ac9dab3911
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/strict_fma.f16.ll
@@ -0,0 +1,170 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji < %s | FileCheck -check-prefixes=GCN,GFX8 %s
+
+define half @v_constained_fma_f16_fpexcept_strict(half %x, half %y, half %z) #0 {
+; GCN-LABEL: v_constained_fma_f16_fpexcept_strict:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f16 v0, v0, v1, v2
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %val = call half @llvm.experimental.constrained.fma.f16(half %x, half %y, half %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret half %val
+}
+
+define <2 x half> @v_constained_fma_v2f16_fpexcept_strict(<2 x half> %x, <2 x half> %y, <2 x half> %z) #0 {
+; GFX9-LABEL: v_constained_fma_v2f16_fpexcept_strict:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_fma_f16 v0, v0, v1, v2
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_constained_fma_v2f16_fpexcept_strict:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b32_e32 v5, 16, v0
+; GFX8-NEXT:    v_lshrrev_b32_e32 v3, 16, v2
+; GFX8-NEXT:    v_lshrrev_b32_e32 v4, 16, v1
+; GFX8-NEXT:    v_fma_f16 v3, v5, v4, v3
+; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX8-NEXT:    v_fma_f16 v0, v0, v1, v2
+; GFX8-NEXT:    v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+  %val = call <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half> %x, <2 x half> %y, <2 x half> %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <2 x half> %val
+}
+
+define <3 x half> @v_constained_fma_v3f16_fpexcept_strict(<3 x half> %x, <3 x half> %y, <3 x half> %z) #0 {
+; GFX9-LABEL: v_constained_fma_v3f16_fpexcept_strict:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_fma_f16 v0, v0, v2, v4
+; GFX9-NEXT:    v_fma_f16 v1, v1, v3, v5
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_constained_fma_v3f16_fpexcept_strict:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b32_e32 v8, 16, v0
+; GFX8-NEXT:    v_lshrrev_b32_e32 v6, 16, v4
+; GFX8-NEXT:    v_lshrrev_b32_e32 v7, 16, v2
+; GFX8-NEXT:    v_fma_f16 v6, v8, v7, v6
+; GFX8-NEXT:    v_lshlrev_b32_e32 v6, 16, v6
+; GFX8-NEXT:    v_fma_f16 v0, v0, v2, v4
+; GFX8-NEXT:    v_or_b32_sdwa v0, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_fma_f16 v1, v1, v3, v5
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+  %val = call <3 x half> @llvm.experimental.constrained.fma.v3f16(<3 x half> %x, <3 x half> %y, <3 x half> %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <3 x half> %val
+}
+
+define <4 x half> @v_constained_fma_v4f16_fpexcept_strict(<4 x half> %x, <4 x half> %y, <4 x half> %z) #0 {
+; GFX9-LABEL: v_constained_fma_v4f16_fpexcept_strict:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_lshrrev_b32_e32 v8, 16, v1
+; GFX9-NEXT:    v_lshrrev_b32_e32 v6, 16, v5
+; GFX9-NEXT:    v_lshrrev_b32_e32 v7, 16, v3
+; GFX9-NEXT:    v_fma_f16 v6, v8, v7, v6
+; GFX9-NEXT:    v_lshrrev_b32_e32 v9, 16, v0
+; GFX9-NEXT:    v_lshrrev_b32_e32 v7, 16, v4
+; GFX9-NEXT:    v_lshrrev_b32_e32 v8, 16, v2
+; GFX9-NEXT:    v_fma_f16 v0, v0, v2, v4
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0xffff
+; GFX9-NEXT:    v_fma_f16 v1, v1, v3, v5
+; GFX9-NEXT:    v_and_b32_e32 v1, v2, v1
+; GFX9-NEXT:    v_fma_f16 v7, v9, v8, v7
+; GFX9-NEXT:    v_and_b32_e32 v0, v2, v0
+; GFX9-NEXT:    v_lshl_or_b32 v0, v7, 16, v0
+; GFX9-NEXT:    v_lshl_or_b32 v1, v6, 16, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_constained_fma_v4f16_fpexcept_strict:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b32_e32 v8, 16, v1
+; GFX8-NEXT:    v_lshrrev_b32_e32 v6, 16, v5
+; GFX8-NEXT:    v_lshrrev_b32_e32 v7, 16, v3
+; GFX8-NEXT:    v_fma_f16 v6, v8, v7, v6
+; GFX8-NEXT:    v_lshrrev_b32_e32 v9, 16, v0
+; GFX8-NEXT:    v_lshrrev_b32_e32 v7, 16, v4
+; GFX8-NEXT:    v_lshrrev_b32_e32 v8, 16, v2
+; GFX8-NEXT:    v_fma_f16 v7, v9, v8, v7
+; GFX8-NEXT:    v_fma_f16 v0, v0, v2, v4
+; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 16, v7
+; GFX8-NEXT:    v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_fma_f16 v1, v1, v3, v5
+; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 16, v6
+; GFX8-NEXT:    v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+  %val = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> %x, <4 x half> %y, <4 x half> %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <4 x half> %val
+}
+
+define half @v_constained_fma_f16_fpexcept_strict_fneg(half %x, half %y, half %z) #0 {
+; GCN-LABEL: v_constained_fma_f16_fpexcept_strict_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f16 v0, v0, v1, -v2
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %neg.z = fneg half %z
+  %val = call half @llvm.experimental.constrained.fma.f16(half %x, half %y, half %neg.z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret half %val
+}
+
+define half @v_constained_fma_f16_fpexcept_strict_fneg_fneg(half %x, half %y, half %z) #0 {
+; GCN-LABEL: v_constained_fma_f16_fpexcept_strict_fneg_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f16 v0, -v0, -v1, v2
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %neg.x = fneg half %x
+  %neg.y = fneg half %y
+  %val = call half @llvm.experimental.constrained.fma.f16(half %neg.x, half %neg.y, half %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret half %val
+}
+
+define half @v_constained_fma_f16_fpexcept_strict_fabs_fabs(half %x, half %y, half %z) #0 {
+; GCN-LABEL: v_constained_fma_f16_fpexcept_strict_fabs_fabs:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f16 v0, |v0|, |v1|, v2
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %neg.x = call half @llvm.fabs.f16(half %x)
+  %neg.y = call half @llvm.fabs.f16(half %y)
+  %val = call half @llvm.experimental.constrained.fma.f16(half %neg.x, half %neg.y, half %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret half %val
+}
+
+define <2 x half> @v_constained_fma_v2f16_fpexcept_strict_fneg_fneg(<2 x half> %x, <2 x half> %y, <2 x half> %z) #0 {
+; GFX9-LABEL: v_constained_fma_v2f16_fpexcept_strict_fneg_fneg:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_fma_f16 v0, v0, v1, v2 neg_lo:[1,1,0] neg_hi:[1,1,0]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_constained_fma_v2f16_fpexcept_strict_fneg_fneg:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b32_e32 v5, 16, v0
+; GFX8-NEXT:    v_lshrrev_b32_e32 v3, 16, v2
+; GFX8-NEXT:    v_lshrrev_b32_e32 v4, 16, v1
+; GFX8-NEXT:    v_fma_f16 v3, -v5, -v4, v3
+; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX8-NEXT:    v_fma_f16 v0, -v0, -v1, v2
+; GFX8-NEXT:    v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+  %neg.x = fneg <2 x half> %x
+  %neg.y = fneg <2 x half> %y
+  %val = call <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half> %neg.x, <2 x half> %neg.y, <2 x half> %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <2 x half> %val
+}
+
+declare half @llvm.fabs.f16(half) #1
+declare half @llvm.experimental.constrained.fma.f16(half, half, half, metadata, metadata) #1
+declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata) #1
+declare <3 x half> @llvm.experimental.constrained.fma.v3f16(<3 x half>, <3 x half>, <3 x half>, metadata, metadata) #1
+declare <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, metadata, metadata) #1
+
+attributes #0 = { strictfp }
+attributes #1 = { inaccessiblememonly nounwind willreturn }

diff --git a/llvm/test/CodeGen/AMDGPU/strict_fma.f32.ll b/llvm/test/CodeGen/AMDGPU/strict_fma.f32.ll
new file mode 100644
index 000000000000..31d81b02527b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/strict_fma.f32.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s
+
+define float @v_constained_fma_f32_fpexcept_strict(float %x, float %y, float %z) #0 {
+; GCN-LABEL: v_constained_fma_f32_fpexcept_strict:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f32 v0, v0, v1, v2
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %val = call float @llvm.experimental.constrained.fma.f32(float %x, float %y, float %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret float %val
+}
+
+define <2 x float> @v_constained_fma_v2f32_fpexcept_strict(<2 x float> %x, <2 x float> %y, <2 x float> %z) #0 {
+; GCN-LABEL: v_constained_fma_v2f32_fpexcept_strict:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f32 v0, v0, v2, v4
+; GCN-NEXT:    v_fma_f32 v1, v1, v3, v5
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %val = call <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float> %x, <2 x float> %y, <2 x float> %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <2 x float> %val
+}
+
+define <3 x float> @v_constained_fma_v3f32_fpexcept_strict(<3 x float> %x, <3 x float> %y, <3 x float> %z) #0 {
+; GCN-LABEL: v_constained_fma_v3f32_fpexcept_strict:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f32 v0, v0, v3, v6
+; GCN-NEXT:    v_fma_f32 v1, v1, v4, v7
+; GCN-NEXT:    v_fma_f32 v2, v2, v5, v8
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %val = call <3 x float> @llvm.experimental.constrained.fma.v3f32(<3 x float> %x, <3 x float> %y, <3 x float> %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <3 x float> %val
+}
+
+define <4 x float> @v_constained_fma_v4f32_fpexcept_strict(<4 x float> %x, <4 x float> %y, <4 x float> %z) #0 {
+; GCN-LABEL: v_constained_fma_v4f32_fpexcept_strict:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f32 v0, v0, v4, v8
+; GCN-NEXT:    v_fma_f32 v1, v1, v5, v9
+; GCN-NEXT:    v_fma_f32 v2, v2, v6, v10
+; GCN-NEXT:    v_fma_f32 v3, v3, v7, v11
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %val = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <4 x float> %val
+}
+
+define float @v_constained_fma_f32_fpexcept_strict_fneg(float %x, float %y, float %z) #0 {
+; GCN-LABEL: v_constained_fma_f32_fpexcept_strict_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f32 v0, v0, v1, -v2
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %neg.z = fneg float %z
+  %val = call float @llvm.experimental.constrained.fma.f32(float %x, float %y, float %neg.z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret float %val
+}
+
+define float @v_constained_fma_f32_fpexcept_strict_fneg_fneg(float %x, float %y, float %z) #0 {
+; GCN-LABEL: v_constained_fma_f32_fpexcept_strict_fneg_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f32 v0, -v0, -v1, v2
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %neg.x = fneg float %x
+  %neg.y = fneg float %y
+  %val = call float @llvm.experimental.constrained.fma.f32(float %neg.x, float %neg.y, float %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret float %val
+}
+
+define float @v_constained_fma_f32_fpexcept_strict_fabs_fabs(float %x, float %y, float %z) #0 {
+; GCN-LABEL: v_constained_fma_f32_fpexcept_strict_fabs_fabs:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f32 v0, |v0|, |v1|, v2
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %neg.x = call float @llvm.fabs.f32(float %x)
+  %neg.y = call float @llvm.fabs.f32(float %y)
+  %val = call float @llvm.experimental.constrained.fma.f32(float %neg.x, float %neg.y, float %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret float %val
+}
+
+define <2 x float> @v_constained_fma_v2f32_fpexcept_strict_fneg_fneg(<2 x float> %x, <2 x float> %y, <2 x float> %z) #0 {
+; GCN-LABEL: v_constained_fma_v2f32_fpexcept_strict_fneg_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f32 v0, -v0, -v2, v4
+; GCN-NEXT:    v_fma_f32 v1, -v1, -v3, v5
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %neg.x = fneg <2 x float> %x
+  %neg.y = fneg <2 x float> %y
+  %val = call <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float> %neg.x, <2 x float> %neg.y, <2 x float> %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <2 x float> %val
+}
+
+declare float @llvm.fabs.f32(float) #1
+declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata) #1
+declare <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata) #1
+declare <3 x float> @llvm.experimental.constrained.fma.v3f32(<3 x float>, <3 x float>, <3 x float>, metadata, metadata) #1
+declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata) #1
+
+attributes #0 = { strictfp }
+attributes #1 = { inaccessiblememonly nounwind willreturn }

diff --git a/llvm/test/CodeGen/AMDGPU/strict_fma.f64.ll b/llvm/test/CodeGen/AMDGPU/strict_fma.f64.ll
new file mode 100644
index 000000000000..e458b592f5a1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/strict_fma.f64.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s
+
+define double @v_constained_fma_f64_fpexcept_strict(double %x, double %y, double %z) #0 {
+; GCN-LABEL: v_constained_fma_f64_fpexcept_strict:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f64 v[0:1], v[0:1], v[2:3], v[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %val = call double @llvm.experimental.constrained.fma.f64(double %x, double %y, double %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret double %val
+}
+
+define <2 x double> @v_constained_fma_v2f64_fpexcept_strict(<2 x double> %x, <2 x double> %y, <2 x double> %z) #0 {
+; GCN-LABEL: v_constained_fma_v2f64_fpexcept_strict:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f64 v[0:1], v[0:1], v[4:5], v[8:9]
+; GCN-NEXT:    v_fma_f64 v[2:3], v[2:3], v[6:7], v[10:11]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %val = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %x, <2 x double> %y, <2 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <2 x double> %val
+}
+
+define <3 x double> @v_constained_fma_v3f64_fpexcept_strict(<3 x double> %x, <3 x double> %y, <3 x double> %z) #0 {
+; GCN-LABEL: v_constained_fma_v3f64_fpexcept_strict:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f64 v[0:1], v[0:1], v[6:7], v[12:13]
+; GCN-NEXT:    v_fma_f64 v[2:3], v[2:3], v[8:9], v[14:15]
+; GCN-NEXT:    v_fma_f64 v[4:5], v[4:5], v[10:11], v[16:17]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %val = call <3 x double> @llvm.experimental.constrained.fma.v3f64(<3 x double> %x, <3 x double> %y, <3 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <3 x double> %val
+}
+
+define <4 x double> @v_constained_fma_v4f64_fpexcept_strict(<4 x double> %x, <4 x double> %y, <4 x double> %z) #0 {
+; GCN-LABEL: v_constained_fma_v4f64_fpexcept_strict:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f64 v[0:1], v[0:1], v[8:9], v[16:17]
+; GCN-NEXT:    v_fma_f64 v[2:3], v[2:3], v[10:11], v[18:19]
+; GCN-NEXT:    v_fma_f64 v[4:5], v[4:5], v[12:13], v[20:21]
+; GCN-NEXT:    v_fma_f64 v[6:7], v[6:7], v[14:15], v[22:23]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %val = call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %x, <4 x double> %y, <4 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <4 x double> %val
+}
+
+define double @v_constained_fma_f64_fpexcept_strict_fneg(double %x, double %y, double %z) #0 {
+; GCN-LABEL: v_constained_fma_f64_fpexcept_strict_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f64 v[0:1], v[0:1], v[2:3], -v[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %neg.z = fneg double %z
+  %val = call double @llvm.experimental.constrained.fma.f64(double %x, double %y, double %neg.z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret double %val
+}
+
+define double @v_constained_fma_f64_fpexcept_strict_fneg_fneg(double %x, double %y, double %z) #0 {
+; GCN-LABEL: v_constained_fma_f64_fpexcept_strict_fneg_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f64 v[0:1], -v[0:1], -v[2:3], v[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %neg.x = fneg double %x
+  %neg.y = fneg double %y
+  %val = call double @llvm.experimental.constrained.fma.f64(double %neg.x, double %neg.y, double %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret double %val
+}
+
+define double @v_constained_fma_f64_fpexcept_strict_fabs_fabs(double %x, double %y, double %z) #0 {
+; GCN-LABEL: v_constained_fma_f64_fpexcept_strict_fabs_fabs:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f64 v[0:1], |v[0:1]|, |v[2:3]|, v[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %neg.x = call double @llvm.fabs.f64(double %x)
+  %neg.y = call double @llvm.fabs.f64(double %y)
+  %val = call double @llvm.experimental.constrained.fma.f64(double %neg.x, double %neg.y, double %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret double %val
+}
+
+define <2 x double> @v_constained_fma_v2f64_fpexcept_strict_fneg_fneg(<2 x double> %x, <2 x double> %y, <2 x double> %z) #0 {
+; GCN-LABEL: v_constained_fma_v2f64_fpexcept_strict_fneg_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_fma_f64 v[0:1], -v[0:1], -v[4:5], v[8:9]
+; GCN-NEXT:    v_fma_f64 v[2:3], -v[2:3], -v[6:7], v[10:11]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %neg.x = fneg <2 x double> %x
+  %neg.y = fneg <2 x double> %y
+  %val = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %neg.x, <2 x double> %neg.y, <2 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <2 x double> %val
+}
+
+declare double @llvm.fabs.f64(double) #1
+declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata) #1
+declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata) #1
+declare <3 x double> @llvm.experimental.constrained.fma.v3f64(<3 x double>, <3 x double>, <3 x double>, metadata, metadata) #1
+declare <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata) #1
+
+attributes #0 = { strictfp }
+attributes #1 = { inaccessiblememonly nounwind willreturn }


More information about the llvm-commits mailing list