[llvm] daa022c - Enable roundeven.
Leon Clark via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 20 07:40:37 PST 2022
Author: Leon Clark
Date: 2022-12-20T15:40:20Z
New Revision: daa022ca5707d8ddde987d3c8c2aea6bd9954e3f
URL: https://github.com/llvm/llvm-project/commit/daa022ca5707d8ddde987d3c8c2aea6bd9954e3f
DIFF: https://github.com/llvm/llvm-project/commit/daa022ca5707d8ddde987d3c8c2aea6bd9954e3f.diff
LOG: Enable roundeven.
Add SelectionDAG lowering for the roundeven intrinsic on AMDGPU, and move the GlobalISel roundeven tests to a shared file with added SelectionDAG run lines.
Reviewed By: arsenm
Differential Revision: https://reviews.llvm.org/D137954
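As a minimal illustration (the function name below is illustrative, not from the commit): with this change the SelectionDAG path lowers IR like the following to the hardware round-to-nearest-even instruction (v_rndne_f32 on the targets covered by the test), where previously only GlobalISel handled it.

define float @example(float %x) {
  %r = call float @llvm.roundeven.f32(float %x)
  ret float %r
}

declare float @llvm.roundeven.f32(float)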
Added:
llvm/test/CodeGen/AMDGPU/roundeven.ll
Modified:
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
Removed:
llvm/test/CodeGen/AMDGPU/GlobalISel/roundeven.ll
################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index a95f4123aa777..27ea9c3913a32 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -332,6 +332,8 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FNEARBYINT, {MVT::f16, MVT::f32, MVT::f64}, Custom);
+ setOperationAction(ISD::FROUNDEVEN, {MVT::f16, MVT::f32, MVT::f64}, Custom);
+
setOperationAction(ISD::FREM, {MVT::f16, MVT::f32, MVT::f64}, Custom);
if (Subtarget->has16BitInsts())
@@ -1252,6 +1254,8 @@ SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
case ISD::FRINT: return LowerFRINT(Op, DAG);
case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
+ case ISD::FROUNDEVEN:
+ return LowerFROUNDEVEN(Op, DAG);
case ISD::FROUND: return LowerFROUND(Op, DAG);
case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
case ISD::FLOG:
@@ -2244,6 +2248,13 @@ SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) con
return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
}
+SDValue AMDGPUTargetLowering::LowerFROUNDEVEN(SDValue Op,
+ SelectionDAG &DAG) const {
+ auto VT = Op.getValueType();
+ auto Arg = Op.getOperand(0u);
+ return DAG.getNode(ISD::FRINT, SDLoc(Op), VT, Arg);
+}
+
// XXX - May require not supporting f32 denormals?
// Don't handle v2f16. The extra instructions to scalarize and repack around the
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index ef8539f8ccc1d..bc3b57a82d086 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -57,6 +57,7 @@ class AMDGPUTargetLowering : public TargetLowering {
SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFROUNDEVEN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLOG(SDValue Op, SelectionDAG &DAG,
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/roundeven.ll b/llvm/test/CodeGen/AMDGPU/roundeven.ll
similarity index 51%
rename from llvm/test/CodeGen/AMDGPU/GlobalISel/roundeven.ll
rename to llvm/test/CodeGen/AMDGPU/roundeven.ll
index e263c2e5be17c..0326d4895e853 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/roundeven.ll
+++ b/llvm/test/CodeGen/AMDGPU/roundeven.ll
@@ -5,6 +5,12 @@
; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s
; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11 %s
+; RUN: llc -global-isel=0 -march=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefix=SDAG_GFX6 %s
+; RUN: llc -global-isel=0 -march=amdgcn -mcpu=hawaii < %s | FileCheck -check-prefix=SDAG_GFX7 %s
+; RUN: llc -global-isel=0 -march=amdgcn -mcpu=fiji < %s | FileCheck -check-prefix=SDAG_GFX8 %s
+; RUN: llc -global-isel=0 -march=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefix=SDAG_GFX9 %s
+; RUN: llc -global-isel=0 -march=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=SDAG_GFX10PLUS,SDAG_GFX10 %s
+; RUN: llc -global-isel=0 -march=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=SDAG_GFX10PLUS,SDAG_GFX11 %s
define float @v_roundeven_f32(float %x) {
; GFX6-LABEL: v_roundeven_f32:
@@ -37,6 +43,37 @@ define float @v_roundeven_f32(float %x) {
; GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10PLUS-NEXT: v_rndne_f32_e32 v0, v0
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX6-LABEL: v_roundeven_f32:
+; SDAG_GFX6: ; %bb.0:
+; SDAG_GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX7-LABEL: v_roundeven_f32:
+; SDAG_GFX7: ; %bb.0:
+; SDAG_GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX8-LABEL: v_roundeven_f32:
+; SDAG_GFX8: ; %bb.0:
+; SDAG_GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX8-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX9-LABEL: v_roundeven_f32:
+; SDAG_GFX9: ; %bb.0:
+; SDAG_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX9-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX10PLUS-LABEL: v_roundeven_f32:
+; SDAG_GFX10PLUS: ; %bb.0:
+; SDAG_GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX10PLUS-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
%roundeven = call float @llvm.roundeven.f32(float %x)
ret float %roundeven
}
@@ -77,6 +114,42 @@ define <2 x float> @v_roundeven_v2f32(<2 x float> %x) {
; GFX10PLUS-NEXT: v_rndne_f32_e32 v0, v0
; GFX10PLUS-NEXT: v_rndne_f32_e32 v1, v1
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX6-LABEL: v_roundeven_v2f32:
+; SDAG_GFX6: ; %bb.0:
+; SDAG_GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX7-LABEL: v_roundeven_v2f32:
+; SDAG_GFX7: ; %bb.0:
+; SDAG_GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX8-LABEL: v_roundeven_v2f32:
+; SDAG_GFX8: ; %bb.0:
+; SDAG_GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX8-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX8-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX9-LABEL: v_roundeven_v2f32:
+; SDAG_GFX9: ; %bb.0:
+; SDAG_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX9-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX9-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX10PLUS-LABEL: v_roundeven_v2f32:
+; SDAG_GFX10PLUS: ; %bb.0:
+; SDAG_GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX10PLUS-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX10PLUS-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
%roundeven = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %x)
ret <2 x float> %roundeven
}
@@ -122,6 +195,47 @@ define <3 x float> @v_roundeven_v3f32(<3 x float> %x) {
; GFX10PLUS-NEXT: v_rndne_f32_e32 v1, v1
; GFX10PLUS-NEXT: v_rndne_f32_e32 v2, v2
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX6-LABEL: v_roundeven_v3f32:
+; SDAG_GFX6: ; %bb.0:
+; SDAG_GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v2, v2
+; SDAG_GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX7-LABEL: v_roundeven_v3f32:
+; SDAG_GFX7: ; %bb.0:
+; SDAG_GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v2, v2
+; SDAG_GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX8-LABEL: v_roundeven_v3f32:
+; SDAG_GFX8: ; %bb.0:
+; SDAG_GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX8-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX8-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX8-NEXT: v_rndne_f32_e32 v2, v2
+; SDAG_GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX9-LABEL: v_roundeven_v3f32:
+; SDAG_GFX9: ; %bb.0:
+; SDAG_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX9-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX9-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX9-NEXT: v_rndne_f32_e32 v2, v2
+; SDAG_GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX10PLUS-LABEL: v_roundeven_v3f32:
+; SDAG_GFX10PLUS: ; %bb.0:
+; SDAG_GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX10PLUS-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX10PLUS-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX10PLUS-NEXT: v_rndne_f32_e32 v2, v2
+; SDAG_GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
%roundeven = call <3 x float> @llvm.roundeven.v3f32(<3 x float> %x)
ret <3 x float> %roundeven
}
@@ -172,6 +286,52 @@ define <4 x float> @v_roundeven_v4f32(<4 x float> %x) {
; GFX10PLUS-NEXT: v_rndne_f32_e32 v2, v2
; GFX10PLUS-NEXT: v_rndne_f32_e32 v3, v3
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX6-LABEL: v_roundeven_v4f32:
+; SDAG_GFX6: ; %bb.0:
+; SDAG_GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v2, v2
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v3, v3
+; SDAG_GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX7-LABEL: v_roundeven_v4f32:
+; SDAG_GFX7: ; %bb.0:
+; SDAG_GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v2, v2
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v3, v3
+; SDAG_GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX8-LABEL: v_roundeven_v4f32:
+; SDAG_GFX8: ; %bb.0:
+; SDAG_GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX8-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX8-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX8-NEXT: v_rndne_f32_e32 v2, v2
+; SDAG_GFX8-NEXT: v_rndne_f32_e32 v3, v3
+; SDAG_GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX9-LABEL: v_roundeven_v4f32:
+; SDAG_GFX9: ; %bb.0:
+; SDAG_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX9-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX9-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX9-NEXT: v_rndne_f32_e32 v2, v2
+; SDAG_GFX9-NEXT: v_rndne_f32_e32 v3, v3
+; SDAG_GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX10PLUS-LABEL: v_roundeven_v4f32:
+; SDAG_GFX10PLUS: ; %bb.0:
+; SDAG_GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX10PLUS-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX10PLUS-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX10PLUS-NEXT: v_rndne_f32_e32 v2, v2
+; SDAG_GFX10PLUS-NEXT: v_rndne_f32_e32 v3, v3
+; SDAG_GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
%roundeven = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %x)
ret <4 x float> %roundeven
}
@@ -211,6 +371,41 @@ define half @v_roundeven_f16(half %x) {
; GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10PLUS-NEXT: v_rndne_f16_e32 v0, v0
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX6-LABEL: v_roundeven_f16:
+; SDAG_GFX6: ; %bb.0:
+; SDAG_GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX6-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SDAG_GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX7-LABEL: v_roundeven_f16:
+; SDAG_GFX7: ; %bb.0:
+; SDAG_GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SDAG_GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX8-LABEL: v_roundeven_f16:
+; SDAG_GFX8: ; %bb.0:
+; SDAG_GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX8-NEXT: v_rndne_f16_e32 v0, v0
+; SDAG_GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX9-LABEL: v_roundeven_f16:
+; SDAG_GFX9: ; %bb.0:
+; SDAG_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX9-NEXT: v_rndne_f16_e32 v0, v0
+; SDAG_GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX10PLUS-LABEL: v_roundeven_f16:
+; SDAG_GFX10PLUS: ; %bb.0:
+; SDAG_GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX10PLUS-NEXT: v_rndne_f16_e32 v0, v0
+; SDAG_GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
%roundeven = call half @llvm.roundeven.f16(half %x)
ret half %roundeven
}
@@ -274,6 +469,63 @@ define <2 x half> @v_roundeven_v2f16(<2 x half> %x) {
; GFX11-NEXT: v_rndne_f16_e32 v1, v1
; GFX11-NEXT: v_pack_b32_f16 v0, v0, v1
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX6-LABEL: v_roundeven_v2f16:
+; SDAG_GFX6: ; %bb.0:
+; SDAG_GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX6-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SDAG_GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SDAG_GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SDAG_GFX6-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX7-LABEL: v_roundeven_v2f16:
+; SDAG_GFX7: ; %bb.0:
+; SDAG_GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SDAG_GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SDAG_GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SDAG_GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX8-LABEL: v_roundeven_v2f16:
+; SDAG_GFX8: ; %bb.0:
+; SDAG_GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX8-NEXT: v_rndne_f16_sdwa v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG_GFX8-NEXT: v_rndne_f16_e32 v0, v0
+; SDAG_GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; SDAG_GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX9-LABEL: v_roundeven_v2f16:
+; SDAG_GFX9: ; %bb.0:
+; SDAG_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX9-NEXT: v_rndne_f16_sdwa v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG_GFX9-NEXT: v_rndne_f16_e32 v0, v0
+; SDAG_GFX9-NEXT: v_pack_b32_f16 v0, v0, v1
+; SDAG_GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX10-LABEL: v_roundeven_v2f16:
+; SDAG_GFX10: ; %bb.0:
+; SDAG_GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX10-NEXT: v_rndne_f16_sdwa v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG_GFX10-NEXT: v_rndne_f16_e32 v0, v0
+; SDAG_GFX10-NEXT: v_pack_b32_f16 v0, v0, v1
+; SDAG_GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX11-LABEL: v_roundeven_v2f16:
+; SDAG_GFX11: ; %bb.0:
+; SDAG_GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; SDAG_GFX11-NEXT: v_rndne_f16_e32 v0, v0
+; SDAG_GFX11-NEXT: v_rndne_f16_e32 v1, v1
+; SDAG_GFX11-NEXT: v_pack_b32_f16 v0, v0, v1
+; SDAG_GFX11-NEXT: s_setpc_b64 s[30:31]
%roundeven = call <2 x half> @llvm.roundeven.v2f16(<2 x half> %x)
ret <2 x half> %roundeven
}
@@ -351,6 +603,71 @@ define <2 x half> @v_roundeven_v2f16_fneg(<2 x half> %x) {
; GFX11-NEXT: v_rndne_f16_e32 v1, v1
; GFX11-NEXT: v_pack_b32_f16 v0, v0, v1
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX6-LABEL: v_roundeven_v2f16_fneg:
+; SDAG_GFX6: ; %bb.0:
+; SDAG_GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SDAG_GFX6-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SDAG_GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SDAG_GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; SDAG_GFX6-NEXT: v_xor_b32_e32 v0, 0x80008000, v0
+; SDAG_GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; SDAG_GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SDAG_GFX6-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX7-LABEL: v_roundeven_v2f16_fneg:
+; SDAG_GFX7: ; %bb.0:
+; SDAG_GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SDAG_GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SDAG_GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SDAG_GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; SDAG_GFX7-NEXT: v_xor_b32_e32 v0, 0x80008000, v0
+; SDAG_GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; SDAG_GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SDAG_GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX8-LABEL: v_roundeven_v2f16_fneg:
+; SDAG_GFX8: ; %bb.0:
+; SDAG_GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX8-NEXT: v_rndne_f16_sdwa v1, -v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG_GFX8-NEXT: v_rndne_f16_e64 v0, -v0
+; SDAG_GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; SDAG_GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX9-LABEL: v_roundeven_v2f16_fneg:
+; SDAG_GFX9: ; %bb.0:
+; SDAG_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX9-NEXT: v_rndne_f16_sdwa v1, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG_GFX9-NEXT: v_rndne_f16_e64 v0, -v0
+; SDAG_GFX9-NEXT: v_pack_b32_f16 v0, v0, v1
+; SDAG_GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX10-LABEL: v_roundeven_v2f16_fneg:
+; SDAG_GFX10: ; %bb.0:
+; SDAG_GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX10-NEXT: v_rndne_f16_sdwa v1, -v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG_GFX10-NEXT: v_rndne_f16_e64 v0, -v0
+; SDAG_GFX10-NEXT: v_pack_b32_f16 v0, v0, v1
+; SDAG_GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX11-LABEL: v_roundeven_v2f16_fneg:
+; SDAG_GFX11: ; %bb.0:
+; SDAG_GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; SDAG_GFX11-NEXT: v_rndne_f16_e64 v0, -v0
+; SDAG_GFX11-NEXT: v_rndne_f16_e64 v1, -v1
+; SDAG_GFX11-NEXT: v_pack_b32_f16 v0, v0, v1
+; SDAG_GFX11-NEXT: s_setpc_b64 s[30:31]
%x.fneg = fneg <2 x half> %x
%roundeven = call <2 x half> @llvm.roundeven.v2f16(<2 x half> %x.fneg)
ret <2 x half> %roundeven
@@ -441,6 +758,88 @@ define <4 x half> @v_roundeven_v4f16(<4 x half> %x) {
; GFX11-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX11-NEXT: v_pack_b32_f16 v1, v1, v3
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX6-LABEL: v_roundeven_v4f16:
+; SDAG_GFX6: ; %bb.0:
+; SDAG_GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX6-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SDAG_GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SDAG_GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SDAG_GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SDAG_GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SDAG_GFX6-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SDAG_GFX6-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SDAG_GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v2, v2
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v3, v3
+; SDAG_GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX7-LABEL: v_roundeven_v4f16:
+; SDAG_GFX7: ; %bb.0:
+; SDAG_GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SDAG_GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SDAG_GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SDAG_GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SDAG_GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SDAG_GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SDAG_GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SDAG_GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v0, v0
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v1, v1
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v2, v2
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v3, v3
+; SDAG_GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX8-LABEL: v_roundeven_v4f16:
+; SDAG_GFX8: ; %bb.0:
+; SDAG_GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX8-NEXT: v_rndne_f16_sdwa v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG_GFX8-NEXT: v_rndne_f16_sdwa v3, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG_GFX8-NEXT: v_rndne_f16_e32 v1, v1
+; SDAG_GFX8-NEXT: v_rndne_f16_e32 v0, v0
+; SDAG_GFX8-NEXT: v_or_b32_e32 v0, v0, v3
+; SDAG_GFX8-NEXT: v_or_b32_e32 v1, v1, v2
+; SDAG_GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX9-LABEL: v_roundeven_v4f16:
+; SDAG_GFX9: ; %bb.0:
+; SDAG_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX9-NEXT: v_rndne_f16_sdwa v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG_GFX9-NEXT: v_rndne_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG_GFX9-NEXT: v_rndne_f16_e32 v1, v1
+; SDAG_GFX9-NEXT: v_rndne_f16_e32 v0, v0
+; SDAG_GFX9-NEXT: v_pack_b32_f16 v0, v0, v3
+; SDAG_GFX9-NEXT: v_pack_b32_f16 v1, v1, v2
+; SDAG_GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX10-LABEL: v_roundeven_v4f16:
+; SDAG_GFX10: ; %bb.0:
+; SDAG_GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX10-NEXT: v_rndne_f16_sdwa v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG_GFX10-NEXT: v_rndne_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; SDAG_GFX10-NEXT: v_rndne_f16_e32 v0, v0
+; SDAG_GFX10-NEXT: v_rndne_f16_e32 v1, v1
+; SDAG_GFX10-NEXT: v_pack_b32_f16 v0, v0, v3
+; SDAG_GFX10-NEXT: v_pack_b32_f16 v1, v1, v2
+; SDAG_GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX11-LABEL: v_roundeven_v4f16:
+; SDAG_GFX11: ; %bb.0:
+; SDAG_GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; SDAG_GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; SDAG_GFX11-NEXT: v_rndne_f16_e32 v1, v1
+; SDAG_GFX11-NEXT: v_rndne_f16_e32 v0, v0
+; SDAG_GFX11-NEXT: v_rndne_f16_e32 v2, v2
+; SDAG_GFX11-NEXT: v_rndne_f16_e32 v3, v3
+; SDAG_GFX11-NEXT: v_pack_b32_f16 v0, v0, v2
+; SDAG_GFX11-NEXT: v_pack_b32_f16 v1, v1, v3
+; SDAG_GFX11-NEXT: s_setpc_b64 s[30:31]
%roundeven = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %x)
ret <4 x half> %roundeven
}
@@ -477,6 +876,37 @@ define float @v_roundeven_f32_fabs(float %x) {
; GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10PLUS-NEXT: v_rndne_f32_e64 v0, |v0|
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX6-LABEL: v_roundeven_f32_fabs:
+; SDAG_GFX6: ; %bb.0:
+; SDAG_GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX6-NEXT: v_rndne_f32_e64 v0, |v0|
+; SDAG_GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX7-LABEL: v_roundeven_f32_fabs:
+; SDAG_GFX7: ; %bb.0:
+; SDAG_GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX7-NEXT: v_rndne_f32_e64 v0, |v0|
+; SDAG_GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX8-LABEL: v_roundeven_f32_fabs:
+; SDAG_GFX8: ; %bb.0:
+; SDAG_GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX8-NEXT: v_rndne_f32_e64 v0, |v0|
+; SDAG_GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX9-LABEL: v_roundeven_f32_fabs:
+; SDAG_GFX9: ; %bb.0:
+; SDAG_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX9-NEXT: v_rndne_f32_e64 v0, |v0|
+; SDAG_GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX10PLUS-LABEL: v_roundeven_f32_fabs:
+; SDAG_GFX10PLUS: ; %bb.0:
+; SDAG_GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX10PLUS-NEXT: v_rndne_f32_e64 v0, |v0|
+; SDAG_GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
%fabs.x = call float @llvm.fabs.f32(float %x)
%roundeven = call float @llvm.roundeven.f32(float %fabs.x)
ret float %roundeven
@@ -507,6 +937,31 @@ define amdgpu_ps float @s_roundeven_f32(float inreg %x) {
; GFX10PLUS: ; %bb.0:
; GFX10PLUS-NEXT: v_rndne_f32_e32 v0, s0
; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; SDAG_GFX6-LABEL: s_roundeven_f32:
+; SDAG_GFX6: ; %bb.0:
+; SDAG_GFX6-NEXT: v_rndne_f32_e32 v0, s0
+; SDAG_GFX6-NEXT: ; return to shader part epilog
+;
+; SDAG_GFX7-LABEL: s_roundeven_f32:
+; SDAG_GFX7: ; %bb.0:
+; SDAG_GFX7-NEXT: v_rndne_f32_e32 v0, s0
+; SDAG_GFX7-NEXT: ; return to shader part epilog
+;
+; SDAG_GFX8-LABEL: s_roundeven_f32:
+; SDAG_GFX8: ; %bb.0:
+; SDAG_GFX8-NEXT: v_rndne_f32_e32 v0, s0
+; SDAG_GFX8-NEXT: ; return to shader part epilog
+;
+; SDAG_GFX9-LABEL: s_roundeven_f32:
+; SDAG_GFX9: ; %bb.0:
+; SDAG_GFX9-NEXT: v_rndne_f32_e32 v0, s0
+; SDAG_GFX9-NEXT: ; return to shader part epilog
+;
+; SDAG_GFX10PLUS-LABEL: s_roundeven_f32:
+; SDAG_GFX10PLUS: ; %bb.0:
+; SDAG_GFX10PLUS-NEXT: v_rndne_f32_e32 v0, s0
+; SDAG_GFX10PLUS-NEXT: ; return to shader part epilog
%roundeven = call float @llvm.roundeven.f32(float %x)
ret float %roundeven
}
@@ -542,6 +997,37 @@ define float @v_roundeven_f32_fneg(float %x) {
; GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10PLUS-NEXT: v_rndne_f32_e64 v0, -v0
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX6-LABEL: v_roundeven_f32_fneg:
+; SDAG_GFX6: ; %bb.0:
+; SDAG_GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX6-NEXT: v_rndne_f32_e64 v0, -v0
+; SDAG_GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX7-LABEL: v_roundeven_f32_fneg:
+; SDAG_GFX7: ; %bb.0:
+; SDAG_GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX7-NEXT: v_rndne_f32_e64 v0, -v0
+; SDAG_GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX8-LABEL: v_roundeven_f32_fneg:
+; SDAG_GFX8: ; %bb.0:
+; SDAG_GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX8-NEXT: v_rndne_f32_e64 v0, -v0
+; SDAG_GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX9-LABEL: v_roundeven_f32_fneg:
+; SDAG_GFX9: ; %bb.0:
+; SDAG_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX9-NEXT: v_rndne_f32_e64 v0, -v0
+; SDAG_GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX10PLUS-LABEL: v_roundeven_f32_fneg:
+; SDAG_GFX10PLUS: ; %bb.0:
+; SDAG_GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX10PLUS-NEXT: v_rndne_f32_e64 v0, -v0
+; SDAG_GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
%neg.x = fneg float %x
%roundeven = call float @llvm.roundeven.f32(float %neg.x)
ret float %roundeven
@@ -587,6 +1073,47 @@ define double @v_roundeven_f64(double %x) {
; GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10PLUS-NEXT: v_rndne_f64_e32 v[0:1], v[0:1]
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX6-LABEL: v_roundeven_f64:
+; SDAG_GFX6: ; %bb.0:
+; SDAG_GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX6-NEXT: s_brev_b32 s4, -2
+; SDAG_GFX6-NEXT: v_mov_b32_e32 v2, 0x43300000
+; SDAG_GFX6-NEXT: v_bfi_b32 v3, s4, v2, v1
+; SDAG_GFX6-NEXT: v_mov_b32_e32 v2, 0
+; SDAG_GFX6-NEXT: v_add_f64 v[4:5], v[0:1], v[2:3]
+; SDAG_GFX6-NEXT: s_mov_b32 s4, -1
+; SDAG_GFX6-NEXT: s_mov_b32 s5, 0x432fffff
+; SDAG_GFX6-NEXT: v_add_f64 v[2:3], v[4:5], -v[2:3]
+; SDAG_GFX6-NEXT: v_cmp_gt_f64_e64 vcc, |v[0:1]|, s[4:5]
+; SDAG_GFX6-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; SDAG_GFX6-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; SDAG_GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX7-LABEL: v_roundeven_f64:
+; SDAG_GFX7: ; %bb.0:
+; SDAG_GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX7-NEXT: v_rndne_f64_e32 v[0:1], v[0:1]
+; SDAG_GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX8-LABEL: v_roundeven_f64:
+; SDAG_GFX8: ; %bb.0:
+; SDAG_GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX8-NEXT: v_rndne_f64_e32 v[0:1], v[0:1]
+; SDAG_GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX9-LABEL: v_roundeven_f64:
+; SDAG_GFX9: ; %bb.0:
+; SDAG_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX9-NEXT: v_rndne_f64_e32 v[0:1], v[0:1]
+; SDAG_GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX10PLUS-LABEL: v_roundeven_f64:
+; SDAG_GFX10PLUS: ; %bb.0:
+; SDAG_GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX10PLUS-NEXT: v_rndne_f64_e32 v[0:1], v[0:1]
+; SDAG_GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
%roundeven = call double @llvm.roundeven.f64(double %x)
ret double %roundeven
}
@@ -632,6 +1159,48 @@ define double @v_roundeven_f64_fneg(double %x) {
; GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10PLUS-NEXT: v_rndne_f64_e64 v[0:1], -v[0:1]
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX6-LABEL: v_roundeven_f64_fneg:
+; SDAG_GFX6: ; %bb.0:
+; SDAG_GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX6-NEXT: v_xor_b32_e32 v6, 0x80000000, v1
+; SDAG_GFX6-NEXT: s_brev_b32 s4, -2
+; SDAG_GFX6-NEXT: v_mov_b32_e32 v2, 0x43300000
+; SDAG_GFX6-NEXT: v_bfi_b32 v3, s4, v2, v6
+; SDAG_GFX6-NEXT: v_mov_b32_e32 v2, 0
+; SDAG_GFX6-NEXT: v_add_f64 v[4:5], -v[0:1], v[2:3]
+; SDAG_GFX6-NEXT: s_mov_b32 s4, -1
+; SDAG_GFX6-NEXT: s_mov_b32 s5, 0x432fffff
+; SDAG_GFX6-NEXT: v_add_f64 v[2:3], v[4:5], -v[2:3]
+; SDAG_GFX6-NEXT: v_cmp_gt_f64_e64 vcc, |v[0:1]|, s[4:5]
+; SDAG_GFX6-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; SDAG_GFX6-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc
+; SDAG_GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX7-LABEL: v_roundeven_f64_fneg:
+; SDAG_GFX7: ; %bb.0:
+; SDAG_GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX7-NEXT: v_rndne_f64_e64 v[0:1], -v[0:1]
+; SDAG_GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX8-LABEL: v_roundeven_f64_fneg:
+; SDAG_GFX8: ; %bb.0:
+; SDAG_GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX8-NEXT: v_rndne_f64_e64 v[0:1], -v[0:1]
+; SDAG_GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX9-LABEL: v_roundeven_f64_fneg:
+; SDAG_GFX9: ; %bb.0:
+; SDAG_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX9-NEXT: v_rndne_f64_e64 v[0:1], -v[0:1]
+; SDAG_GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX10PLUS-LABEL: v_roundeven_f64_fneg:
+; SDAG_GFX10PLUS: ; %bb.0:
+; SDAG_GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX10PLUS-NEXT: v_rndne_f64_e64 v[0:1], -v[0:1]
+; SDAG_GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
%neg.x = fneg double %x
%roundeven = call double @llvm.roundeven.f64(double %neg.x)
ret double %roundeven
@@ -688,6 +1257,57 @@ define <2 x double> @v_roundeven_v2f64(<2 x double> %x) {
; GFX10PLUS-NEXT: v_rndne_f64_e32 v[0:1], v[0:1]
; GFX10PLUS-NEXT: v_rndne_f64_e32 v[2:3], v[2:3]
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX6-LABEL: v_roundeven_v2f64:
+; SDAG_GFX6: ; %bb.0:
+; SDAG_GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX6-NEXT: s_brev_b32 s6, -2
+; SDAG_GFX6-NEXT: v_mov_b32_e32 v8, 0x43300000
+; SDAG_GFX6-NEXT: v_bfi_b32 v5, s6, v8, v1
+; SDAG_GFX6-NEXT: v_mov_b32_e32 v4, 0
+; SDAG_GFX6-NEXT: v_add_f64 v[6:7], v[0:1], v[4:5]
+; SDAG_GFX6-NEXT: s_mov_b32 s4, -1
+; SDAG_GFX6-NEXT: s_mov_b32 s5, 0x432fffff
+; SDAG_GFX6-NEXT: v_add_f64 v[5:6], v[6:7], -v[4:5]
+; SDAG_GFX6-NEXT: v_cmp_gt_f64_e64 vcc, |v[0:1]|, s[4:5]
+; SDAG_GFX6-NEXT: v_cndmask_b32_e32 v0, v5, v0, vcc
+; SDAG_GFX6-NEXT: v_bfi_b32 v5, s6, v8, v3
+; SDAG_GFX6-NEXT: v_add_f64 v[7:8], v[2:3], v[4:5]
+; SDAG_GFX6-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
+; SDAG_GFX6-NEXT: v_add_f64 v[4:5], v[7:8], -v[4:5]
+; SDAG_GFX6-NEXT: v_cmp_gt_f64_e64 vcc, |v[2:3]|, s[4:5]
+; SDAG_GFX6-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; SDAG_GFX6-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; SDAG_GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX7-LABEL: v_roundeven_v2f64:
+; SDAG_GFX7: ; %bb.0:
+; SDAG_GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX7-NEXT: v_rndne_f64_e32 v[0:1], v[0:1]
+; SDAG_GFX7-NEXT: v_rndne_f64_e32 v[2:3], v[2:3]
+; SDAG_GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX8-LABEL: v_roundeven_v2f64:
+; SDAG_GFX8: ; %bb.0:
+; SDAG_GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX8-NEXT: v_rndne_f64_e32 v[0:1], v[0:1]
+; SDAG_GFX8-NEXT: v_rndne_f64_e32 v[2:3], v[2:3]
+; SDAG_GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX9-LABEL: v_roundeven_v2f64:
+; SDAG_GFX9: ; %bb.0:
+; SDAG_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX9-NEXT: v_rndne_f64_e32 v[0:1], v[0:1]
+; SDAG_GFX9-NEXT: v_rndne_f64_e32 v[2:3], v[2:3]
+; SDAG_GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG_GFX10PLUS-LABEL: v_roundeven_v2f64:
+; SDAG_GFX10PLUS: ; %bb.0:
+; SDAG_GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG_GFX10PLUS-NEXT: s_waitcnt_vscnt null, 0x0
+; SDAG_GFX10PLUS-NEXT: v_rndne_f64_e32 v[0:1], v[0:1]
+; SDAG_GFX10PLUS-NEXT: v_rndne_f64_e32 v[2:3], v[2:3]
+; SDAG_GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
%roundeven = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %x)
ret <2 x double> %roundeven
}