[llvm-branch-commits] [llvm] [AMDGPU] Enable ISD::{FSIN, FCOS} custom lowering to work on v2f16 (PR #176382)

Frederik Harwath via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Fri Jan 16 05:51:04 PST 2026


https://github.com/frederik-h created https://github.com/llvm/llvm-project/pull/176382

Currently, ISD::FSIN and ISD::FCOS of type MVT::v2f16 are legalized by
first expanding and then applying the custom lowering to the resulting
f16 instructions. This ordering prevents the use of packed math
variants of the instructions introduced by the legalization (e.g. the
multiplication), where available, and makes it difficult to eliminate
the packing of the results by using the SDWA form; previous attempts
to handle the latter situation in the si-peephole-sdwa pass were
unwieldy, since it was necessary to reconstruct the association
between the source and target vectors.

Change the legalization action for ISD::FSIN and ISD::FCOS of type
MVT::v2f16 to Custom, and adapt the custom intrinsic lowering to
handle v2f16 for the intrinsics introduced in this way.
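
For readers less familiar with the SelectionDAG scalarization idiom
used here, the sketch below shows the rough shape of the approach. It
is a simplified, illustrative version of the BuildScalarizedUnaryOp
helper added by the patch; the function name and the OpIdx parameter
are chosen for illustration, and only standard SelectionDAG calls are
used.

  // For a vector-typed node, extract each lane, build the scalar node
  // per lane, and repack the results with a BUILD_VECTOR; for scalar
  // types, build the node directly.
  static SDValue scalarizeUnaryOp(SDValue Op, unsigned Opc, unsigned OpIdx,
                                  SelectionDAG &DAG) {
    EVT VT = Op.getValueType();
    SDLoc DL(Op);
    SDValue Src = Op.getOperand(OpIdx); // e.g. operand 1 of INTRINSIC_WO_CHAIN

    if (!VT.isVector())
      return DAG.getNode(Opc, DL, VT, Src);

    SmallVector<SDValue, 8> Lanes;
    DAG.ExtractVectorElements(Src, Lanes, 0, VT.getVectorNumElements());
    for (SDValue &Lane : Lanes)
      Lane = DAG.getNode(Opc, DL, VT.getScalarType(), Lane);
    return DAG.getBuildVector(VT, DL, Lanes); // e.g. repack into v2f16
  }

The effect is visible in the updated tests below: the preceding
multiplication becomes a single v_pk_mul_f16 on the packed v2f16
value, while the sin/cos instructions themselves are still emitted
once per lane.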

From 76e8191ddbb87d205692a7c65f59bdde9ae0fb42 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 15 Jan 2026 02:53:01 -0500
Subject: [PATCH] [AMDGPU] Enable ISD::{FSIN,FCOS} custom lowering to work on
 v2f16

Currently, ISD::FSIN and ISD::FCOS of type MVT::v2f16 are legalized by
first expanding and then applying the custom lowering to the resulting
f16 instructions. This ordering prevents the use of packed math
variants of the instructions introduced by the legalization (e.g. the
multiplication), where available, and makes it difficult to eliminate
the packing of the results by using the SDWA form; previous attempts
to handle the latter situation in the si-peephole-sdwa pass were
unwieldy, since it was necessary to reconstruct the association
between the source and target vectors.

Change the legalization action for ISD::FSIN and ISD::FCOS of type
MVT::v2f16 to Custom, and adapt the custom intrinsic lowering to
handle v2f16 for the intrinsics introduced in this way.
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 37 +++++++++++--
 llvm/test/CodeGen/AMDGPU/llvm.cos.f16.ll  | 65 ++++++++++-------------
 llvm/test/CodeGen/AMDGPU/llvm.sin.f16.ll  | 65 ++++++++++-------------
 3 files changed, 88 insertions(+), 79 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index d2a976197ef70..dc9f59fb129a9 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -655,6 +655,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
           break;
         case ISD::EXTRACT_SUBVECTOR:
         case ISD::CONCAT_VECTORS:
+        case ISD::FSIN:
+        case ISD::FCOS:
           setOperationAction(Op, VT, Custom);
           break;
         default:
@@ -9876,6 +9878,35 @@ SDValue SITargetLowering::lowerWorkitemID(SelectionDAG &DAG, SDValue Op,
                      DAG.getValueType(SmallVT));
 }
 
+/// Helper function for LowerINTRINSIC_WO_CHAIN.  Replace \p Op with a
+/// new node of opcode \p NewISD whose single operand is the operand of
+/// \p Op at index \p OperandIndex.  For vector types, the operation is
+/// scalarized per element and the results are repacked into a vector.
+///
+// FIXME: The manual scalarization seems to be necessary because the
+// Expand fallback is not supported for ISD::INTRINSIC_WO_CHAIN, and
+// hence the lowering function must not fail for v2f16; see the
+// comment in SelectionDAGLegalize::ExpandNode.
+static SDValue BuildScalarizedUnaryOp(SDValue Op, unsigned NewISD,
+                                      unsigned OperandIndex,
+                                      SelectionDAG &DAG) {
+  EVT VT = Op.getValueType();
+  SDLoc DL(Op);
+  SDValue Operand = Op.getOperand(OperandIndex);
+  if (!VT.isVector())
+    return DAG.getNode(NewISD, DL, VT, Operand);
+
+  EVT ScalarVT = VT.getScalarType();
+  unsigned NElts = VT.getVectorNumElements();
+  SmallVector<SDValue, 8> Args;
+
+  DAG.ExtractVectorElements(Operand, Args, 0, NElts);
+  for (unsigned I = 0; I < NElts; ++I)
+    Args[I] = DAG.getNode(NewISD, DL, ScalarVT, Args[I]);
+
+  return DAG.getBuildVector(VT, DL, Args);
+}
+
 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {
   MachineFunction &MF = DAG.getMachineFunction();
@@ -10098,10 +10129,10 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   case Intrinsic::amdgcn_fdiv_fast:
     return lowerFDIV_FAST(Op, DAG);
   case Intrinsic::amdgcn_sin:
-    return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
+    return BuildScalarizedUnaryOp(Op, AMDGPUISD::SIN_HW, 1, DAG);
 
   case Intrinsic::amdgcn_cos:
-    return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
+    return BuildScalarizedUnaryOp(Op, AMDGPUISD::COS_HW, 1, DAG);
 
   case Intrinsic::amdgcn_mul_u24:
     return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1),
@@ -10117,7 +10148,7 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     return emitRemovedIntrinsicError(DAG, DL, VT);
   }
   case Intrinsic::amdgcn_fract:
-    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
+    return BuildScalarizedUnaryOp(Op, AMDGPUISD::FRACT, 1, DAG);
 
   case Intrinsic::amdgcn_class:
     return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, Op.getOperand(1),
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.cos.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.cos.f16.ll
index b7768998160d5..769bf0a6458b2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.cos.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.cos.f16.ll
@@ -184,14 +184,12 @@ define amdgpu_kernel void @cos_v2f16(ptr addrspace(1) %r, ptr addrspace(1) %a) {
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0x3118
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    global_load_dword v1, v0, s[2:3]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_mul_f16_e32 v3, 0.15915494, v1
-; GFX9-NEXT:    v_mul_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT:    v_cos_f16_e32 v2, v3
-; GFX9-NEXT:    v_cos_f16_e32 v1, v1
+; GFX9-NEXT:    v_pk_mul_f16 v1, v1, 0.15915494 op_sel_hi:[1,0]
+; GFX9-NEXT:    v_cos_f16_e32 v2, v1
+; GFX9-NEXT:    v_cos_f16_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX9-NEXT:    v_pack_b32_f16 v1, v2, v1
 ; GFX9-NEXT:    global_store_dword v0, v1, s[0:1]
 ; GFX9-NEXT:    s_endpgm
@@ -200,14 +198,12 @@ define amdgpu_kernel void @cos_v2f16(ptr addrspace(1) %r, ptr addrspace(1) %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
-; GFX10-NEXT:    v_mov_b32_e32 v2, 0x3118
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    global_load_dword v1, v0, s[2:3]
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_mul_f16_e32 v3, 0.15915494, v1
-; GFX10-NEXT:    v_mul_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX10-NEXT:    v_cos_f16_e32 v2, v3
-; GFX10-NEXT:    v_cos_f16_e32 v1, v1
+; GFX10-NEXT:    v_pk_mul_f16 v1, v1, 0.15915494 op_sel_hi:[1,0]
+; GFX10-NEXT:    v_cos_f16_e32 v2, v1
+; GFX10-NEXT:    v_cos_f16_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX10-NEXT:    v_pack_b32_f16 v1, v2, v1
 ; GFX10-NEXT:    global_store_dword v0, v1, s[0:1]
 ; GFX10-NEXT:    s_endpgm
@@ -215,18 +211,16 @@ define amdgpu_kernel void @cos_v2f16(ptr addrspace(1) %r, ptr addrspace(1) %a) {
 ; GFX11-TRUE16-LABEL: cos_v2f16:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v1, 0
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX11-TRUE16-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    global_load_b32 v0, v1, s[2:3]
+; GFX11-TRUE16-NEXT:    global_load_b32 v1, v0, s[2:3]
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
-; GFX11-TRUE16-NEXT:    v_mul_f16_e32 v0.l, 0.15915494, v0.l
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT:    v_mul_f16_e32 v0.h, 0.15915494, v2.l
-; GFX11-TRUE16-NEXT:    v_cos_f16_e32 v0.l, v0.l
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT:    v_cos_f16_e32 v0.h, v0.h
-; GFX11-TRUE16-NEXT:    global_store_b32 v1, v0, s[0:1]
+; GFX11-TRUE16-NEXT:    v_pk_mul_f16 v1, v1, 0.15915494 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT:    v_cos_f16_e32 v1.l, v1.l
+; GFX11-TRUE16-NEXT:    v_cos_f16_e32 v1.h, v2.l
+; GFX11-TRUE16-NEXT:    global_store_b32 v0, v1, s[0:1]
 ; GFX11-TRUE16-NEXT:    s_endpgm
 ;
 ; GFX11-FAKE16-LABEL: cos_v2f16:
@@ -236,12 +230,10 @@ define amdgpu_kernel void @cos_v2f16(ptr addrspace(1) %r, ptr addrspace(1) %a) {
 ; GFX11-FAKE16-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-FAKE16-NEXT:    global_load_b32 v1, v0, s[2:3]
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT:    v_pk_mul_f16 v1, v1, 0.15915494 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-FAKE16-NEXT:    v_mul_f16_e32 v1, 0.15915494, v1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_mul_f16_e32 v2, 0.15915494, v2
 ; GFX11-FAKE16-NEXT:    v_cos_f16_e32 v1, v1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_cos_f16_e32 v2, v2
 ; GFX11-FAKE16-NEXT:    s_waitcnt_depctr depctr_va_vdst(0)
 ; GFX11-FAKE16-NEXT:    v_pack_b32_f16 v1, v1, v2
@@ -251,18 +243,16 @@ define amdgpu_kernel void @cos_v2f16(ptr addrspace(1) %r, ptr addrspace(1) %a) {
 ; GFX12-TRUE16-LABEL: cos_v2f16:
 ; GFX12-TRUE16:       ; %bb.0:
 ; GFX12-TRUE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX12-TRUE16-NEXT:    v_mov_b32_e32 v1, 0
+; GFX12-TRUE16-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX12-TRUE16-NEXT:    s_wait_kmcnt 0x0
-; GFX12-TRUE16-NEXT:    global_load_b32 v0, v1, s[2:3]
+; GFX12-TRUE16-NEXT:    global_load_b32 v1, v0, s[2:3]
 ; GFX12-TRUE16-NEXT:    s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
-; GFX12-TRUE16-NEXT:    v_mul_f16_e32 v0.l, 0.15915494, v0.l
-; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT:    v_mul_f16_e32 v0.h, 0.15915494, v2.l
-; GFX12-TRUE16-NEXT:    v_cos_f16_e32 v0.l, v0.l
-; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT:    v_cos_f16_e32 v0.h, v0.h
-; GFX12-TRUE16-NEXT:    global_store_b32 v1, v0, s[0:1]
+; GFX12-TRUE16-NEXT:    v_pk_mul_f16 v1, v1, 0.15915494 op_sel_hi:[1,0]
+; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT:    v_cos_f16_e32 v1.l, v1.l
+; GFX12-TRUE16-NEXT:    v_cos_f16_e32 v1.h, v2.l
+; GFX12-TRUE16-NEXT:    global_store_b32 v0, v1, s[0:1]
 ; GFX12-TRUE16-NEXT:    s_endpgm
 ;
 ; GFX12-FAKE16-LABEL: cos_v2f16:
@@ -272,13 +262,12 @@ define amdgpu_kernel void @cos_v2f16(ptr addrspace(1) %r, ptr addrspace(1) %a) {
 ; GFX12-FAKE16-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-FAKE16-NEXT:    global_load_b32 v1, v0, s[2:3]
 ; GFX12-FAKE16-NEXT:    s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT:    v_pk_mul_f16 v1, v1, 0.15915494 op_sel_hi:[1,0]
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
-; GFX12-FAKE16-NEXT:    v_mul_f16_e32 v1, 0.15915494, v1
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_mul_f16_e32 v2, 0.15915494, v2
 ; GFX12-FAKE16-NEXT:    v_cos_f16_e32 v1, v1
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(TRANS32_DEP_1)
 ; GFX12-FAKE16-NEXT:    v_cos_f16_e32 v2, v2
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(TRANS32_DEP_1)
 ; GFX12-FAKE16-NEXT:    v_pack_b32_f16 v1, v1, v2
 ; GFX12-FAKE16-NEXT:    global_store_b32 v0, v1, s[0:1]
 ; GFX12-FAKE16-NEXT:    s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.sin.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.sin.f16.ll
index 56cfec29e4ed2..b7fc76aecf080 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.sin.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.sin.f16.ll
@@ -184,14 +184,12 @@ define amdgpu_kernel void @sin_v2f16(ptr addrspace(1) %r, ptr addrspace(1) %a) {
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0x3118
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    global_load_dword v1, v0, s[2:3]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_mul_f16_e32 v3, 0.15915494, v1
-; GFX9-NEXT:    v_mul_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT:    v_sin_f16_e32 v2, v3
-; GFX9-NEXT:    v_sin_f16_e32 v1, v1
+; GFX9-NEXT:    v_pk_mul_f16 v1, v1, 0.15915494 op_sel_hi:[1,0]
+; GFX9-NEXT:    v_sin_f16_e32 v2, v1
+; GFX9-NEXT:    v_sin_f16_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX9-NEXT:    v_pack_b32_f16 v1, v2, v1
 ; GFX9-NEXT:    global_store_dword v0, v1, s[0:1]
 ; GFX9-NEXT:    s_endpgm
@@ -200,14 +198,12 @@ define amdgpu_kernel void @sin_v2f16(ptr addrspace(1) %r, ptr addrspace(1) %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
-; GFX10-NEXT:    v_mov_b32_e32 v2, 0x3118
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    global_load_dword v1, v0, s[2:3]
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_mul_f16_e32 v3, 0.15915494, v1
-; GFX10-NEXT:    v_mul_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX10-NEXT:    v_sin_f16_e32 v2, v3
-; GFX10-NEXT:    v_sin_f16_e32 v1, v1
+; GFX10-NEXT:    v_pk_mul_f16 v1, v1, 0.15915494 op_sel_hi:[1,0]
+; GFX10-NEXT:    v_sin_f16_e32 v2, v1
+; GFX10-NEXT:    v_sin_f16_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX10-NEXT:    v_pack_b32_f16 v1, v2, v1
 ; GFX10-NEXT:    global_store_dword v0, v1, s[0:1]
 ; GFX10-NEXT:    s_endpgm
@@ -215,18 +211,16 @@ define amdgpu_kernel void @sin_v2f16(ptr addrspace(1) %r, ptr addrspace(1) %a) {
 ; GFX11-TRUE16-LABEL: sin_v2f16:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v1, 0
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX11-TRUE16-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    global_load_b32 v0, v1, s[2:3]
+; GFX11-TRUE16-NEXT:    global_load_b32 v1, v0, s[2:3]
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
-; GFX11-TRUE16-NEXT:    v_mul_f16_e32 v0.l, 0.15915494, v0.l
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT:    v_mul_f16_e32 v0.h, 0.15915494, v2.l
-; GFX11-TRUE16-NEXT:    v_sin_f16_e32 v0.l, v0.l
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT:    v_sin_f16_e32 v0.h, v0.h
-; GFX11-TRUE16-NEXT:    global_store_b32 v1, v0, s[0:1]
+; GFX11-TRUE16-NEXT:    v_pk_mul_f16 v1, v1, 0.15915494 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT:    v_sin_f16_e32 v1.l, v1.l
+; GFX11-TRUE16-NEXT:    v_sin_f16_e32 v1.h, v2.l
+; GFX11-TRUE16-NEXT:    global_store_b32 v0, v1, s[0:1]
 ; GFX11-TRUE16-NEXT:    s_endpgm
 ;
 ; GFX11-FAKE16-LABEL: sin_v2f16:
@@ -236,12 +230,10 @@ define amdgpu_kernel void @sin_v2f16(ptr addrspace(1) %r, ptr addrspace(1) %a) {
 ; GFX11-FAKE16-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-FAKE16-NEXT:    global_load_b32 v1, v0, s[2:3]
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT:    v_pk_mul_f16 v1, v1, 0.15915494 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-FAKE16-NEXT:    v_mul_f16_e32 v1, 0.15915494, v1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_mul_f16_e32 v2, 0.15915494, v2
 ; GFX11-FAKE16-NEXT:    v_sin_f16_e32 v1, v1
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_sin_f16_e32 v2, v2
 ; GFX11-FAKE16-NEXT:    s_waitcnt_depctr depctr_va_vdst(0)
 ; GFX11-FAKE16-NEXT:    v_pack_b32_f16 v1, v1, v2
@@ -251,18 +243,16 @@ define amdgpu_kernel void @sin_v2f16(ptr addrspace(1) %r, ptr addrspace(1) %a) {
 ; GFX12-TRUE16-LABEL: sin_v2f16:
 ; GFX12-TRUE16:       ; %bb.0:
 ; GFX12-TRUE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX12-TRUE16-NEXT:    v_mov_b32_e32 v1, 0
+; GFX12-TRUE16-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX12-TRUE16-NEXT:    s_wait_kmcnt 0x0
-; GFX12-TRUE16-NEXT:    global_load_b32 v0, v1, s[2:3]
+; GFX12-TRUE16-NEXT:    global_load_b32 v1, v0, s[2:3]
 ; GFX12-TRUE16-NEXT:    s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
-; GFX12-TRUE16-NEXT:    v_mul_f16_e32 v0.l, 0.15915494, v0.l
-; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT:    v_mul_f16_e32 v0.h, 0.15915494, v2.l
-; GFX12-TRUE16-NEXT:    v_sin_f16_e32 v0.l, v0.l
-; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT:    v_sin_f16_e32 v0.h, v0.h
-; GFX12-TRUE16-NEXT:    global_store_b32 v1, v0, s[0:1]
+; GFX12-TRUE16-NEXT:    v_pk_mul_f16 v1, v1, 0.15915494 op_sel_hi:[1,0]
+; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT:    v_sin_f16_e32 v1.l, v1.l
+; GFX12-TRUE16-NEXT:    v_sin_f16_e32 v1.h, v2.l
+; GFX12-TRUE16-NEXT:    global_store_b32 v0, v1, s[0:1]
 ; GFX12-TRUE16-NEXT:    s_endpgm
 ;
 ; GFX12-FAKE16-LABEL: sin_v2f16:
@@ -272,13 +262,12 @@ define amdgpu_kernel void @sin_v2f16(ptr addrspace(1) %r, ptr addrspace(1) %a) {
 ; GFX12-FAKE16-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-FAKE16-NEXT:    global_load_b32 v1, v0, s[2:3]
 ; GFX12-FAKE16-NEXT:    s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT:    v_pk_mul_f16 v1, v1, 0.15915494 op_sel_hi:[1,0]
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX12-FAKE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
-; GFX12-FAKE16-NEXT:    v_mul_f16_e32 v1, 0.15915494, v1
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_mul_f16_e32 v2, 0.15915494, v2
 ; GFX12-FAKE16-NEXT:    v_sin_f16_e32 v1, v1
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(TRANS32_DEP_1)
 ; GFX12-FAKE16-NEXT:    v_sin_f16_e32 v2, v2
+; GFX12-FAKE16-NEXT:    s_delay_alu instid0(TRANS32_DEP_1)
 ; GFX12-FAKE16-NEXT:    v_pack_b32_f16 v1, v1, v2
 ; GFX12-FAKE16-NEXT:    global_store_b32 v0, v1, s[0:1]
 ; GFX12-FAKE16-NEXT:    s_endpgm


