[llvm] AMDGPU: Custom lower fptrunc vectors for f32 -> f16 (PR #141883)
Changpeng Fang via llvm-commits
llvm-commits at lists.llvm.org
Thu May 29 15:05:54 PDT 2025
https://github.com/changpeng updated https://github.com/llvm/llvm-project/pull/141883
From 85ecbf71e69a5768e1977afaba619ed5c6cd57ae Mon Sep 17 00:00:00 2001
From: Changpeng Fang <changpeng.fang at amd.com>
Date: Wed, 28 May 2025 17:20:18 -0700
Subject: [PATCH 1/4] AMDGPU: Custom lower vector fptrunc of f32 -> f16
GFX950+ supports v_cvt_pk_f16_f32. However, the current implementation
of vector fptrunc lowering fully scalarizes the vector, and the scalar
conversions may not always be recombined into the packed form.
We made v2f32 -> v2f16 legal in https://github.com/llvm/llvm-project/pull/139956.
This work extends that to wider vectors: instead of fully scalarizing,
we split the vector into packs (v2f32 -> v2f16) so that the packed
conversion is always generated.
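For illustration, here is a minimal IR sketch of the case this targets (the
function name is hypothetical; the expected GFX950 output matches the
v_test_cvt_v4f32_v4f16 test updated below):

  define <4 x half> @cvt_v4_example(<4 x float> %src) {
    %res = fptrunc <4 x float> %src to <4 x half>
    ret <4 x half> %res
  }

With this change the <4 x float> source is split into two v2f32 packs, each
converted by a single v_cvt_pk_f16_f32, rather than four scalar
v_cvt_f16_f32 instructions that may or may not be recombined.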
---
.../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 23 +++-
llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h | 2 +
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 40 +++++-
llvm/lib/Target/AMDGPU/SIISelLowering.h | 1 +
.../AMDGPU/fptrunc.v2f16.no.fast.math.ll | 120 +++++++++++++++++-
5 files changed, 172 insertions(+), 14 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 177750b639c67..9e9c0e5e18a6d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1061,9 +1061,10 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
}
auto &FPTruncActions = getActionDefinitionsBuilder(G_FPTRUNC);
- if (ST.hasCvtPkF16F32Inst())
- FPTruncActions.legalFor({{S32, S64}, {S16, S32}, {V2S16, V2S32}});
- else
+ if (ST.hasCvtPkF16F32Inst()) {
+ FPTruncActions.legalFor({{S32, S64}, {S16, S32}, {V2S16, V2S32}})
+ .customFor({{V4S16, V4S32}, {V8S16, V8S32}});
+ } else
FPTruncActions.legalFor({{S32, S64}, {S16, S32}});
FPTruncActions.scalarize(0).lower();
@@ -2163,6 +2164,8 @@ bool AMDGPULegalizerInfo::legalizeCustom(
case TargetOpcode::G_FMINNUM_IEEE:
case TargetOpcode::G_FMAXNUM_IEEE:
return legalizeMinNumMaxNum(Helper, MI);
+ case TargetOpcode::G_FPTRUNC:
+ return legalizeFPTrunc(Helper, MI, MRI);
case TargetOpcode::G_EXTRACT_VECTOR_ELT:
return legalizeExtractVectorElt(MI, MRI, B);
case TargetOpcode::G_INSERT_VECTOR_ELT:
@@ -2749,6 +2752,20 @@ bool AMDGPULegalizerInfo::legalizeMinNumMaxNum(LegalizerHelper &Helper,
return Helper.lowerFMinNumMaxNum(MI) == LegalizerHelper::Legalized;
}
+bool AMDGPULegalizerInfo::legalizeFPTrunc(LegalizerHelper &Helper,
+ MachineInstr &MI,
+ MachineRegisterInfo &MRI) const {
+ Register DstReg = MI.getOperand(0).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ assert (DstTy.isVector() && DstTy.getNumElements() > 2);
+ LLT EltTy = DstTy.getElementType();
+ assert (EltTy == S16 && "Only handle vectors of half");
+
+ // Split vector to packs.
+ return Helper.fewerElementsVector(MI, 0, LLT::fixed_vector(2, EltTy)) ==
+ LegalizerHelper::Legalized;
+}
+
bool AMDGPULegalizerInfo::legalizeExtractVectorElt(
MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B) const {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index 1f4e02b0d600a..faab326f53fcf 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -56,6 +56,8 @@ class AMDGPULegalizerInfo final : public LegalizerInfo {
bool legalizeFPTOI(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B, bool Signed) const;
bool legalizeMinNumMaxNum(LegalizerHelper &Helper, MachineInstr &MI) const;
+ bool legalizeFPTrunc(LegalizerHelper &Helper, MachineInstr &MI,
+ MachineRegisterInfo &MRI) const;
bool legalizeExtractVectorElt(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B) const;
bool legalizeInsertVectorElt(MachineInstr &MI, MachineRegisterInfo &MRI,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 74ca3e43fce3a..8d68e5a0d064a 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -919,8 +919,10 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BUILD_VECTOR, MVT::v2bf16, Legal);
}
- if (Subtarget->hasCvtPkF16F32Inst())
- setOperationAction(ISD::FP_ROUND, MVT::v2f16, Custom);
+ if (Subtarget->hasCvtPkF16F32Inst()) {
+ setOperationAction(ISD::FP_ROUND, {MVT::v2f16, MVT::v4f16, MVT::v8f16},
+ Custom);
+ }
setTargetDAGCombine({ISD::ADD,
ISD::UADDO_CARRY,
@@ -6900,14 +6902,44 @@ SDValue SITargetLowering::getFPExtOrFPRound(SelectionDAG &DAG, SDValue Op,
DAG.getTargetConstant(0, DL, MVT::i32));
}
+SDValue SITargetLowering::SplitFP_ROUNDVectorToPacks(SDValue Op,
+ SelectionDAG &DAG) const {
+ unsigned Opc = Op.getOpcode();
+ EVT DstVT = Op.getValueType();
+ unsigned NumElts = DstVT.getVectorNumElements();
+ assert (NumElts % 2 == 0 && "Only handle vectors of even number of elements");
+ if (NumElts == 2) // already packed.
+ return Op;
+
+ SDValue Src = Op.getOperand(0);
+ EVT SrcVT = Src.getValueType();
+ LLVMContext &Context = *DAG.getContext();
+ EVT SrcPkVT = EVT::getVectorVT(Context, SrcVT.getScalarType(), 2);
+ EVT DstPkVT = EVT::getVectorVT(Context, DstVT.getScalarType(), 2);
+
+ SDLoc DL(Op);
+ SmallVector<SDValue, 16> Packs;
+ for (unsigned Index = 0; Index < NumElts; Index +=2) {
+ SDValue PkSrc = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SrcPkVT, Src,
+ DAG.getConstant(Index, DL, MVT::i32));
+ SDValue PkDst = DAG.getNode(Opc, DL, DstPkVT, PkSrc,
+ DAG.getTargetConstant(0, DL, MVT::i32));
+ Packs.push_back(PkDst);
+ }
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, DstVT, Packs);
+}
+
SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
SDValue Src = Op.getOperand(0);
EVT SrcVT = Src.getValueType();
EVT DstVT = Op.getValueType();
- if (DstVT == MVT::v2f16) {
+ if (DstVT.isVector() && DstVT.getScalarType() == MVT::f16) {
assert(Subtarget->hasCvtPkF16F32Inst() && "support v_cvt_pk_f16_f32");
- return SrcVT == MVT::v2f32 ? Op : SDValue();
+ if (SrcVT.getScalarType() != MVT::f32)
+ return SDValue();
+ return SplitFP_ROUNDVectorToPacks(Op, DAG);
}
if (SrcVT.getScalarType() != MVT::f64)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index c42366a1c04c8..1c5ec0e5a25a1 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -145,6 +145,7 @@ class SITargetLowering final : public AMDGPUTargetLowering {
/// Custom lowering for ISD::FP_ROUND for MVT::f16.
SDValue lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
+ SDValue SplitFP_ROUNDVectorToPacks(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerFMINNUM_FMAXNUM(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerFMINIMUM_FMAXIMUM(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerFLDEXP(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.v2f16.no.fast.math.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.v2f16.no.fast.math.ll
index e5815e96fbe33..b4c8dac3ff518 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.v2f16.no.fast.math.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.v2f16.no.fast.math.ll
@@ -12,18 +12,124 @@ define <2 x half> @v_test_cvt_v2f32_v2f16(<2 x float> %src) {
ret <2 x half> %res
}
-define half @fptrunc_v2f32_v2f16_then_extract(<2 x float> %src) {
-; GFX950-LABEL: fptrunc_v2f32_v2f16_then_extract:
+define <4 x half> @v_test_cvt_v4f32_v4f16(<4 x float> %src) {
+; GFX950-LABEL: v_test_cvt_v4f32_v4f16:
; GFX950: ; %bb.0:
; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX950-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
-; GFX950-NEXT: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX950-NEXT: v_cvt_pk_f16_f32 v1, v2, v3
+; GFX950-NEXT: s_setpc_b64 s[30:31]
+ %res = fptrunc <4 x float> %src to <4 x half>
+ ret <4 x half> %res
+}
+
+define <8 x half> @v_test_cvt_v8f32_v2f16(<8 x float> %src) {
+; GFX950-LABEL: v_test_cvt_v8f32_v2f16:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX950-NEXT: v_cvt_pk_f16_f32 v1, v2, v3
+; GFX950-NEXT: v_cvt_pk_f16_f32 v2, v4, v5
+; GFX950-NEXT: v_cvt_pk_f16_f32 v3, v6, v7
+; GFX950-NEXT: s_setpc_b64 s[30:31]
+ %res = fptrunc <8 x float> %src to <8 x half>
+ ret <8 x half> %res
+}
+
+define half @fptrunc_v2f32_v2f16_extract_uses(<2 x float> %src) {
+; GFX950-LABEL: fptrunc_v2f32_v2f16_extract_uses:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX950-NEXT: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX950-NEXT: s_setpc_b64 s[30:31]
%vec_half = fptrunc <2 x float> %src to <2 x half>
- %first = extractelement <2 x half> %vec_half, i64 1
- %second = extractelement <2 x half> %vec_half, i64 0
- %res = fadd half %first, %second
- ret half %res
+ %f0 = extractelement <2 x half> %vec_half, i64 0
+ %f1 = extractelement <2 x half> %vec_half, i64 1
+ %rslt = fadd half %f0, %f1
+ ret half %rslt
+}
+
+define half @fptrunc_v4f32_v4f16_extract_uses(<4 x float> %vec_float) {
+; GFX950-SDAG-LABEL: fptrunc_v4f32_v4f16_extract_uses:
+; GFX950-SDAG: ; %bb.0:
+; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v2, v2, v3
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX950-SDAG-NEXT: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-SDAG-NEXT: v_add_f16_sdwa v1, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-SDAG-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX950-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX950-GISEL-LABEL: fptrunc_v4f32_v4f16_extract_uses:
+; GFX950-GISEL: ; %bb.0:
+; GFX950-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-GISEL-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX950-GISEL-NEXT: v_cvt_pk_f16_f32 v1, v2, v3
+; GFX950-GISEL-NEXT: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-GISEL-NEXT: v_add_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-GISEL-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX950-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %vec_half = fptrunc <4 x float> %vec_float to <4 x half>
+ %f0 = extractelement <4 x half> %vec_half, i64 0
+ %f1 = extractelement <4 x half> %vec_half, i64 1
+ %f2 = extractelement <4 x half> %vec_half, i64 2
+ %f3 = extractelement <4 x half> %vec_half, i64 3
+ %sum0 = fadd half %f0, %f1
+ %sum1 = fadd half %f2, %f3
+ %rslt = fadd half %sum0, %sum1
+ ret half %rslt
+}
+
+define half @fptrunc_v8f32_v8f16_extract_uses(<8 x float> %vec_float) {
+; GFX950-SDAG-LABEL: fptrunc_v8f32_v8f16_extract_uses:
+; GFX950-SDAG: ; %bb.0:
+; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v6, v6, v7
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v4, v4, v5
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v2, v2, v3
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX950-SDAG-NEXT: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-SDAG-NEXT: v_add_f16_sdwa v1, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-SDAG-NEXT: v_add_f16_sdwa v2, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-SDAG-NEXT: v_add_f16_sdwa v3, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-SDAG-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX950-SDAG-NEXT: v_add_f16_e32 v1, v2, v3
+; GFX950-SDAG-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX950-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX950-GISEL-LABEL: fptrunc_v8f32_v8f16_extract_uses:
+; GFX950-GISEL: ; %bb.0:
+; GFX950-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-GISEL-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX950-GISEL-NEXT: v_cvt_pk_f16_f32 v1, v2, v3
+; GFX950-GISEL-NEXT: v_cvt_pk_f16_f32 v2, v4, v5
+; GFX950-GISEL-NEXT: v_cvt_pk_f16_f32 v3, v6, v7
+; GFX950-GISEL-NEXT: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-GISEL-NEXT: v_add_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-GISEL-NEXT: v_add_f16_sdwa v2, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-GISEL-NEXT: v_add_f16_sdwa v3, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-GISEL-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX950-GISEL-NEXT: v_add_f16_e32 v1, v2, v3
+; GFX950-GISEL-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX950-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %vec_half = fptrunc <8 x float> %vec_float to <8 x half>
+ %f0 = extractelement <8 x half> %vec_half, i64 0
+ %f1 = extractelement <8 x half> %vec_half, i64 1
+ %f2 = extractelement <8 x half> %vec_half, i64 2
+ %f3 = extractelement <8 x half> %vec_half, i64 3
+ %f4 = extractelement <8 x half> %vec_half, i64 4
+ %f5 = extractelement <8 x half> %vec_half, i64 5
+ %f6 = extractelement <8 x half> %vec_half, i64 6
+ %f7 = extractelement <8 x half> %vec_half, i64 7
+ %sum0 = fadd half %f0, %f1
+ %sum1 = fadd half %f2, %f3
+ %sum2 = fadd half %f4, %f5
+ %sum3 = fadd half %f6, %f7
+ %sum4 = fadd half %sum0, %sum1
+ %sum5 = fadd half %sum2, %sum3
+ %rslt = fadd half %sum4, %sum5
+ ret half %rslt
}
define <2 x half> @v_test_cvt_v2f64_v2f16(<2 x double> %src) {
From 8de1b0c0d8f1c28d4f5a8cc1298142bd2e283f4a Mon Sep 17 00:00:00 2001
From: Changpeng Fang <changpeng.fang at amd.com>
Date: Wed, 28 May 2025 20:41:40 -0700
Subject: [PATCH 2/4] AMDGPU: Custom lower vector fptrunc of f32 -> f16
GFX950+ supports v_cvt_pk_f16_f32. However, the current implementation
of vector fptrunc lowering fully scalarizes the vector, and the scalar
conversions may not always be recombined into the packed form.
We made v2f32 -> v2f16 legal in https://github.com/llvm/llvm-project/pull/139956.
This work extends that to wider vectors: instead of fully scalarizing,
we split the vector into packs (v2f32 -> v2f16) so that the packed
conversion is always generated.
Fix clang-format
---
llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 12 +++++++-----
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 4 ++--
2 files changed, 9 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 9e9c0e5e18a6d..e1c6550305cca 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1063,9 +1063,10 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
auto &FPTruncActions = getActionDefinitionsBuilder(G_FPTRUNC);
if (ST.hasCvtPkF16F32Inst()) {
FPTruncActions.legalFor({{S32, S64}, {S16, S32}, {V2S16, V2S32}})
- .customFor({{V4S16, V4S32}, {V8S16, V8S32}});
- } else
+ .customFor({{V4S16, V4S32}, {V8S16, V8S32}});
+ } else {
FPTruncActions.legalFor({{S32, S64}, {S16, S32}});
+ }
FPTruncActions.scalarize(0).lower();
getActionDefinitionsBuilder(G_FPEXT)
@@ -2757,12 +2758,13 @@ bool AMDGPULegalizerInfo::legalizeFPTrunc(LegalizerHelper &Helper,
MachineRegisterInfo &MRI) const {
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
- assert (DstTy.isVector() && DstTy.getNumElements() > 2);
+ assert(DstTy.isVector() && DstTy.getNumElements() > 2);
LLT EltTy = DstTy.getElementType();
- assert (EltTy == S16 && "Only handle vectors of half");
+ assert(EltTy == S16 && "Only handle vectors of half");
// Split vector to packs.
- return Helper.fewerElementsVector(MI, 0, LLT::fixed_vector(2, EltTy)) ==
+ LLT PkTy = LLT::fixed_vector(2, EltTy);
+ return Helper.fewerElementsVector(MI, /*TypeIdx=*/0, PkTy) ==
LegalizerHelper::Legalized;
}
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 8d68e5a0d064a..b9e528637a6fd 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6907,7 +6907,7 @@ SDValue SITargetLowering::SplitFP_ROUNDVectorToPacks(SDValue Op,
unsigned Opc = Op.getOpcode();
EVT DstVT = Op.getValueType();
unsigned NumElts = DstVT.getVectorNumElements();
- assert (NumElts % 2 == 0 && "Only handle vectors of even number of elements");
+ assert(NumElts % 2 == 0 && "Only handle vectors of even number of elements");
if (NumElts == 2) // already packed.
return Op;
@@ -6919,7 +6919,7 @@ SDValue SITargetLowering::SplitFP_ROUNDVectorToPacks(SDValue Op,
SDLoc DL(Op);
SmallVector<SDValue, 16> Packs;
- for (unsigned Index = 0; Index < NumElts; Index +=2) {
+ for (unsigned Index = 0; Index < NumElts; Index += 2) {
SDValue PkSrc = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SrcPkVT, Src,
DAG.getConstant(Index, DL, MVT::i32));
SDValue PkDst = DAG.getNode(Opc, DL, DstPkVT, PkSrc,
From 4ecace6dffbf0c58523fff54c87054b752dd3141 Mon Sep 17 00:00:00 2001
From: Changpeng Fang <changpeng.fang at amd.com>
Date: Thu, 29 May 2025 14:42:59 -0700
Subject: [PATCH 3/4] AMDGPU: Custom lower vector fptrunc of f32 -> f16
GFX950+ supports v_cvt_pk_f16_f32. However, the current implementation
of vector fptrunc lowering fully scalarizes the vector, and the scalar
conversions may not always be recombined into the packed form.
We made v2f32 -> v2f16 legal in https://github.com/llvm/llvm-project/pull/139956.
This work extends that to wider vectors: instead of fully scalarizing,
we split the vector into packs (v2f32 -> v2f16) so that the packed
conversion is always generated.
NOTE:
1) Re-implement the helper function the same way as splitUnaryVectorOp;
2) Use fewerElementsIf instead of custom lowering on the GlobalISel path;
3) Add checks for 3x and 16x vectors (a minimal 3x sketch follows).
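On the GlobalISel path, an odd-element vector such as <3 x float> is not
matched by the power-of-2 predicate and falls back to the existing
scalarize/lower rules. A hedged sketch (the function name is hypothetical;
the expected codegen is in the new v_test_cvt_v3f32_v3f16 test below):

  define <3 x half> @cvt_v3_example(<3 x float> %src) {
    %res = fptrunc <3 x float> %src to <3 x half>
    ret <3 x half> %res
  }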
---
.../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 28 ++--
llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h | 2 -
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 37 ++--
llvm/lib/Target/AMDGPU/SIISelLowering.h | 2 +-
.../AMDGPU/fptrunc.v2f16.no.fast.math.ll | 158 ++++++++++++++++++
5 files changed, 183 insertions(+), 44 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index e1c6550305cca..a407081a640af 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -223,6 +223,13 @@ static LegalityPredicate numElementsNotEven(unsigned TypeIdx) {
};
}
+static LegalityPredicate numElementsPowerOf2(unsigned TypeIdx) {
+ return [=](const LegalityQuery &Query) {
+ const LLT QueryTy = Query.Types[TypeIdx];
+ return QueryTy.isVector() && isPowerOf2_32(QueryTy.getNumElements());
+ };
+}
+
static bool isRegisterSize(const GCNSubtarget &ST, unsigned Size) {
return ((ST.useRealTrue16Insts() && Size == 16) || Size % 32 == 0) &&
Size <= MaxRegisterSize;
@@ -1063,7 +1070,9 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
auto &FPTruncActions = getActionDefinitionsBuilder(G_FPTRUNC);
if (ST.hasCvtPkF16F32Inst()) {
FPTruncActions.legalFor({{S32, S64}, {S16, S32}, {V2S16, V2S32}})
- .customFor({{V4S16, V4S32}, {V8S16, V8S32}});
+ .fewerElementsIf(all(elementTypeIs(0, S16), vectorWiderThan(0, 32),
+ numElementsPowerOf2(0), elementTypeIs(1, S32)),
+ changeTo(0, V2S16));
} else {
FPTruncActions.legalFor({{S32, S64}, {S16, S32}});
}
@@ -2165,8 +2174,6 @@ bool AMDGPULegalizerInfo::legalizeCustom(
case TargetOpcode::G_FMINNUM_IEEE:
case TargetOpcode::G_FMAXNUM_IEEE:
return legalizeMinNumMaxNum(Helper, MI);
- case TargetOpcode::G_FPTRUNC:
- return legalizeFPTrunc(Helper, MI, MRI);
case TargetOpcode::G_EXTRACT_VECTOR_ELT:
return legalizeExtractVectorElt(MI, MRI, B);
case TargetOpcode::G_INSERT_VECTOR_ELT:
@@ -2753,21 +2760,6 @@ bool AMDGPULegalizerInfo::legalizeMinNumMaxNum(LegalizerHelper &Helper,
return Helper.lowerFMinNumMaxNum(MI) == LegalizerHelper::Legalized;
}
-bool AMDGPULegalizerInfo::legalizeFPTrunc(LegalizerHelper &Helper,
- MachineInstr &MI,
- MachineRegisterInfo &MRI) const {
- Register DstReg = MI.getOperand(0).getReg();
- LLT DstTy = MRI.getType(DstReg);
- assert(DstTy.isVector() && DstTy.getNumElements() > 2);
- LLT EltTy = DstTy.getElementType();
- assert(EltTy == S16 && "Only handle vectors of half");
-
- // Split vector to packs.
- LLT PkTy = LLT::fixed_vector(2, EltTy);
- return Helper.fewerElementsVector(MI, /*TypeIdx=*/0, PkTy) ==
- LegalizerHelper::Legalized;
-}
-
bool AMDGPULegalizerInfo::legalizeExtractVectorElt(
MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B) const {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index faab326f53fcf..1f4e02b0d600a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -56,8 +56,6 @@ class AMDGPULegalizerInfo final : public LegalizerInfo {
bool legalizeFPTOI(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B, bool Signed) const;
bool legalizeMinNumMaxNum(LegalizerHelper &Helper, MachineInstr &MI) const;
- bool legalizeFPTrunc(LegalizerHelper &Helper, MachineInstr &MI,
- MachineRegisterInfo &MRI) const;
bool legalizeExtractVectorElt(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B) const;
bool legalizeInsertVectorElt(MachineInstr &MI, MachineRegisterInfo &MRI,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index b9e528637a6fd..4064758c7fcaa 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -920,8 +920,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
}
if (Subtarget->hasCvtPkF16F32Inst()) {
- setOperationAction(ISD::FP_ROUND, {MVT::v2f16, MVT::v4f16, MVT::v8f16},
- Custom);
+ setOperationAction(ISD::FP_ROUND, {MVT::v2f16, MVT::v4f16, MVT::v8f16,
+ MVT::v16f16}, Custom);
}
setTargetDAGCombine({ISD::ADD,
@@ -6902,32 +6902,23 @@ SDValue SITargetLowering::getFPExtOrFPRound(SelectionDAG &DAG, SDValue Op,
DAG.getTargetConstant(0, DL, MVT::i32));
}
-SDValue SITargetLowering::SplitFP_ROUNDVectorToPacks(SDValue Op,
- SelectionDAG &DAG) const {
- unsigned Opc = Op.getOpcode();
+SDValue SITargetLowering::splitFP_ROUNDVectorOp(SDValue Op,
+ SelectionDAG &DAG) const {
EVT DstVT = Op.getValueType();
unsigned NumElts = DstVT.getVectorNumElements();
- assert(NumElts % 2 == 0 && "Only handle vectors of even number of elements");
- if (NumElts == 2) // already packed.
- return Op;
+ assert(isPowerOf2_32(NumElts) && "Number of elements must be power of 2");
- SDValue Src = Op.getOperand(0);
- EVT SrcVT = Src.getValueType();
- LLVMContext &Context = *DAG.getContext();
- EVT SrcPkVT = EVT::getVectorVT(Context, SrcVT.getScalarType(), 2);
- EVT DstPkVT = EVT::getVectorVT(Context, DstVT.getScalarType(), 2);
+ auto [Lo, Hi] = DAG.SplitVectorOperand(Op.getNode(), 0);
SDLoc DL(Op);
- SmallVector<SDValue, 16> Packs;
- for (unsigned Index = 0; Index < NumElts; Index += 2) {
- SDValue PkSrc = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SrcPkVT, Src,
- DAG.getConstant(Index, DL, MVT::i32));
- SDValue PkDst = DAG.getNode(Opc, DL, DstPkVT, PkSrc,
- DAG.getTargetConstant(0, DL, MVT::i32));
- Packs.push_back(PkDst);
- }
+ unsigned Opc = Op.getOpcode();
+ SDValue Flags = Op.getOperand(1);
+ EVT HalfDstVT = EVT::getVectorVT(*DAG.getContext(), DstVT.getScalarType(),
+ NumElts / 2);
+ SDValue OpLo = DAG.getNode(Opc, DL, HalfDstVT, Lo, Flags);
+ SDValue OpHi = DAG.getNode(Opc, DL, HalfDstVT, Hi, Flags);
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, DstVT, Packs);
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, DstVT, OpLo, OpHi);
}
SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
@@ -6939,7 +6930,7 @@ SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
assert(Subtarget->hasCvtPkF16F32Inst() && "support v_cvt_pk_f16_f32");
if (SrcVT.getScalarType() != MVT::f32)
return SDValue();
- return SplitFP_ROUNDVectorToPacks(Op, DAG);
+ return DstVT == MVT::v2f16 ? Op : splitFP_ROUNDVectorOp(Op, DAG);
}
if (SrcVT.getScalarType() != MVT::f64)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 1c5ec0e5a25a1..a1ae42f4efd5f 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -145,7 +145,7 @@ class SITargetLowering final : public AMDGPUTargetLowering {
/// Custom lowering for ISD::FP_ROUND for MVT::f16.
SDValue lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
- SDValue SplitFP_ROUNDVectorToPacks(SDValue Op, SelectionDAG &DAG) const;
+ SDValue splitFP_ROUNDVectorOp(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerFMINNUM_FMAXNUM(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerFMINIMUM_FMAXIMUM(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerFLDEXP(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.v2f16.no.fast.math.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.v2f16.no.fast.math.ll
index b4c8dac3ff518..8184d23f65474 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.v2f16.no.fast.math.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.v2f16.no.fast.math.ll
@@ -12,6 +12,27 @@ define <2 x half> @v_test_cvt_v2f32_v2f16(<2 x float> %src) {
ret <2 x half> %res
}
+define <3 x half> @v_test_cvt_v3f32_v3f16(<3 x float> %src) {
+; GFX950-SDAG-LABEL: v_test_cvt_v3f32_v3f16:
+; GFX950-SDAG: ; %bb.0:
+; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX950-SDAG-NEXT: v_mov_b32_e32 v1, v2
+; GFX950-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX950-GISEL-LABEL: v_test_cvt_v3f32_v3f16:
+; GFX950-GISEL: ; %bb.0:
+; GFX950-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX950-GISEL-NEXT: v_cvt_f16_f32_e32 v3, v1
+; GFX950-GISEL-NEXT: v_cvt_f16_f32_e32 v1, v2
+; GFX950-GISEL-NEXT: v_pack_b32_f16 v0, v0, v3
+; GFX950-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %res = fptrunc <3 x float> %src to <3 x half>
+ ret <3 x half> %res
+}
+
define <4 x half> @v_test_cvt_v4f32_v4f16(<4 x float> %src) {
; GFX950-LABEL: v_test_cvt_v4f32_v4f16:
; GFX950: ; %bb.0:
@@ -36,6 +57,23 @@ define <8 x half> @v_test_cvt_v8f32_v2f16(<8 x float> %src) {
ret <8 x half> %res
}
+define <16 x half> @v_test_cvt_v16f32_v16f16(<16 x float> %src) {
+; GFX950-LABEL: v_test_cvt_v16f32_v16f16:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX950-NEXT: v_cvt_pk_f16_f32 v1, v2, v3
+; GFX950-NEXT: v_cvt_pk_f16_f32 v2, v4, v5
+; GFX950-NEXT: v_cvt_pk_f16_f32 v3, v6, v7
+; GFX950-NEXT: v_cvt_pk_f16_f32 v4, v8, v9
+; GFX950-NEXT: v_cvt_pk_f16_f32 v5, v10, v11
+; GFX950-NEXT: v_cvt_pk_f16_f32 v6, v12, v13
+; GFX950-NEXT: v_cvt_pk_f16_f32 v7, v14, v15
+; GFX950-NEXT: s_setpc_b64 s[30:31]
+ %res = fptrunc <16 x float> %src to <16 x half>
+ ret <16 x half> %res
+}
+
define half @fptrunc_v2f32_v2f16_extract_uses(<2 x float> %src) {
; GFX950-LABEL: fptrunc_v2f32_v2f16_extract_uses:
; GFX950: ; %bb.0:
@@ -50,6 +88,35 @@ define half @fptrunc_v2f32_v2f16_extract_uses(<2 x float> %src) {
ret half %rslt
}
+define half @fptrunc_v3f32_v3f16_extract_uses(<3 x float> %vec_float) {
+; GFX950-SDAG-LABEL: fptrunc_v3f32_v3f16_extract_uses:
+; GFX950-SDAG: ; %bb.0:
+; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX950-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX950-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX950-SDAG-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX950-SDAG-NEXT: v_add_f16_e32 v0, v2, v0
+; GFX950-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX950-GISEL-LABEL: fptrunc_v3f32_v3f16_extract_uses:
+; GFX950-GISEL: ; %bb.0:
+; GFX950-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX950-GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX950-GISEL-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX950-GISEL-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX950-GISEL-NEXT: v_add_f16_e32 v0, v2, v0
+; GFX950-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %vec_half = fptrunc <3 x float> %vec_float to <3 x half>
+ %f0 = extractelement <3 x half> %vec_half, i64 0
+ %f1 = extractelement <3 x half> %vec_half, i64 1
+ %f2 = extractelement <3 x half> %vec_half, i64 2
+ %sum0 = fadd half %f0, %f1
+ %rslt = fadd half %f2, %sum0
+ ret half %rslt
+}
+
define half @fptrunc_v4f32_v4f16_extract_uses(<4 x float> %vec_float) {
; GFX950-SDAG-LABEL: fptrunc_v4f32_v4f16_extract_uses:
; GFX950-SDAG: ; %bb.0:
@@ -132,6 +199,97 @@ define half @fptrunc_v8f32_v8f16_extract_uses(<8 x float> %vec_float) {
ret half %rslt
}
+define half @fptrunc_v16f32_v16f16_extract_uses(<16 x float> %vec_float) {
+; GFX950-SDAG-LABEL: fptrunc_v16f32_v16f16_extract_uses:
+; GFX950-SDAG: ; %bb.0:
+; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v14, v14, v15
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v12, v12, v13
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v10, v10, v11
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v8, v8, v9
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v6, v6, v7
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v4, v4, v5
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v2, v2, v3
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX950-SDAG-NEXT: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-SDAG-NEXT: v_add_f16_sdwa v1, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-SDAG-NEXT: v_add_f16_sdwa v2, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-SDAG-NEXT: v_add_f16_sdwa v3, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-SDAG-NEXT: v_add_f16_sdwa v4, v8, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-SDAG-NEXT: v_add_f16_sdwa v5, v10, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-SDAG-NEXT: v_add_f16_sdwa v6, v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-SDAG-NEXT: v_add_f16_sdwa v7, v14, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-SDAG-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX950-SDAG-NEXT: v_add_f16_e32 v1, v2, v3
+; GFX950-SDAG-NEXT: v_add_f16_e32 v2, v4, v5
+; GFX950-SDAG-NEXT: v_add_f16_e32 v3, v6, v7
+; GFX950-SDAG-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX950-SDAG-NEXT: v_add_f16_e32 v1, v2, v3
+; GFX950-SDAG-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX950-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX950-GISEL-LABEL: fptrunc_v16f32_v16f16_extract_uses:
+; GFX950-GISEL: ; %bb.0:
+; GFX950-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-GISEL-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX950-GISEL-NEXT: v_cvt_pk_f16_f32 v1, v2, v3
+; GFX950-GISEL-NEXT: v_cvt_pk_f16_f32 v2, v4, v5
+; GFX950-GISEL-NEXT: v_cvt_pk_f16_f32 v3, v6, v7
+; GFX950-GISEL-NEXT: v_cvt_pk_f16_f32 v4, v8, v9
+; GFX950-GISEL-NEXT: v_cvt_pk_f16_f32 v5, v10, v11
+; GFX950-GISEL-NEXT: v_cvt_pk_f16_f32 v6, v12, v13
+; GFX950-GISEL-NEXT: v_cvt_pk_f16_f32 v7, v14, v15
+; GFX950-GISEL-NEXT: v_add_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-GISEL-NEXT: v_add_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-GISEL-NEXT: v_add_f16_sdwa v2, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-GISEL-NEXT: v_add_f16_sdwa v3, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-GISEL-NEXT: v_add_f16_sdwa v4, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-GISEL-NEXT: v_add_f16_sdwa v5, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-GISEL-NEXT: v_add_f16_sdwa v6, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-GISEL-NEXT: v_add_f16_sdwa v7, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX950-GISEL-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX950-GISEL-NEXT: v_add_f16_e32 v1, v2, v3
+; GFX950-GISEL-NEXT: v_add_f16_e32 v2, v4, v5
+; GFX950-GISEL-NEXT: v_add_f16_e32 v3, v6, v7
+; GFX950-GISEL-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX950-GISEL-NEXT: v_add_f16_e32 v1, v2, v3
+; GFX950-GISEL-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX950-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %vec_half = fptrunc <16 x float> %vec_float to <16 x half>
+ %f0 = extractelement <16 x half> %vec_half, i64 0
+ %f1 = extractelement <16 x half> %vec_half, i64 1
+ %f2 = extractelement <16 x half> %vec_half, i64 2
+ %f3 = extractelement <16 x half> %vec_half, i64 3
+ %f4 = extractelement <16 x half> %vec_half, i64 4
+ %f5 = extractelement <16 x half> %vec_half, i64 5
+ %f6 = extractelement <16 x half> %vec_half, i64 6
+ %f7 = extractelement <16 x half> %vec_half, i64 7
+ %f8 = extractelement <16 x half> %vec_half, i64 8
+ %f9 = extractelement <16 x half> %vec_half, i64 9
+ %f10 = extractelement <16 x half> %vec_half, i64 10
+ %f11 = extractelement <16 x half> %vec_half, i64 11
+ %f12 = extractelement <16 x half> %vec_half, i64 12
+ %f13 = extractelement <16 x half> %vec_half, i64 13
+ %f14 = extractelement <16 x half> %vec_half, i64 14
+ %f15 = extractelement <16 x half> %vec_half, i64 15
+ %sum0 = fadd half %f0, %f1
+ %sum1 = fadd half %f2, %f3
+ %sum2 = fadd half %f4, %f5
+ %sum3 = fadd half %f6, %f7
+ %sum4 = fadd half %f8, %f9
+ %sum5 = fadd half %f10, %f11
+ %sum6 = fadd half %f12, %f13
+ %sum7 = fadd half %f14, %f15
+ %sum8 = fadd half %sum0, %sum1
+ %sum9 = fadd half %sum2, %sum3
+ %sum10 = fadd half %sum4, %sum5
+ %sum11 = fadd half %sum6, %sum7
+ %sum12 = fadd half %sum8, %sum9
+ %sum13 = fadd half %sum10, %sum11
+ %rslt = fadd half %sum12, %sum13
+ ret half %rslt
+}
+
define <2 x half> @v_test_cvt_v2f64_v2f16(<2 x double> %src) {
; GFX950-SDAG-LABEL: v_test_cvt_v2f64_v2f16:
; GFX950-SDAG: ; %bb.0:
From 833b904563c0630f35a38f75ba23a0115a6df95a Mon Sep 17 00:00:00 2001
From: Changpeng Fang <changpeng.fang at amd.com>
Date: Thu, 29 May 2025 15:04:34 -0700
Subject: [PATCH 4/4] AMDGPU: Custom lower vector fptrunc of f32 -> f16
GFX950+ supports v_cvt_pk_f16_f32. However, the current implementation
of vector fptrunc lowering fully scalarizes the vector, and the scalar
conversions may not always be recombined into the packed form.
We made v2f32 -> v2f16 legal in https://github.com/llvm/llvm-project/pull/139956.
This work extends that to wider vectors: instead of fully scalarizing,
we split the vector into packs (v2f32 -> v2f16) so that the packed
conversion is always generated.
NOTE:
Minor changes: tighten the assert in splitFP_ROUNDVectorOp and check
SrcVT (rather than DstVT) against the packed type in lowerFP_ROUND.
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 4064758c7fcaa..1e6eb080bc0e1 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6906,7 +6906,7 @@ SDValue SITargetLowering::splitFP_ROUNDVectorOp(SDValue Op,
SelectionDAG &DAG) const {
EVT DstVT = Op.getValueType();
unsigned NumElts = DstVT.getVectorNumElements();
- assert(isPowerOf2_32(NumElts) && "Number of elements must be power of 2");
+ assert(NumElts > 2 && isPowerOf2_32(NumElts));
auto [Lo, Hi] = DAG.SplitVectorOperand(Op.getNode(), 0);
@@ -6930,7 +6930,7 @@ SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
assert(Subtarget->hasCvtPkF16F32Inst() && "support v_cvt_pk_f16_f32");
if (SrcVT.getScalarType() != MVT::f32)
return SDValue();
- return DstVT == MVT::v2f16 ? Op : splitFP_ROUNDVectorOp(Op, DAG);
+ return SrcVT == MVT::v2f32 ? Op : splitFP_ROUNDVectorOp(Op, DAG);
}
if (SrcVT.getScalarType() != MVT::f64)