[llvm-branch-commits] [clang] [llvm] AMDGPU: Builtins & Codegen support for v_cvt_scalef32_pk_f32_{fp8|bf8} for gfx950 (PR #117741)
Matt Arsenault via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Nov 26 08:50:44 PST 2024
https://github.com/arsenm created https://github.com/llvm/llvm-project/pull/117741
OPSEL[0] selects whether the low or the high 16 bits of src0 are read.
Co-authored-by: Pravin Jagtap <Pravin.Jagtap at amd.com>
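
For context, a minimal OpenCL usage sketch (not part of the patch; the kernel, buffer names, and indexing are illustrative assumptions — only the builtin signature exercised by the tests below is taken from this change):

// Illustrative only: a uint carries four packed FP8 values, two per 16-bit word.
// The constant bool argument maps to OPSEL[0]: false reads the low 16 bits
// (word 0), true reads the high 16 bits (word 1); each call returns the two
// selected values converted to float and multiplied by the scale.
typedef float float2 __attribute__((ext_vector_type(2)));

kernel void unpack_scaled_fp8(global float2 *out, global uint *packed, float scale) {
  size_t gid = get_global_id(0);
  uint src = packed[gid];
  float2 lo = __builtin_amdgcn_cvt_scalef32_pk_f32_fp8(src, scale, false); // word 0
  float2 hi = __builtin_amdgcn_cvt_scalef32_pk_f32_fp8(src, scale, true);  // word 1
  out[2 * gid]     = lo;
  out[2 * gid + 1] = hi;
}
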
From 8f2c64c1b0ade2a055ec958f203706f37fc85338 Mon Sep 17 00:00:00 2001
From: Pravin Jagtap <Pravin.Jagtap at amd.com>
Date: Thu, 18 Apr 2024 01:18:49 -0400
Subject: [PATCH] AMDGPU: Builtins & Codegen support for
v_cvt_scalef32_pk_f32_{fp8|bf8} for gfx950
OPSEL[0] selects whether the low or the high 16 bits of src0 are read.
Co-authored-by: Pravin Jagtap <Pravin.Jagtap at amd.com>
---
clang/include/clang/Basic/BuiltinsAMDGPU.def | 3 +
.../builtins-amdgcn-gfx950-err.cl | 6 +-
.../CodeGenOpenCL/builtins-amdgcn-gfx950.cl | 56 ++++++++++++++++++-
.../builtins-amdgcn-error-gfx950-param.cl | 5 +-
llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 12 ++++
.../AMDGPU/AMDGPUInstructionSelector.cpp | 7 +++
.../Target/AMDGPU/AMDGPUInstructionSelector.h | 3 +
.../Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 2 +
llvm/lib/Target/AMDGPU/VOP3Instructions.td | 15 +++++
.../llvm.amdgcn.cvt.scalef32.pk.gfx950.ll | 42 ++++++++++++++
10 files changed, 147 insertions(+), 4 deletions(-)
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index a292640b7c4f21..f92e5c17188ec9 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -576,5 +576,8 @@ TARGET_BUILTIN(__builtin_amdgcn_cvt_scalef32_f32_fp8, "fifIi", "nc", "fp8-cvt-sc
TARGET_BUILTIN(__builtin_amdgcn_cvt_scalef32_f32_bf8, "fifIi", "nc", "bf8-cvt-scale-insts")
TARGET_BUILTIN(__builtin_amdgcn_cvt_scalef32_pk_fp8_f32, "V2sV2sfffIb", "nc", "fp8-cvt-scale-insts")
TARGET_BUILTIN(__builtin_amdgcn_cvt_scalef32_pk_bf8_f32, "V2sV2sfffIb", "nc", "bf8-cvt-scale-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_scalef32_pk_f32_fp8, "V2fUifIb", "nc", "fp8-cvt-scale-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_scalef32_pk_f32_bf8, "V2fUifIb", "nc", "bf8-cvt-scale-insts")
+
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950-err.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950-err.cl
index 5ec769dc6a84bc..54b8573a1b4de3 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950-err.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950-err.cl
@@ -14,8 +14,10 @@ typedef unsigned int uint;
typedef unsigned int uint2 __attribute__((ext_vector_type(2)));
typedef half __attribute__((ext_vector_type(2))) half2;
typedef short __attribute__((ext_vector_type(2))) short2;
+typedef float __attribute__((ext_vector_type(2))) float2;
-void test(global uint* out, global uint2* out_v2u32, uint a, uint b, global half2* out_v2f16, global float* out_f32, float scale, global short2* out_v2i16, float src0, float src1) {
+void test(global uint* out, global uint2* out_v2u32, uint a, uint b, global half2* out_v2f16, global float* out_f32, float scale, global short2* out_v2i16, float src0, float src1,
+ global float2* out_v2f32) {
*out = __builtin_amdgcn_prng_b32(a); // expected-error{{'__builtin_amdgcn_prng_b32' needs target feature prng-inst}}
*out_v2u32 = __builtin_amdgcn_permlane16_swap(a, b, false, false); // expected-error{{'__builtin_amdgcn_permlane16_swap' needs target feature permlane16-swap}}
*out_v2u32 = __builtin_amdgcn_permlane32_swap(a, b, false, false); // expected-error{{'__builtin_amdgcn_permlane32_swap' needs target feature permlane32-swap}}
@@ -25,4 +27,6 @@ void test(global uint* out, global uint2* out_v2u32, uint a, uint b, global half
*out_f32 = __builtin_amdgcn_cvt_scalef32_f32_bf8(a, scale, 0); // expected-error{{'__builtin_amdgcn_cvt_scalef32_f32_bf8' needs target feature bf8-cvt-scale-insts}}
*out_v2i16 = __builtin_amdgcn_cvt_scalef32_pk_fp8_f32(*out_v2i16, src0, src1, scale, true); // expected-error{{'__builtin_amdgcn_cvt_scalef32_pk_fp8_f32' needs target feature fp8-cvt-scale-insts}}
*out_v2i16 = __builtin_amdgcn_cvt_scalef32_pk_bf8_f32(*out_v2i16, src0, src1, scale, true); // expected-error{{'__builtin_amdgcn_cvt_scalef32_pk_bf8_f32' needs target feature bf8-cvt-scale-insts}}
+ *out_v2f32 = __builtin_amdgcn_cvt_scalef32_pk_f32_fp8(a, scale, true); // expected-error{{'__builtin_amdgcn_cvt_scalef32_pk_f32_fp8' needs target feature fp8-cvt-scale-insts}}
+ *out_v2f32 = __builtin_amdgcn_cvt_scalef32_pk_f32_bf8(a, scale, true); // expected-error{{'__builtin_amdgcn_cvt_scalef32_pk_f32_bf8' needs target feature bf8-cvt-scale-insts}}
}
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl
index 9f23474226791c..1313c5ec8d5443 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx950.cl
@@ -13,6 +13,7 @@ typedef short __attribute__((ext_vector_type(2))) short2;
typedef __bf16 __attribute__((ext_vector_type(2))) bfloat2;
typedef float __attribute__((ext_vector_type(16))) float16;
typedef half __attribute__((ext_vector_type(2))) half2;
+typedef float __attribute__((ext_vector_type(2))) float2;
// CHECK-LABEL: @test_prng_b32(
// CHECK-NEXT: entry:
@@ -498,7 +499,6 @@ void test_cvt_scalef32_f32_bf8(global float* out, uint src, float scale)
*out = __builtin_amdgcn_cvt_scalef32_f32_bf8(src, scale, 3);
}
-
// CHECK-LABEL: @test_cvt_scalef32_pk_fp8_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
@@ -533,7 +533,6 @@ void test_cvt_scalef32_pk_fp8_f32(global short2* out, float src0, float src1, fl
*out = __builtin_amdgcn_cvt_scalef32_pk_fp8_f32(*out, src0, src1, scale, false);
}
-
// CHECK-LABEL: @test_cvt_scalef32_pk_bf8_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
@@ -567,3 +566,56 @@ void test_cvt_scalef32_pk_bf8_f32(global short2* out, float src0, float src1, fl
*out = __builtin_amdgcn_cvt_scalef32_pk_bf8_f32(*out, src0, src1, scale, true);
*out = __builtin_amdgcn_cvt_scalef32_pk_bf8_f32(*out, src0, src1, scale, false);
}
+
+
+// CHECK-LABEL: @test_cvt_scalef32_pk_f32_fp8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[SCALE_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr addrspace(5) [[OUT_ADDR]], align 8
+// CHECK-NEXT: store i32 [[SRC:%.*]], ptr addrspace(5) [[SRC_ADDR]], align 4
+// CHECK-NEXT: store float [[SCALE:%.*]], ptr addrspace(5) [[SCALE_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[SRC_ADDR]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr addrspace(5) [[SCALE_ADDR]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp8(i32 [[TMP0]], float [[TMP1]], i1 true)
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[OUT_ADDR]], align 8
+// CHECK-NEXT: store <2 x float> [[TMP2]], ptr addrspace(1) [[TMP3]], align 8
+// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(5) [[SRC_ADDR]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr addrspace(5) [[SCALE_ADDR]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp8(i32 [[TMP4]], float [[TMP5]], i1 false)
+// CHECK-NEXT: [[TMP7:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[OUT_ADDR]], align 8
+// CHECK-NEXT: store <2 x float> [[TMP6]], ptr addrspace(1) [[TMP7]], align 8
+// CHECK-NEXT: ret void
+//
+void test_cvt_scalef32_pk_f32_fp8(global float2* out, unsigned int src, float scale)
+{
+ *out = __builtin_amdgcn_cvt_scalef32_pk_f32_fp8(src, scale, true);
+ *out = __builtin_amdgcn_cvt_scalef32_pk_f32_fp8(src, scale, false);
+}
+
+// CHECK-LABEL: @test_cvt_scalef32_pk_f32_bf8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[SCALE_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr addrspace(5) [[OUT_ADDR]], align 8
+// CHECK-NEXT: store i32 [[SRC:%.*]], ptr addrspace(5) [[SRC_ADDR]], align 4
+// CHECK-NEXT: store float [[SCALE:%.*]], ptr addrspace(5) [[SCALE_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(5) [[SRC_ADDR]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr addrspace(5) [[SCALE_ADDR]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.bf8(i32 [[TMP0]], float [[TMP1]], i1 true)
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[OUT_ADDR]], align 8
+// CHECK-NEXT: store <2 x float> [[TMP2]], ptr addrspace(1) [[TMP3]], align 8
+// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(5) [[SRC_ADDR]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr addrspace(5) [[SCALE_ADDR]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.bf8(i32 [[TMP4]], float [[TMP5]], i1 false)
+// CHECK-NEXT: [[TMP7:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[OUT_ADDR]], align 8
+// CHECK-NEXT: store <2 x float> [[TMP6]], ptr addrspace(1) [[TMP7]], align 8
+// CHECK-NEXT: ret void
+//
+void test_cvt_scalef32_pk_f32_bf8(global float2* out, unsigned int src, float scale)
+{
+ *out = __builtin_amdgcn_cvt_scalef32_pk_f32_bf8(src, scale, true);
+ *out = __builtin_amdgcn_cvt_scalef32_pk_f32_bf8(src, scale, false);
+}
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950-param.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950-param.cl
index fa896f9be782ed..7aed9975b5cc22 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950-param.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx950-param.cl
@@ -13,6 +13,7 @@ typedef int int16 __attribute__((ext_vector_type(16)));
typedef unsigned int uint;
typedef half half2 __attribute__((ext_vector_type(2)));
typedef short short2 __attribute__((ext_vector_type(2)));
+typedef float float2 __attribute__((ext_vector_type(2)));
void test_mfma_f32_16x16x32_f16(__global float4* out, half8 a, half8 b, float4 c, int X) {
@@ -162,11 +163,13 @@ void test_permlane32_swap(__global int* out, int old, int src, bool X) {
}
void test_cvt_scalef32(global half2* out_v2f16, global float* out_f32, uint src, float scale, int index, bool X,
- global short2* out_v2i16, float src0, float src1) {
+ global short2* out_v2i16, float src0, float src1, global float2* out_v2f32) {
*out_v2f16 = __builtin_amdgcn_cvt_scalef32_f16_fp8(*out_v2f16, src, scale, index, X); // expected-error{{argument to '__builtin_amdgcn_cvt_scalef32_f16_fp8' must be a constant integer}}
*out_f32 = __builtin_amdgcn_cvt_scalef32_f32_fp8(src, scale, index); // // expected-error{{argument to '__builtin_amdgcn_cvt_scalef32_f32_fp8' must be a constant integer}}
*out_v2f16 = __builtin_amdgcn_cvt_scalef32_f16_bf8(*out_v2f16, src, scale, index, X); // expected-error{{argument to '__builtin_amdgcn_cvt_scalef32_f16_bf8' must be a constant integer}}
*out_f32 = __builtin_amdgcn_cvt_scalef32_f32_bf8(src, scale, index); // expected-error{{argument to '__builtin_amdgcn_cvt_scalef32_f32_bf8' must be a constant integer}}
*out_v2i16 = __builtin_amdgcn_cvt_scalef32_pk_fp8_f32(*out_v2i16, src0, src1, scale, X); // expected-error{{argument to '__builtin_amdgcn_cvt_scalef32_pk_fp8_f32' must be a constant integer}}
*out_v2i16 = __builtin_amdgcn_cvt_scalef32_pk_bf8_f32(*out_v2i16, src0, src1, scale, X); // expected-error{{argument to '__builtin_amdgcn_cvt_scalef32_pk_bf8_f32' must be a constant integer}}
+ *out_v2f32 = __builtin_amdgcn_cvt_scalef32_pk_f32_fp8(src, scale, X); // expected-error{{argument to '__builtin_amdgcn_cvt_scalef32_pk_f32_fp8' must be a constant integer}}
+ *out_v2f32 = __builtin_amdgcn_cvt_scalef32_pk_f32_bf8(src, scale, X); // expected-error{{argument to '__builtin_amdgcn_cvt_scalef32_pk_f32_bf8' must be a constant integer}}
}
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index b8e2f0d07387fa..4e02f1d33a1fcc 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -617,6 +617,14 @@ class AMDGPUCvtScaleFP8BF8ToF32Intrinsic<LLVMType DstTy, string name> : DefaultA
[IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<2>>]
>, ClangBuiltin<"__builtin_amdgcn_"#name>;
+class AMDGPUCvtScale_pk_FP8BF8ToF32Intrinsic<string name> : DefaultAttrsIntrinsic<
+ [llvm_v2f32_ty],
+ [llvm_i32_ty, // src
+ llvm_float_ty, // scale
+ llvm_i1_ty], // src_lo_hi_sel[true false]
+ [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]
+>, ClangBuiltin<"__builtin_amdgcn_"#name>;
+
class AMDGPUCvtScaleF32ToFP8BF8TiedInputIntrinsic<string name> : DefaultAttrsIntrinsic<
[llvm_v2i16_ty],
[llvm_v2i16_ty, // old_vdst
@@ -649,6 +657,10 @@ def int_amdgcn_cvt_scalef32_f32_bf8 : AMDGPUCvtScaleFP8BF8ToF32Intrinsic<llvm_f
def int_amdgcn_cvt_scalef32_pk_fp8_f32 : AMDGPUCvtScaleF32ToFP8BF8TiedInputIntrinsic<"cvt_scalef32_pk_fp8_f32">;
def int_amdgcn_cvt_scalef32_pk_bf8_f32 : AMDGPUCvtScaleF32ToFP8BF8TiedInputIntrinsic<"cvt_scalef32_pk_bf8_f32">;
+// llvm.amdgcn.cvt.scalef32.pk.f32.fp8 int src, float scale, bool src_lo_hi_sel
+def int_amdgcn_cvt_scalef32_pk_f32_fp8 : AMDGPUCvtScale_pk_FP8BF8ToF32Intrinsic<"cvt_scalef32_pk_f32_fp8">;
+def int_amdgcn_cvt_scalef32_pk_f32_bf8 : AMDGPUCvtScale_pk_FP8BF8ToF32Intrinsic<"cvt_scalef32_pk_f32_bf8">;
+
def int_amdgcn_prng_b32 : DefaultAttrsIntrinsic<
[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]
>, ClangBuiltin<"__builtin_amdgcn_prng_b32">;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index af0bd11183579f..479364a4c4eae3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -5853,6 +5853,13 @@ void AMDGPUInstructionSelector::renderDstSelToOpSelXForm(
: 0);
}
+void AMDGPUInstructionSelector::renderSrcSelToOpSelXForm(
+ MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
+ assert(OpIdx >= 0 && "expected to match an immediate operand");
+ MIB.addImm(MI.getOperand(OpIdx).getImm() ? (int64_t)(SISrcMods::OP_SEL_0)
+ : 0);
+}
+
void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
const MachineInstr &MI,
int OpIdx) const {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
index 2fe96915c57cef..c6b387f2a25b2a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -354,6 +354,9 @@ class AMDGPUInstructionSelector final : public InstructionSelector {
void renderDstSelToOpSelXForm(MachineInstrBuilder &MIB,
const MachineInstr &MI, int OpIdx) const;
+ void renderSrcSelToOpSelXForm(MachineInstrBuilder &MIB,
+ const MachineInstr &MI, int OpIdx) const;
+
void renderNegateImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
int OpIdx) const;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 51c539fac26132..4c93b6ce4eef1f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4553,6 +4553,8 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_cvt_scalef32_f32_bf8:
case Intrinsic::amdgcn_cvt_scalef32_pk_fp8_f32:
case Intrinsic::amdgcn_cvt_scalef32_pk_bf8_f32:
+ case Intrinsic::amdgcn_cvt_scalef32_pk_f32_fp8:
+ case Intrinsic::amdgcn_cvt_scalef32_pk_f32_bf8:
case Intrinsic::amdgcn_ashr_pk_i8_i32:
case Intrinsic::amdgcn_ashr_pk_u8_i32:
case Intrinsic::amdgcn_cvt_scalef32_2xpk16_fp6_f32:
diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 8511211fd64eef..5bf45b949a79c4 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -877,6 +877,14 @@ def DstSelToOpSelXForm : SDNodeXForm<timm, [{
def gi_DstSelToOpSelXForm : GICustomOperandRenderer<"renderDstSelToOpSelXForm">,
GISDNodeXFormEquiv<DstSelToOpSelXForm>;
+def SrcSelToOpSelXForm : SDNodeXForm<timm, [{
+ return CurDAG->getTargetConstant(
+ N->getZExtValue() ? SISrcMods::OP_SEL_0 : SISrcMods::NONE,
+ SDLoc(N), MVT::i32);
+}]>;
+def gi_SrcSelToOpSelXForm : GICustomOperandRenderer<"renderSrcSelToOpSelXForm">,
+ GISDNodeXFormEquiv<SrcSelToOpSelXForm>;
+
class PermlanePat<SDPatternOperator permlane,
Instruction inst, ValueType vt> : GCNPat<
(vt (permlane vt:$vdst_in, vt:$src0, i32:$src1, i32:$src2,
@@ -1100,6 +1108,13 @@ class Cvt_Scale_PK_F32ToFP8BF8_Pat<SDPatternOperator node, VOP3_Pseudo inst> : G
def : Cvt_Scale_PK_F32ToFP8BF8_Pat<int_amdgcn_cvt_scalef32_pk_fp8_f32, V_CVT_SCALEF32_PK_FP8_F32_e64>;
def : Cvt_Scale_PK_F32ToFP8BF8_Pat<int_amdgcn_cvt_scalef32_pk_bf8_f32, V_CVT_SCALEF32_PK_BF8_F32_e64>;
+class Cvt_Scale_PK_FP8BF8ToF32_Pat<SDPatternOperator node, VOP3_Pseudo inst> : GCNPat<
+ (v2f32 (node i32:$src0, f32:$src1, timm:$word_sel)),
+ (inst (SrcSelToOpSelXForm $word_sel), $src0, 0, $src1)
+>;
+def : Cvt_Scale_PK_FP8BF8ToF32_Pat<int_amdgcn_cvt_scalef32_pk_f32_fp8, V_CVT_SCALEF32_PK_F32_FP8_e64>;
+def : Cvt_Scale_PK_FP8BF8ToF32_Pat<int_amdgcn_cvt_scalef32_pk_f32_bf8, V_CVT_SCALEF32_PK_F32_BF8_e64>;
+
let SubtargetPredicate = isGFX10Plus in {
let isCommutable = 1, isReMaterializable = 1 in {
defm V_XOR3_B32 : VOP3Inst <"v_xor3_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
index 902f8119dfa398..f3ad5191491b54 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
@@ -10,6 +10,8 @@ declare <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half>, i32, float, i32
declare float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32, float, i32)
declare <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.fp8.f32(<2 x i16>, float, float, float, i1)
declare <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.bf8.f32(<2 x i16>, float, float, float, i1)
+declare <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp8(i32, float, i1)
+declare <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.bf8(i32, float, i1)
define amdgpu_ps void @test_scalef32_pk32_fp6_f32_vv(<16 x float> %src, float %scale, ptr addrspace(1) %out) {
; GFX950-SDAG-LABEL: test_scalef32_pk32_fp6_f32_vv:
@@ -518,3 +520,43 @@ define <2 x i16> @test_cvt_scalef32_pk_bf8_f32_word1_fabs_fneg(<2 x i16> %old, f
%ret = tail call <2 x i16> @llvm.amdgcn.cvt.scalef32.pk.bf8.f32(<2 x i16> %old, float %fabs.src0, float %fneg.src1, float %scale, i1 true)
ret <2 x i16> %ret
}
+
+define <2 x float> @test_cvt_scalef32_pk_f32_fp8_word0(i32 %src, float %scale) {
+; GCN-LABEL: test_cvt_scalef32_pk_f32_fp8_word0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp8 v[0:1], v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp8(i32 %src, float %scale, i1 false)
+ ret <2 x float> %ret
+}
+
+define <2 x float> @test_cvt_scalef32_pk_f32_fp8_word1(i32 %src, float %scale) {
+; GCN-LABEL: test_cvt_scalef32_pk_f32_fp8_word1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp8 v[0:1], v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp8(i32 %src, float %scale, i1 true)
+ ret <2 x float> %ret
+}
+
+define <2 x float> @test_cvt_scalef32_pk_f32_bf8_word0(i32 %src, float %scale) {
+; GCN-LABEL: test_cvt_scalef32_pk_f32_bf8_word0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cvt_scalef32_pk_f32_bf8 v[0:1], v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.bf8(i32 %src, float %scale, i1 false)
+ ret <2 x float> %ret
+}
+
+define <2 x float> @test_cvt_scalef32_pk_f32_bf8_word1(i32 %src, float %scale) {
+; GCN-LABEL: test_cvt_scalef32_pk_f32_bf8_word1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cvt_scalef32_pk_f32_bf8 v[0:1], v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.bf8(i32 %src, float %scale, i1 true)
+ ret <2 x float> %ret
+}