[llvm-branch-commits] [clang] [llvm] [AMDGPU] Add support for `v_sat_pk4_i4_[i8, u8]` on gfx1250 (PR #149528)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Jul 18 08:08:28 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-clang
Author: Shilei Tian (shiltian)
Changes:
Co-authored-by: Mekhanoshin, Stanislav <Stanislav.Mekhanoshin@amd.com>
Co-authored-by: Foad, Jay <Jay.Foad@amd.com>
---
Patch is 60.85 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/149528.diff
25 Files Affected:
- (modified) clang/include/clang/Basic/BuiltinsAMDGPU.def (+2)
- (modified) clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl (+25)
- (modified) llvm/include/llvm/IR/IntrinsicsAMDGPU.td (+6)
- (modified) llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp (+2)
- (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.td (+1)
- (modified) llvm/lib/Target/AMDGPU/VOP1Instructions.td (+5)
- (added) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sat.pk.ll (+305)
- (modified) llvm/test/MC/AMDGPU/gfx1250_asm_vop1-fake16.s (+51)
- (modified) llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s (+30)
- (modified) llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16-fake16.s (+16)
- (modified) llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16.s (+24)
- (modified) llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8-fake16.s (+16)
- (modified) llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8.s (+24)
- (modified) llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s (+24)
- (modified) llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s (+30)
- (modified) llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s (+16)
- (modified) llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s (+24)
- (modified) llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s (+16)
- (modified) llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s (+24)
- (modified) llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt (+42)
- (modified) llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt (+22)
- (modified) llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp8.txt (+22)
- (modified) llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt (+56)
- (modified) llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt (+24)
- (modified) llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt (+24)
``````````diff
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index a916af7e0c2df..d4fef5d46af73 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -684,6 +684,8 @@ TARGET_BUILTIN(__builtin_amdgcn_cvt_f16_fp8, "hiIi", "nc", "gfx1250-insts")
TARGET_BUILTIN(__builtin_amdgcn_cvt_f16_bf8, "hiIi", "nc", "gfx1250-insts")
TARGET_BUILTIN(__builtin_amdgcn_cvt_pk_f16_fp8, "V2hs", "nc", "gfx1250-insts")
TARGET_BUILTIN(__builtin_amdgcn_cvt_pk_f16_bf8, "V2hs", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_sat_pk4_i4_i8, "UsUi", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_sat_pk4_u4_u8, "UsUi", "nc", "gfx1250-insts")
// GFX1250 WMMA builtins
TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x4_f32, "V8fIbV2fIbV2fIsV8fIbIb", "nc", "gfx1250-insts,wavefrontsize32")
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
index 4c3f308a6cf75..a21862c4a9395 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
@@ -5,6 +5,7 @@
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
typedef unsigned int uint;
+typedef unsigned short int ushort;
typedef unsigned int __attribute__((ext_vector_type(2))) uint2;
typedef half __attribute__((ext_vector_type(2))) half2;
@@ -369,6 +370,30 @@ void test_cvt_pk_f16_bf8(global half2* out, short a)
out[0] = __builtin_amdgcn_cvt_pk_f16_bf8(a);
}
+// CHECK-LABEL: @test_sat_pk4_i4_i8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SRC_ADDR]] to ptr
+// CHECK-NEXT: store ptr [[OUT:%.*]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[SRC:%.*]], ptr [[SRC_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.amdgcn.sat.pk4.i4.i8(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i16 [[TMP1]], ptr [[TMP2]], align 2
+// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[SRC_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = call i16 @llvm.amdgcn.sat.pk4.u4.u8(i32 [[TMP3]])
+// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i16 [[TMP4]], ptr [[TMP5]], align 2
+// CHECK-NEXT: ret void
+//
+void test_sat_pk4_i4_i8(ushort *out, uint src)
+{
+ *out = __builtin_amdgcn_sat_pk4_i4_i8(src);
+ *out = __builtin_amdgcn_sat_pk4_u4_u8(src);
+}
+
// CHECK-LABEL: @test_permlane16_swap(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index d8fda0e2bcfa3..ecda6c4efefe3 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -3572,6 +3572,12 @@ def int_amdgcn_cvt_f16_bf8 : ClangBuiltin<"__builtin_amdgcn_cvt_f16_bf8">,
[llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<1>>]>;
+def int_amdgcn_sat_pk4_i4_i8 : ClangBuiltin<"__builtin_amdgcn_sat_pk4_i4_i8">,
+ DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
+
+def int_amdgcn_sat_pk4_u4_u8 : ClangBuiltin<"__builtin_amdgcn_sat_pk4_u4_u8">,
+ DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
+
//===----------------------------------------------------------------------===//
// Special Intrinsics for backend internal use only. No frontend
// should emit calls to these.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index cbbb57c6f8122..bf2f37bddb9ed 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4558,6 +4558,8 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_cvt_pk_u16:
case Intrinsic::amdgcn_cvt_pk_f16_fp8:
case Intrinsic::amdgcn_cvt_pk_f16_bf8:
+ case Intrinsic::amdgcn_sat_pk4_i4_i8:
+ case Intrinsic::amdgcn_sat_pk4_u4_u8:
case Intrinsic::amdgcn_fmed3:
case Intrinsic::amdgcn_cubeid:
case Intrinsic::amdgcn_cubema:
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index ab7d34002e9f1..9e1951e2946c4 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -2850,6 +2850,7 @@ def VOP_F16_I16 : VOPProfile <[f16, i16, untyped, untyped]>;
def VOP_I16_F16 : VOPProfile <[i16, f16, untyped, untyped]>;
def VOP_I16_I16 : VOPProfile <[i16, i16, untyped, untyped]>;
def VOP_BF16_BF16 : VOPProfile<[bf16, bf16, untyped, untyped]>;
+def VOP1_I16_I32 : VOPProfile<[i16, i32, untyped, untyped]>;
def VOP_F16_F16_F16 : VOPProfile <[f16, f16, f16, untyped]>;
def VOP_F16_F16_I16 : VOPProfile <[f16, f16, i16, untyped]>;
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 80eb5d8b7d571..f621f8581f778 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -803,6 +803,9 @@ let SubtargetPredicate = isGFX1250Plus in {
def : Cvt_F_F8_Pat_ByteSel<int_amdgcn_cvt_f16_fp8, V_CVT_F16_FP8_fake16_e64, 1>;
def : Cvt_F_F8_Pat_ByteSel<int_amdgcn_cvt_f16_bf8, V_CVT_F16_BF8_fake16_e64, 1>;
}
+
+ defm V_SAT_PK4_I4_I8 : VOP1Inst_t16<"v_sat_pk4_i4_i8", VOP1_I16_I32, int_amdgcn_sat_pk4_i4_i8>;
+ defm V_SAT_PK4_U4_U8 : VOP1Inst_t16<"v_sat_pk4_u4_u8", VOP1_I16_I32, int_amdgcn_sat_pk4_u4_u8>;
} // End SubtargetPredicate = isGFX1250Plus
let SubtargetPredicate = isGFX10Plus in {
@@ -1158,6 +1161,8 @@ defm V_PERMLANE16_SWAP_B32 : VOP1_Real_OpSelIsDPP_gfx1250<0x049>;
defm V_TANH_BF16 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x04a>;
defm V_PRNG_B32 : VOP1_Real_FULL<GFX1250Gen, 0x04b>;
defm V_CVT_F32_BF16 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x072, "v_cvt_f32_bf16", "V_CVT_F32_BF16_gfx1250">;
+defm V_SAT_PK4_I4_I8 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x073>;
+defm V_SAT_PK4_U4_U8 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x074>;
defm V_CVT_PK_F16_FP8 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x075>;
defm V_CVT_PK_F16_BF8 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x076>;
defm V_CVT_F16_FP8 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x077>;
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sat.pk.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sat.pk.ll
new file mode 100644
index 0000000000000..3a5507063b834
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sat.pk.ll
@@ -0,0 +1,305 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -mattr=+real-true16 %s -o - | FileCheck -check-prefix=SDAG-REAL16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -mattr=-real-true16 %s -o - | FileCheck -check-prefix=SDAG-FAKE16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -mattr=+real-true16 %s -o - | FileCheck -check-prefix=GISEL-REAL16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -mattr=-real-true16 %s -o - | FileCheck -check-prefix=GISEL-FAKE16 %s
+
+declare i16 @llvm.amdgcn.sat.pk4.i4.i8(i32) #0
+declare i16 @llvm.amdgcn.sat.pk4.u4.u8(i32) #0
+
+define amdgpu_kernel void @sat_pk4_i4_i8_f32_v(i32 %src, ptr %out) #1 {
+; SDAG-REAL16-LABEL: sat_pk4_i4_i8_f32_v:
+; SDAG-REAL16: ; %bb.0:
+; SDAG-REAL16-NEXT: s_clause 0x1
+; SDAG-REAL16-NEXT: s_load_b32 s2, s[4:5], 0x0
+; SDAG-REAL16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; SDAG-REAL16-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-REAL16-NEXT: s_wait_kmcnt 0x0
+; SDAG-REAL16-NEXT: v_sat_pk4_i4_i8_e32 v0.l, s2
+; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; SDAG-REAL16-NEXT: s_endpgm
+;
+; SDAG-FAKE16-LABEL: sat_pk4_i4_i8_f32_v:
+; SDAG-FAKE16: ; %bb.0:
+; SDAG-FAKE16-NEXT: s_clause 0x1
+; SDAG-FAKE16-NEXT: s_load_b32 s2, s[4:5], 0x0
+; SDAG-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; SDAG-FAKE16-NEXT: v_sat_pk4_i4_i8_e32 v1, s2
+; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1]
+; SDAG-FAKE16-NEXT: s_endpgm
+;
+; GISEL-REAL16-LABEL: sat_pk4_i4_i8_f32_v:
+; GISEL-REAL16: ; %bb.0:
+; GISEL-REAL16-NEXT: s_clause 0x1
+; GISEL-REAL16-NEXT: s_load_b32 s2, s[4:5], 0x0
+; GISEL-REAL16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GISEL-REAL16-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-REAL16-NEXT: s_wait_kmcnt 0x0
+; GISEL-REAL16-NEXT: v_sat_pk4_i4_i8_e32 v0.l, s2
+; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; GISEL-REAL16-NEXT: s_endpgm
+;
+; GISEL-FAKE16-LABEL: sat_pk4_i4_i8_f32_v:
+; GISEL-FAKE16: ; %bb.0:
+; GISEL-FAKE16-NEXT: s_clause 0x1
+; GISEL-FAKE16-NEXT: s_load_b32 s2, s[4:5], 0x0
+; GISEL-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GISEL-FAKE16-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GISEL-FAKE16-NEXT: v_sat_pk4_i4_i8_e32 v0, s2
+; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = call i16 @llvm.amdgcn.sat.pk4.i4.i8(i32 %src) #0
+ store i16 %cvt, ptr %out, align 2
+ ret void
+}
+
+define amdgpu_kernel void @sat_pk4_i4_i8_f32_s(i32 inreg %src, ptr %out) #1 {
+; SDAG-REAL16-LABEL: sat_pk4_i4_i8_f32_s:
+; SDAG-REAL16: ; %bb.1:
+; SDAG-REAL16-NEXT: s_load_b32 s8, s[4:5], 0x0
+; SDAG-REAL16-NEXT: s_waitcnt lgkmcnt(0)
+; SDAG-REAL16-NEXT: s_branch .LBB1_0
+; SDAG-REAL16-NEXT: .p2align 8
+; SDAG-REAL16-NEXT: ; %bb.2:
+; SDAG-REAL16-NEXT: .LBB1_0:
+; SDAG-REAL16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; SDAG-REAL16-NEXT: v_sat_pk4_i4_i8_e32 v0.l, s8
+; SDAG-REAL16-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-REAL16-NEXT: s_wait_kmcnt 0x0
+; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; SDAG-REAL16-NEXT: s_endpgm
+;
+; SDAG-FAKE16-LABEL: sat_pk4_i4_i8_f32_s:
+; SDAG-FAKE16: ; %bb.1:
+; SDAG-FAKE16-NEXT: s_load_b32 s8, s[4:5], 0x0
+; SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; SDAG-FAKE16-NEXT: s_branch .LBB1_0
+; SDAG-FAKE16-NEXT: .p2align 8
+; SDAG-FAKE16-NEXT: ; %bb.2:
+; SDAG-FAKE16-NEXT: .LBB1_0:
+; SDAG-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-FAKE16-NEXT: v_sat_pk4_i4_i8_e32 v1, s8
+; SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1]
+; SDAG-FAKE16-NEXT: s_endpgm
+;
+; GISEL-REAL16-LABEL: sat_pk4_i4_i8_f32_s:
+; GISEL-REAL16: ; %bb.0:
+; GISEL-REAL16-NEXT: s_clause 0x1
+; GISEL-REAL16-NEXT: s_load_b32 s2, s[4:5], 0x0
+; GISEL-REAL16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GISEL-REAL16-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-REAL16-NEXT: s_wait_kmcnt 0x0
+; GISEL-REAL16-NEXT: v_sat_pk4_i4_i8_e32 v0.l, s2
+; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; GISEL-REAL16-NEXT: s_endpgm
+;
+; GISEL-FAKE16-LABEL: sat_pk4_i4_i8_f32_s:
+; GISEL-FAKE16: ; %bb.0:
+; GISEL-FAKE16-NEXT: s_clause 0x1
+; GISEL-FAKE16-NEXT: s_load_b32 s2, s[4:5], 0x0
+; GISEL-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GISEL-FAKE16-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GISEL-FAKE16-NEXT: v_sat_pk4_i4_i8_e32 v0, s2
+; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = call i16 @llvm.amdgcn.sat.pk4.i4.i8(i32 %src) #0
+ store i16 %cvt, ptr %out, align 2
+ ret void
+}
+
+define amdgpu_kernel void @sat_pk4_i4_i8_f32_i(ptr %out) #1 {
+; SDAG-REAL16-LABEL: sat_pk4_i4_i8_f32_i:
+; SDAG-REAL16: ; %bb.0:
+; SDAG-REAL16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; SDAG-REAL16-NEXT: v_sat_pk4_i4_i8_e32 v0.l, 0x64
+; SDAG-REAL16-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-REAL16-NEXT: s_wait_kmcnt 0x0
+; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; SDAG-REAL16-NEXT: s_endpgm
+;
+; SDAG-FAKE16-LABEL: sat_pk4_i4_i8_f32_i:
+; SDAG-FAKE16: ; %bb.0:
+; SDAG-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-FAKE16-NEXT: v_sat_pk4_i4_i8_e32 v1, 0x64
+; SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1]
+; SDAG-FAKE16-NEXT: s_endpgm
+;
+; GISEL-REAL16-LABEL: sat_pk4_i4_i8_f32_i:
+; GISEL-REAL16: ; %bb.0:
+; GISEL-REAL16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GISEL-REAL16-NEXT: v_sat_pk4_i4_i8_e32 v0.l, 0x64
+; GISEL-REAL16-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-REAL16-NEXT: s_wait_kmcnt 0x0
+; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; GISEL-REAL16-NEXT: s_endpgm
+;
+; GISEL-FAKE16-LABEL: sat_pk4_i4_i8_f32_i:
+; GISEL-FAKE16: ; %bb.0:
+; GISEL-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GISEL-FAKE16-NEXT: v_sat_pk4_i4_i8_e32 v0, 0x64
+; GISEL-FAKE16-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = call i16 @llvm.amdgcn.sat.pk4.i4.i8(i32 100) #0
+ store i16 %cvt, ptr %out, align 2
+ ret void
+}
+
+define amdgpu_kernel void @sat_pk4_u4_u8_f32_v(i32 %src, ptr %out) #1 {
+; SDAG-REAL16-LABEL: sat_pk4_u4_u8_f32_v:
+; SDAG-REAL16: ; %bb.0:
+; SDAG-REAL16-NEXT: s_clause 0x1
+; SDAG-REAL16-NEXT: s_load_b32 s2, s[4:5], 0x0
+; SDAG-REAL16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; SDAG-REAL16-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-REAL16-NEXT: s_wait_kmcnt 0x0
+; SDAG-REAL16-NEXT: v_sat_pk4_u4_u8_e32 v0.l, s2
+; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; SDAG-REAL16-NEXT: s_endpgm
+;
+; SDAG-FAKE16-LABEL: sat_pk4_u4_u8_f32_v:
+; SDAG-FAKE16: ; %bb.0:
+; SDAG-FAKE16-NEXT: s_clause 0x1
+; SDAG-FAKE16-NEXT: s_load_b32 s2, s[4:5], 0x0
+; SDAG-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; SDAG-FAKE16-NEXT: v_sat_pk4_u4_u8_e32 v1, s2
+; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1]
+; SDAG-FAKE16-NEXT: s_endpgm
+;
+; GISEL-REAL16-LABEL: sat_pk4_u4_u8_f32_v:
+; GISEL-REAL16: ; %bb.0:
+; GISEL-REAL16-NEXT: s_clause 0x1
+; GISEL-REAL16-NEXT: s_load_b32 s2, s[4:5], 0x0
+; GISEL-REAL16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GISEL-REAL16-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-REAL16-NEXT: s_wait_kmcnt 0x0
+; GISEL-REAL16-NEXT: v_sat_pk4_u4_u8_e32 v0.l, s2
+; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; GISEL-REAL16-NEXT: s_endpgm
+;
+; GISEL-FAKE16-LABEL: sat_pk4_u4_u8_f32_v:
+; GISEL-FAKE16: ; %bb.0:
+; GISEL-FAKE16-NEXT: s_clause 0x1
+; GISEL-FAKE16-NEXT: s_load_b32 s2, s[4:5], 0x0
+; GISEL-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GISEL-FAKE16-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GISEL-FAKE16-NEXT: v_sat_pk4_u4_u8_e32 v0, s2
+; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = call i16 @llvm.amdgcn.sat.pk4.u4.u8(i32 %src) #0
+ store i16 %cvt, ptr %out, align 2
+ ret void
+}
+
+define amdgpu_kernel void @sat_pk4_u4_u8_f32_s(i32 inreg %src, ptr %out) #1 {
+; SDAG-REAL16-LABEL: sat_pk4_u4_u8_f32_s:
+; SDAG-REAL16: ; %bb.1:
+; SDAG-REAL16-NEXT: s_load_b32 s8, s[4:5], 0x0
+; SDAG-REAL16-NEXT: s_waitcnt lgkmcnt(0)
+; SDAG-REAL16-NEXT: s_branch .LBB4_0
+; SDAG-REAL16-NEXT: .p2align 8
+; SDAG-REAL16-NEXT: ; %bb.2:
+; SDAG-REAL16-NEXT: .LBB4_0:
+; SDAG-REAL16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; SDAG-REAL16-NEXT: v_sat_pk4_u4_u8_e32 v0.l, s8
+; SDAG-REAL16-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-REAL16-NEXT: s_wait_kmcnt 0x0
+; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; SDAG-REAL16-NEXT: s_endpgm
+;
+; SDAG-FAKE16-LABEL: sat_pk4_u4_u8_f32_s:
+; SDAG-FAKE16: ; %bb.1:
+; SDAG-FAKE16-NEXT: s_load_b32 s8, s[4:5], 0x0
+; SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; SDAG-FAKE16-NEXT: s_branch .LBB4_0
+; SDAG-FAKE16-NEXT: .p2align 8
+; SDAG-FAKE16-NEXT: ; %bb.2:
+; SDAG-FAKE16-NEXT: .LBB4_0:
+; SDAG-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-FAKE16-NEXT: v_sat_pk4_u4_u8_e32 v1, s8
+; SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1]
+; SDAG-FAKE16-NEXT: s_endpgm
+;
+; GISEL-REAL16-LABEL: sat_pk4_u4_u8_f32_s:
+; GISEL-REAL16: ; %bb.0:
+; GISEL-REAL16-NEXT: s_clause 0x1
+; GISEL-REAL16-NEXT: s_load_b32 s2, s[4:5], 0x0
+; GISEL-REAL16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GISEL-REAL16-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-REAL16-NEXT: s_wait_kmcnt 0x0
+; GISEL-REAL16-NEXT: v_sat_pk4_u4_u8_e32 v0.l, s2
+; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; GISEL-REAL16-NEXT: s_endpgm
+;
+; GISEL-FAKE16-LABEL: sat_pk4_u4_u8_f32_s:
+; GISEL-FAKE16: ; %bb.0:
+; GISEL-FAKE16-NEXT: s_clause 0x1
+; GISEL-FAKE16-NEXT: s_load_b32 s2, s[4:5], 0x0
+; GISEL-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GISEL-FAKE16-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GISEL-FAKE16-NEXT: v_sat_pk4_u4_u8_e32 v0, s2
+; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = call i16 @llvm.amdgcn.sat.pk4.u4.u8(i32 %src) #0
+ store i16 %cvt, ptr %out, align 2
+ ret void
+}
+
+define amdgpu_kernel void @sat_pk4_u4_u8_f32_i(ptr %out) #1 {
+; SDAG-REAL16-LABEL: sat_pk4_u4_u8_f32_i:
+; SDAG-REAL16: ; %bb.0:
+; SDAG-REAL16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; SDAG-REAL16-NEXT: v_sat_pk4_u4_u8_e32 v0.l, 0x64
+; SDAG-REAL16-NEXT: v_mov_b32_e32 v1, 0
+; SDAG-REAL16-NEXT: s_wait_kmcnt 0x0
+; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; SDAG-REAL16-NEXT: s_endpgm
+;
+; SDAG-FAKE16-LABEL: sat_pk4_u4_u8_f32_i:
+; SDAG-FAKE16: ; %bb.0:
+; SDAG-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-FAKE16-NEXT: v_sat_pk4_u4_u8_e32 v1, 0x64
+; SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1]
+; SDAG-FAKE16-NEXT: s_endpgm
+;
+; GISEL-REAL16-LABEL: sat_pk4_u4_u8_f32_i:
+; GISEL-REAL16: ; %bb.0:
+; GISEL-REAL16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GISEL-REAL16-NEXT: v_sat_pk4_u4_u8_e32 v0.l, 0x64
+; GISEL-REAL16-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-REAL16-NEXT: s_wait_kmcnt 0x0
+; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; GISEL-REAL16-NEXT: s_endpgm
+;
+; GISEL-FAKE16-LABEL: sat_pk4_u4_u8_f32_i:
+; GISEL-FAKE16: ; %bb.0:
+; GISEL-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GISEL-FAKE16-NEXT: v_sat_pk4_u4_u8_e32 v0, 0x64
+; GISEL-FAKE16-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = call i16 @llvm.amdgcn.sat.pk4.u4.u8(i32 100) #0
+ store i16 %cvt, ptr %out, align 2
+ ret void
+}
+
+attributes #0 = { nounwind memory(none) }
+attributes #1 = { nounwind }
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1-fake16.s
index f2cf3d58fb0cf..811c6ebfe0161 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1-fake16.s
@@ -628,6 +628,57 @@ v_cvt_f32_fp8_e32 v1, 3
v_cvt_f32_fp8_e32 v1, v3
// GFX1250: v_cvt_f32_fp8_e32 v1, v3 ; encoding: [0x03,0xd9,0x02,0x7e]
+v_cvt_pk_f32_bf8_e32 v[2:3], s3
+// GFX1250: v_cvt_pk_f32_bf8_e32 v[2:3], s3 ; encoding: [0x03,0xde,0x04,0x7e]
+
+v_cvt_pk_f32_bf8_e32 v[4:5], s5
+// GFX1250: v_cvt_pk_f32_bf8_e32 v[4:5], s5 ; encoding: [0x05,0xde,0x08,0x7e]
+
+v_cvt_pk_f32_bf8_e32 v[2:3], 3
+// GFX1250: v_cvt_pk_f32_bf8_e32 v[2:3], 3 ; encoding: [0x83,0xde,0x04,0x7e]
+
+v_cvt_pk_f32_bf8...
[truncated]
``````````
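For context, a minimal usage sketch of the two new builtins from OpenCL C, in the spirit of the codegen test in the patch. The kernel name, the output indexing, and the described semantics (each of the four byte lanes of the 32-bit source saturated to a 4-bit signed or unsigned value and packed into the 16-bit result) are assumptions inferred from the mnemonic and the builtin signatures; they are not spelled out by the patch itself.

```c
// Hypothetical sketch, not part of the patch. Assumes the builtins behave as
// the mnemonic suggests: each i8/u8 lane of `src` is saturated to a 4-bit
// signed ([-8, 7]) or unsigned ([0, 15]) value and packed into the low 16
// bits of the result.
kernel void pack_nibbles(global ushort *out, global const uint *in)
{
    size_t gid = get_global_id(0);
    uint src = in[gid];

    // Signed saturation of four i8 lanes into four i4 nibbles.
    out[2 * gid + 0] = __builtin_amdgcn_sat_pk4_i4_i8(src);

    // Unsigned saturation of four u8 lanes into four u4 nibbles.
    out[2 * gid + 1] = __builtin_amdgcn_sat_pk4_u4_u8(src);
}
```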
https://github.com/llvm/llvm-project/pull/149528
More information about the llvm-branch-commits mailing list