[llvm-branch-commits] [llvm] AMDGPU: Add codegen support for gfx950 v_ashr_pk_i8/u8_i32 (PR #118304)
Matt Arsenault via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Mon Dec 2 06:35:08 PST 2024
arsenm (https://github.com/arsenm) created https://github.com/llvm/llvm-project/pull/118304
Co-authored-by: Sirish Pande <Sirish.Pande at amd.com>
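The underlying intrinsics are already wired up by the VOP3Inst definitions this patch extends, so the instruction can also be reached directly. A minimal sketch, not taken from this patch, assuming the llvm.amdgcn.ashr.pk.u8.i32 spelling of int_amdgcn_ashr_pk_u8_i32 and the i16 (i32, i32, i32) shape implied by the VOP_I16_I32_I32_I32 profile:

; Hypothetical standalone example (not part of this patch): direct use of the intrinsic.
declare i16 @llvm.amdgcn.ashr.pk.u8.i32(i32, i32, i32)

define i16 @pack_u8_via_intrinsic(i32 %src0, i32 %src1, i32 %src2) {
  ; Shifts both sources right by %src2, clamps to [0, 255], and packs the two bytes into one i16.
  %packed = call i16 @llvm.amdgcn.ashr.pk.u8.i32(i32 %src0, i32 %src1, i32 %src2)
  ret i16 %packed
}

The new GCNPats below additionally select the same instructions from generic shift/clamp/pack IR, as exercised by the added test.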
From 0ed129dd231bb2792690f381ba542d92d74b19a1 Mon Sep 17 00:00:00 2001
From: Sirish Pande <Sirish.Pande at amd.com>
Date: Mon, 6 May 2024 10:20:30 -0500
Subject: [PATCH] AMDGPU: Add codegen support for gfx950 v_ashr_pk_i8/u8_i32
Co-authored-by: Sirish Pande <Sirish.Pande at amd.com>
---
llvm/lib/Target/AMDGPU/VOP3Instructions.td | 17 +++++++
llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll | 55 ++++++++++++++++++++++
2 files changed, 72 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 33050b718a484e..e30f873ea12e1a 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -1453,6 +1453,23 @@ let SubtargetPredicate = HasAshrPkInsts, isReMaterializable = 1 in {
defm V_ASHR_PK_U8_I32 : VOP3Inst<"v_ashr_pk_u8_i32", VOP3_Profile<VOP_I16_I32_I32_I32, VOP3_OPSEL_ONLY>, int_amdgcn_ashr_pk_u8_i32>;
} // End SubtargetPredicate = HasAshrPkInsts, isReMaterializable = 1
+class AshrPkI8Pat<VOP3_Pseudo inst, int lo, int hi>: GCNPat<
+ (i16 (or (i16 (shl (i16 (trunc (i32 (AMDGPUsmed3 (i32 (sra i32:$src1, i32:$src2)), (i32 lo), (i32 hi))))), (i16 8))),
+ (i16 (and (i16 (trunc (i32 (AMDGPUsmed3 (i32 (sra i32:$src0, i32:$src2)), (i32 lo), (i32 hi))))), (i16 255))))),
+ (inst 0, VSrc_b32:$src0, 0, VSrc_b32:$src1, 0, VSrc_b32:$src2, 0 )
+>;
+
+class AshrPkU8Pat<VOP3_Pseudo inst, int lo, int hi>: GCNPat<
+ (i16 (or (i16 (shl (i16 (trunc (i32 (AMDGPUsmed3 (i32 (sra i32:$src1, i32:$src2)), (i32 lo), (i32 hi))))), (i16 8))),
+ (i16 (trunc (i32 (AMDGPUsmed3 (i32 (sra i32:$src0, i32:$src2)), (i32 lo), (i32 hi))))))),
+ (inst 0, VSrc_b32:$src0, 0, VSrc_b32:$src1, 0, VSrc_b32:$src2, 0 )
+>;
+
+let SubtargetPredicate = HasAshrPkInsts in {
+ def : AshrPkI8Pat<V_ASHR_PK_I8_I32_e64, -128, 127>;
+ def : AshrPkU8Pat<V_ASHR_PK_U8_I32_e64, 0, 255>;
+}
+
//===----------------------------------------------------------------------===//
// Integer Clamp Patterns
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
new file mode 100644
index 00000000000000..366a13dc9b7c4c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=amdgcn -mcpu=gfx950 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX950 %s
+define amdgpu_kernel void @v_ashr_pk_i8_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) #0 {
+; GFX950-LABEL: v_ashr_pk_i8_i32:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_load_dword s2, s[0:1], 0x34
+; GFX950-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX950-NEXT: v_mov_b32_e32 v0, 0
+; GFX950-NEXT: s_waitcnt lgkmcnt(0)
+; GFX950-NEXT: s_and_b32 s0, s2, 31
+; GFX950-NEXT: v_mov_b32_e32 v1, s7
+; GFX950-NEXT: v_mov_b32_e32 v2, s0
+; GFX950-NEXT: v_ashr_pk_i8_i32 v1, s6, v1, v2
+; GFX950-NEXT: global_store_short v0, v1, s[4:5]
+; GFX950-NEXT: s_endpgm
+ %insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
+ %build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
+ %src2.clamp = and i32 %src2, 31
+ %insert.1 = insertelement <2 x i32> poison, i32 %src2.clamp, i64 0
+ %src2.broadcast = shufflevector <2 x i32> %insert.1, <2 x i32> poison, <2 x i32> zeroinitializer
+ %ashr = ashr <2 x i32> %build_vector, %src2.broadcast
+ %sat.low = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %ashr, <2 x i32> <i32 -128, i32 -128>)
+ %sat.hi = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %sat.low, <2 x i32> <i32 127, i32 127>)
+ %trunc = trunc nsw <2 x i32> %sat.hi to <2 x i8>
+ %ret = bitcast <2 x i8> %trunc to i16
+ store i16 %ret, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @v_ashr_pk_u8_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) #0 {
+; GFX950-LABEL: v_ashr_pk_u8_i32:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_load_dword s2, s[0:1], 0x34
+; GFX950-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX950-NEXT: v_mov_b32_e32 v0, 0
+; GFX950-NEXT: s_waitcnt lgkmcnt(0)
+; GFX950-NEXT: s_and_b32 s0, s2, 31
+; GFX950-NEXT: v_mov_b32_e32 v1, s7
+; GFX950-NEXT: v_mov_b32_e32 v2, s0
+; GFX950-NEXT: v_ashr_pk_u8_i32 v1, s6, v1, v2
+; GFX950-NEXT: global_store_short v0, v1, s[4:5]
+; GFX950-NEXT: s_endpgm
+ %insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
+ %build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
+ %src2.clamp = and i32 %src2, 31
+ %insert.1 = insertelement <2 x i32> poison, i32 %src2.clamp, i64 0
+ %src2.broadcast = shufflevector <2 x i32> %insert.1, <2 x i32> poison, <2 x i32> zeroinitializer
+ %ashr = ashr <2 x i32> %build_vector, %src2.broadcast
+ %sat.low = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %ashr, <2 x i32> <i32 0, i32 0>)
+ %sat.hi = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %sat.low, <2 x i32> <i32 255, i32 255>)
+ %trunc = trunc nsw <2 x i32> %sat.hi to <2 x i8>
+ %ret = bitcast <2 x i8> %trunc to i16
+ store i16 %ret, ptr addrspace(1) %out
+ ret void
+}
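For reference, roughly the scalar i16 form that AshrPkI8Pat is intended to select, written out as IR. This is a sketch only: the pattern matches the post-legalization SelectionDAG, where the smax/smin pair has been combined into an AMDGPUsmed3 node, so this exact IR is illustrative rather than guaranteed to select the instruction.

declare i32 @llvm.smax.i32(i32, i32)
declare i32 @llvm.smin.i32(i32, i32)

define i16 @ashr_pk_i8_scalar_sketch(i32 %src0, i32 %src1, i32 %src2) {
  ; Arithmetic shift of both sources by the shared shift amount.
  %sra0 = ashr i32 %src0, %src2
  %sra1 = ashr i32 %src1, %src2
  ; Clamp each shifted value to the signed i8 range [-128, 127].
  %max0 = call i32 @llvm.smax.i32(i32 %sra0, i32 -128)
  %clamp0 = call i32 @llvm.smin.i32(i32 %max0, i32 127)
  %max1 = call i32 @llvm.smax.i32(i32 %sra1, i32 -128)
  %clamp1 = call i32 @llvm.smin.i32(i32 %max1, i32 127)
  ; Pack: src0's byte goes to bits [7:0], src1's byte to bits [15:8].
  %t0 = trunc i32 %clamp0 to i16
  %t1 = trunc i32 %clamp1 to i16
  %lo = and i16 %t0, 255
  %hi = shl i16 %t1, 8
  %packed = or i16 %hi, %lo
  ret i16 %packed
}

AshrPkU8Pat has the same shape except that the clamp range is [0, 255] and the low byte needs no explicit masking.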