[llvm] [AMDGPU] Filter candidates of LiveRegOptimizer for profitable cases (PR #124624)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 27 12:58:01 PST 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-backend-amdgpu
Author: choikwa (choikwa)
<details>
<summary>Changes</summary>
It is known that vectors whose elements fit in i16 will be split and scalarized in SelectionDAG's type legalizer
(see SIISelLowering::getPreferredVectorAction).
LRO attempts to undo the scalarization of vectors across basic block boundaries and shoehorn the Values into VGPRs. LRO is beneficial for operations that natively work on illegal vector types, as it prevents flip-flopping between SGPR and VGPR. If we know that operations on a vector will be split and scalarized anyway, then we don't want to shoehorn them back into VGPRs.
Operations that we know work natively on illegal vector types usually come in the form of intrinsics (MFMA, DOT8), buffer stores, shuffles, and phi nodes, to name a few.
---
Patch is 337.61 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/124624.diff
7 Files Affected:
- (modified) llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp (+51-6)
- (modified) llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll (+2001-209)
- (modified) llvm/test/CodeGen/AMDGPU/dagcomb-extract-vec-elt-different-sizes.ll (+19-20)
- (modified) llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll (+188-171)
- (modified) llvm/test/CodeGen/AMDGPU/extract-subvector.ll (+26-25)
- (modified) llvm/test/CodeGen/AMDGPU/sdwa-peephole.ll (+24-12)
- (modified) llvm/test/CodeGen/AMDGPU/vni8-across-blocks.ll (+1896-148)
``````````diff
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
index f4e651ec477d30..d64951001d9cba 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
@@ -14,6 +14,7 @@
#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
+#include "AMDGPUTargetTransformInfo.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -45,6 +46,7 @@ class AMDGPULateCodeGenPrepare
Function &F;
const DataLayout &DL;
const GCNSubtarget &ST;
+ const TargetTransformInfo &TTI;
AssumptionCache *const AC;
UniformityInfo &UA;
@@ -53,8 +55,9 @@ class AMDGPULateCodeGenPrepare
public:
AMDGPULateCodeGenPrepare(Function &F, const GCNSubtarget &ST,
+ const TargetTransformInfo &TTI,
AssumptionCache *AC, UniformityInfo &UA)
- : F(F), DL(F.getDataLayout()), ST(ST), AC(AC), UA(UA) {}
+ : F(F), DL(F.getDataLayout()), ST(ST), TTI(TTI), AC(AC), UA(UA) {}
bool run();
bool visitInstruction(Instruction &) { return false; }
@@ -75,6 +78,8 @@ class LiveRegOptimizer {
Module &Mod;
const DataLayout &DL;
const GCNSubtarget &ST;
+ const TargetTransformInfo &TTI;
+
/// The scalar type to convert to
Type *const ConvertToScalar;
/// The set of visited Instructions
@@ -125,8 +130,43 @@ class LiveRegOptimizer {
return LK.first != TargetLoweringBase::TypeLegal;
}
- LiveRegOptimizer(Module &Mod, const GCNSubtarget &ST)
- : Mod(Mod), DL(Mod.getDataLayout()), ST(ST),
+ // Filtering based on operation or its cost.
+ // If an operation incurs high enough cost or natively work on
+ // vector of illegal type, ie. v2i8, then it makes sense to try
+ // to avoid scalarizing across BB.
+ bool shouldReplaceBasedOnOp(Instruction *II) {
+ // Ignore pseudos
+ if (II->isDebugOrPseudoInst())
+ return false;
+
+ // Instruction Cost
+ const auto Cost = TTI.getInstructionCost(II,
+ TargetTransformInfo::TargetCostKind::TCK_SizeAndLatency);
+ LLVM_DEBUG(
+ dbgs() << "shouldReplaceBasedOnOp: " <<
+ *II << " Cost=" << Cost << '\n';
+ );
+ if (Cost >= 8)
+ return true;
+
+ // Intrinsics - assume they natively handle illegal type
+ if (dyn_cast<IntrinsicInst>(II))
+ return true;
+
+ // Stores
+ if (dyn_cast<StoreInst>(II))
+ return true;
+
+ // Shuffles
+ if (dyn_cast<ShuffleVectorInst>(II))
+ return true;
+
+ return false;
+ }
+
+ LiveRegOptimizer(Module &Mod, const GCNSubtarget &ST,
+ const TargetTransformInfo &TTI)
+ : Mod(Mod), DL(Mod.getDataLayout()), ST(ST), TTI(TTI),
ConvertToScalar(Type::getInt32Ty(Mod.getContext())) {}
};
@@ -140,7 +180,7 @@ bool AMDGPULateCodeGenPrepare::run() {
// vectors to equivalent vectors of legal type (which are converted back
// before uses in subsequent blocks), to pack the bits into fewer physical
// registers (used in CopyToReg/CopyFromReg pairs).
- LiveRegOptimizer LRO(*F.getParent(), ST);
+ LiveRegOptimizer LRO(*F.getParent(), ST, TTI);
bool Changed = false;
@@ -259,6 +299,9 @@ bool LiveRegOptimizer::optimizeLiveType(
if (!shouldReplace(II->getType()))
continue;
+ if (!shouldReplaceBasedOnOp(II))
+ continue;
+
if (PHINode *Phi = dyn_cast<PHINode>(II)) {
PhiNodes.insert(Phi);
// Collect all the incoming values of problematic PHI nodes.
@@ -478,11 +521,12 @@ bool AMDGPULateCodeGenPrepare::visitLoadInst(LoadInst &LI) {
PreservedAnalyses
AMDGPULateCodeGenPreparePass::run(Function &F, FunctionAnalysisManager &FAM) {
const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
+ const TargetTransformInfo &TTI = TM.getTargetTransformInfo(F);
AssumptionCache &AC = FAM.getResult<AssumptionAnalysis>(F);
UniformityInfo &UI = FAM.getResult<UniformityInfoAnalysis>(F);
- bool Changed = AMDGPULateCodeGenPrepare(F, ST, &AC, UI).run();
+ bool Changed = AMDGPULateCodeGenPrepare(F, ST, TTI, &AC, UI).run();
if (!Changed)
return PreservedAnalyses::all();
@@ -518,13 +562,14 @@ bool AMDGPULateCodeGenPrepareLegacy::runOnFunction(Function &F) {
const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
const TargetMachine &TM = TPC.getTM<TargetMachine>();
const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
+ const TargetTransformInfo &TTI = TM.getTargetTransformInfo(F);
AssumptionCache &AC =
getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
UniformityInfo &UI =
getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
- return AMDGPULateCodeGenPrepare(F, ST, &AC, UI).run();
+ return AMDGPULateCodeGenPrepare(F, ST, TTI, &AC, UI).run();
}
INITIALIZE_PASS_BEGIN(AMDGPULateCodeGenPrepareLegacy, DEBUG_TYPE,
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
index 9c2fabce4bcdeb..96a167794dfb7a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
@@ -6,36 +6,28 @@ define amdgpu_kernel void @v3i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX906-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
-; GFX906-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; GFX906-NEXT: v_mov_b32_e32 v4, 8
-; GFX906-NEXT: v_mov_b32_e32 v5, 16
-; GFX906-NEXT: s_waitcnt lgkmcnt(0)
-; GFX906-NEXT: global_load_dword v3, v2, s[0:1]
-; GFX906-NEXT: v_mov_b32_e32 v1, 0xff
+; GFX906-NEXT: v_lshlrev_b32_e32 v4, 2, v0
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: global_load_dword v1, v4, s[0:1]
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_and_b32_e32 v6, 0xff, v3
-; GFX906-NEXT: v_lshlrev_b32_sdwa v7, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
-; GFX906-NEXT: v_lshlrev_b32_sdwa v3, v5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
-; GFX906-NEXT: v_or3_b32 v3, v6, v7, v3
+; GFX906-NEXT: v_lshrrev_b32_e32 v3, 8, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v2, 16, v1
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX906-NEXT: s_cbranch_execz .LBB0_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
-; GFX906-NEXT: global_load_dword v0, v2, s[2:3]
+; GFX906-NEXT: global_load_dword v1, v4, s[2:3]
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_and_b32_e32 v2, 0xff, v0
-; GFX906-NEXT: v_lshlrev_b32_sdwa v3, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
-; GFX906-NEXT: v_lshlrev_b32_sdwa v0, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
-; GFX906-NEXT: v_or3_b32 v3, v2, v3, v0
+; GFX906-NEXT: v_lshrrev_b32_e32 v3, 8, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v2, 16, v1
; GFX906-NEXT: .LBB0_2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v3
-; GFX906-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX906-NEXT: v_and_b32_e32 v0, 0xff, v3
; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_and_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: s_mov_b32 s0, 0xffff
; GFX906-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX906-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX906-NEXT: v_and_b32_sdwa v1, s0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX906-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX906-NEXT: v_mov_b32_e32 v1, 0
; GFX906-NEXT: global_store_short v1, v0, s[6:7]
@@ -63,19 +55,34 @@ define amdgpu_kernel void @v4i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX906-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
-; GFX906-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; GFX906-NEXT: v_lshlrev_b32_e32 v5, 2, v0
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
-; GFX906-NEXT: global_load_dword v1, v2, s[0:1]
+; GFX906-NEXT: global_load_dword v1, v5, s[0:1]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_lshrrev_b32_e32 v2, 8, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v4, 24, v1
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX906-NEXT: s_cbranch_execz .LBB1_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
-; GFX906-NEXT: global_load_dword v1, v2, s[2:3]
+; GFX906-NEXT: global_load_dword v1, v5, s[2:3]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_lshrrev_b32_e32 v2, 8, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v4, 24, v1
; GFX906-NEXT: .LBB1_2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
-; GFX906-NEXT: v_mov_b32_e32 v0, 0
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX906-NEXT: v_mov_b32_e32 v5, 8
+; GFX906-NEXT: v_mov_b32_e32 v0, 0xff
+; GFX906-NEXT: v_lshlrev_b32_sdwa v2, v5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX906-NEXT: v_and_or_b32 v0, v1, v0, v2
+; GFX906-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX906-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX906-NEXT: v_lshlrev_b32_e32 v2, 24, v4
+; GFX906-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX906-NEXT: v_mov_b32_e32 v1, 0
+; GFX906-NEXT: global_store_dword v1, v0, s[6:7]
; GFX906-NEXT: s_endpgm
entry:
%idx = call i32 @llvm.amdgcn.workitem.id.x()
@@ -99,28 +106,30 @@ define amdgpu_kernel void @v5i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX906-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
-; GFX906-NEXT: v_lshlrev_b32_e32 v3, 3, v0
+; GFX906-NEXT: v_lshlrev_b32_e32 v6, 3, v0
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
-; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[0:1]
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v6, s[0:1]
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v3, 8, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v4, 16, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v5, 24, v1
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX906-NEXT: s_cbranch_execz .LBB2_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
-; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[2:3]
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v6, s[2:3]
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v3, 8, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v4, 16, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v5, 24, v1
; GFX906-NEXT: .LBB2_2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
-; GFX906-NEXT: v_mov_b32_e32 v4, 0
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v1
-; GFX906-NEXT: v_lshrrev_b32_e32 v3, 24, v1
-; GFX906-NEXT: global_store_byte v4, v1, s[6:7]
-; GFX906-NEXT: global_store_byte v4, v0, s[6:7] offset:1
-; GFX906-NEXT: global_store_byte_d16_hi v4, v1, s[6:7] offset:2
-; GFX906-NEXT: global_store_byte v4, v3, s[6:7] offset:3
-; GFX906-NEXT: global_store_byte v4, v2, s[6:7] offset:4
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: global_store_byte v0, v1, s[6:7]
+; GFX906-NEXT: global_store_byte v0, v3, s[6:7] offset:1
+; GFX906-NEXT: global_store_byte v0, v4, s[6:7] offset:2
+; GFX906-NEXT: global_store_byte v0, v5, s[6:7] offset:3
+; GFX906-NEXT: global_store_byte v0, v2, s[6:7] offset:4
; GFX906-NEXT: s_endpgm
entry:
%idx = call i32 @llvm.amdgcn.workitem.id.x()
@@ -144,19 +153,46 @@ define amdgpu_kernel void @v8i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX906-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
-; GFX906-NEXT: v_lshlrev_b32_e32 v3, 3, v0
+; GFX906-NEXT: v_lshlrev_b32_e32 v9, 3, v0
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
-; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[0:1]
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v9, s[0:1]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_lshrrev_b32_e32 v3, 8, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v4, 16, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v5, 24, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v6, 8, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v7, 16, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v8, 24, v2
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX906-NEXT: s_cbranch_execz .LBB3_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
-; GFX906-NEXT: global_load_dwordx2 v[1:2], v3, s[2:3]
+; GFX906-NEXT: global_load_dwordx2 v[1:2], v9, s[2:3]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_lshrrev_b32_e32 v3, 8, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v4, 16, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v5, 24, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v6, 8, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v7, 16, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v8, 24, v2
; GFX906-NEXT: .LBB3_2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
-; GFX906-NEXT: v_mov_b32_e32 v0, 0
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: global_store_dwordx2 v0, v[1:2], s[6:7]
+; GFX906-NEXT: v_mov_b32_e32 v10, 8
+; GFX906-NEXT: v_mov_b32_e32 v9, 0xff
+; GFX906-NEXT: v_lshlrev_b32_sdwa v0, v10, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX906-NEXT: v_and_or_b32 v0, v1, v9, v0
+; GFX906-NEXT: v_and_b32_e32 v1, 0xff, v4
+; GFX906-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX906-NEXT: v_lshlrev_b32_e32 v3, 24, v5
+; GFX906-NEXT: v_or3_b32 v0, v0, v1, v3
+; GFX906-NEXT: v_lshlrev_b32_sdwa v1, v10, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX906-NEXT: v_and_or_b32 v1, v2, v9, v1
+; GFX906-NEXT: v_and_b32_e32 v2, 0xff, v7
+; GFX906-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX906-NEXT: v_lshlrev_b32_e32 v3, 24, v8
+; GFX906-NEXT: v_or3_b32 v1, v1, v2, v3
+; GFX906-NEXT: v_mov_b32_e32 v2, 0
+; GFX906-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX906-NEXT: s_endpgm
entry:
%idx = call i32 @llvm.amdgcn.workitem.id.x()
@@ -180,19 +216,70 @@ define amdgpu_kernel void @v16i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX906-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
-; GFX906-NEXT: v_lshlrev_b32_e32 v5, 4, v0
+; GFX906-NEXT: v_lshlrev_b32_e32 v17, 4, v0
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
-; GFX906-NEXT: global_load_dwordx4 v[1:4], v5, s[0:1]
+; GFX906-NEXT: global_load_dwordx4 v[1:4], v17, s[0:1]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_lshrrev_b32_e32 v5, 8, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v8, 8, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v9, 16, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v10, 24, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v11, 8, v3
+; GFX906-NEXT: v_lshrrev_b32_e32 v12, 16, v3
+; GFX906-NEXT: v_lshrrev_b32_e32 v13, 24, v3
+; GFX906-NEXT: v_lshrrev_b32_e32 v14, 8, v4
+; GFX906-NEXT: v_lshrrev_b32_e32 v15, 16, v4
+; GFX906-NEXT: v_lshrrev_b32_e32 v16, 24, v4
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX906-NEXT: s_cbranch_execz .LBB4_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
-; GFX906-NEXT: global_load_dwordx4 v[1:4], v5, s[2:3]
+; GFX906-NEXT: global_load_dwordx4 v[1:4], v17, s[2:3]
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_lshrrev_b32_e32 v5, 8, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v8, 8, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v9, 16, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v10, 24, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v11, 8, v3
+; GFX906-NEXT: v_lshrrev_b32_e32 v12, 16, v3
+; GFX906-NEXT: v_lshrrev_b32_e32 v13, 24, v3
+; GFX906-NEXT: v_lshrrev_b32_e32 v14, 8, v4
+; GFX906-NEXT: v_lshrrev_b32_e32 v15, 16, v4
+; GFX906-NEXT: v_lshrrev_b32_e32 v16, 24, v4
; GFX906-NEXT: .LBB4_2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[0:1]
-; GFX906-NEXT: v_mov_b32_e32 v0, 0
-; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: global_store_dwordx4 v0, v[1:4], s[6:7]
+; GFX906-NEXT: v_mov_b32_e32 v18, 8
+; GFX906-NEXT: v_mov_b32_e32 v17, 0xff
+; GFX906-NEXT: v_lshlrev_b32_sdwa v0, v18, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX906-NEXT: v_and_or_b32 v0, v1, v17, v0
+; GFX906-NEXT: v_and_b32_e32 v1, 0xff, v6
+; GFX906-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX906-NEXT: v_lshlrev_b32_e32 v5, 24, v7
+; GFX906-NEXT: v_or3_b32 v0, v0, v1, v5
+; GFX906-NEXT: v_lshlrev_b32_sdwa v1, v18, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX906-NEXT: v_and_or_b32 v1, v2, v17, v1
+; GFX906-NEXT: v_and_b32_e32 v2, 0xff, v9
+; GFX906-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX906-NEXT: v_lshlrev_b32_e32 v5, 24, v10
+; GFX906-NEXT: v_or3_b32 v1, v1, v2, v5
+; GFX906-NEXT: v_lshlrev_b32_sdwa v2, v18, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX906-NEXT: v_and_or_b32 v2, v3, v17, v2
+; GFX906-NEXT: v_and_b32_e32 v3, 0xff, v12
+; GFX906-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX906-NEXT: v_lshlrev_b32_e32 v5, 24, v13
+; GFX906-NEXT: v_or3_b32 v2, v2, v3, v5
+; GFX906-NEXT: v_lshlrev_b32_sdwa v3, v18, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX906-NEXT: v_and_or_b32 v3, v4, v17, v3
+; GFX906-NEXT: v_and_b32_e32 v4, 0xff, v15
+; GFX906-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX906-NEXT: v_lshlrev_b32_e32 v5, 24, v16
+; GFX906-NEXT: v_or3_b32 v3, v3, v4, v5
+; GFX906-NEXT: v_mov_b32_e32 v4, 0
+; GFX906-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; GFX906-NEXT: s_endpgm
entry:
%idx = call i32 @llvm.amdgcn.workitem.id.x()
@@ -216,23 +303,123 @@ define amdgpu_kernel void @v32i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX906-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
-; GFX906-NEXT: v_lshlrev_b32_e32 v9, 5, v0
+; GFX906-NEXT: v_lshlrev_b32_e32 v32, 5, v0
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
-; GFX906-NEXT: global_load_dwordx4 v[1:4], v9, s[0:1]
-; GFX906-NEXT: global_load_dwordx4 v[5:8], v9, s[0:1] offset:16
+; GFX906-NEXT: global_load_dwordx4 v[5:8], v32, s[0:1]
+; GFX906-NEXT: global_load_dwordx4 v[1:4], v32, s[0:1] offset:16
+; GFX906-NEXT: s_waitcnt vmcnt(1)
+; GFX906-NEXT: v_lshrrev_b32_e32 v31, 8, v5
+; GFX906-NEXT: v_lshrrev_b32_e32 v26, 16, v5
+; GFX906-NEXT: v_lshrrev_b32_e32 v27, 24, v5
+; GFX906-NEXT: v_lshrrev_b32_e32 v28, 8, v6
+; GFX906-NEXT: v_lshrrev_b32_e32 v29, 16, v6
+; GFX906-NEXT: v_lshrrev_b32_e32 v30, 24, v6
+; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v7
+; GFX906-NEXT: v_lshrrev_b32_e32 v9, 16, v7
+; GFX906-NEXT: v_lshrrev_b32_e32 v10, 24, v7
+; GFX906-NEXT: v_lshrrev_b32_e32 v11, 8, v8
+; GFX906-NEXT: v_lshrrev_b32_e32 v12, 16, v8
+; GFX906-NEXT: v_lshrrev_b32_e32 v13, 24, v8
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_lshrrev_b32_e32 v14, 8, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v16, 24, v1
+; GFX906-NEXT: v_lshrrev_b32_e32 v17, 8, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v18, 16, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v19, 24, v2
+; GFX906-NEXT: v_lshrrev_b32_e32 v20, 8, v3
+; GFX906-NEXT: v_lshrrev_b32_e32 v21, 16, v3
+; GFX906-NEXT: v_lshrrev_b32_e32 v22...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/124624
More information about the llvm-commits
mailing list