[llvm] [AMDGPU] Enable reordering of VMEM loads during clustering (PR #107986)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 10 19:37:55 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Carl Ritson (perlfu)
Changes:
Add fine-grained control over ReorderWhileClustering by adding a
canReorderClusterMemOps query to TargetInstrInfo.
Implement this to return true for RISC-V to maintain current behaviour.
On AMDGPU, enable ReorderWhileClustering for loads and implement
canReorderClusterMemOps to reject reordering for any operations other
than VMEM.
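As a hedged illustration of the new hook (not part of the patch;
`MyTargetInstrInfo` and `isReorderableLoad` are hypothetical), a target
that opts in passes `ReorderWhileClustering=true` to
`createLoadClusterDAGMutation` and overrides the query, for example:

```cpp
// Sketch only: a hypothetical target restricting which clustered
// memory operations may be reordered.
bool MyTargetInstrInfo::canReorderClusterMemOps(
    ArrayRef<const MachineOperand *> BaseOps1,
    ArrayRef<const MachineOperand *> BaseOps2) const {
  // The base operands lead back to the instructions being clustered;
  // an empty list means no base was identified, so stay conservative.
  if (BaseOps1.empty() || BaseOps2.empty())
    return false;
  const MachineInstr &MI1 = *BaseOps1.front()->getParent();
  const MachineInstr &MI2 = *BaseOps2.front()->getParent();
  // isReorderableLoad is a placeholder for a target-specific predicate.
  return isReorderableLoad(MI1) && isReorderableLoad(MI2);
}
```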
The intention is to allow some additional overlap of computation with
memory loads in large clauses.
Loads will more often be issued in an order closer to their uses,
producing more incremental s_waitcnt values.
On average this yields a very small reduction in VGPR pressure,
although edge cases may see increased pressure.
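For example (hand-written GFX-style assembly for illustration, not
taken from the test diffs), when two clustered loads are consumed at
different points, issuing them close to use order permits partial waits:

```
buffer_load_dword v0, ...   ; load A, result needed first
buffer_load_dword v1, ...   ; load B, result needed later
s_waitcnt vmcnt(1)          ; wait for A only; B stays in flight
v_add_f32_e32 v2, v0, v0    ; compute with A while B completes
s_waitcnt vmcnt(0)          ; drain the counter before using B
v_add_f32_e32 v3, v1, v1
```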
Reordering SMEM/LDS accesses is not beneficial, as these must always
be waited on with waitcnt 0.
For the benefit of future tuning, add support for the function
attribute "amdgpu-reorder-loads-while-clustering" (set to either 0 or
1) to disable/enable the reordering behaviour per function.
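For example (a sketch using the names added in this patch; the kernel
itself is hypothetical), a single function can opt out while the
global default, controlled by the hidden llc option
`-amdgpu-reorder-while-load-clustering`, stays enabled:

```llvm
; Opt this function out of load reordering during clustering.
define amdgpu_kernel void @no_reorder(ptr addrspace(1) %p) #0 {
  ret void
}
attributes #0 = { "amdgpu-reorder-loads-while-clustering"="0" }
```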
---
Patch is 199.93 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/107986.diff
24 Files Affected:
- (modified) llvm/include/llvm/CodeGen/TargetInstrInfo.h (+10)
- (modified) llvm/lib/CodeGen/MachineScheduler.cpp (+3-1)
- (modified) llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp (+22-5)
- (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.cpp (+17)
- (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.h (+4)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfo.h (+6)
- (modified) llvm/test/CodeGen/AMDGPU/bf16.ll (+88-88)
- (modified) llvm/test/CodeGen/AMDGPU/call-argument-types.ll (+17-16)
- (modified) llvm/test/CodeGen/AMDGPU/cluster_stores.ll (+44-44)
- (modified) llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.private.ll (+4-4)
- (modified) llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll (+38-36)
- (modified) llvm/test/CodeGen/AMDGPU/insert_waitcnt_for_precise_memory.ll (+2-2)
- (modified) llvm/test/CodeGen/AMDGPU/issue92561-restore-undef-scc-verifier-error.ll (+4-3)
- (modified) llvm/test/CodeGen/AMDGPU/live-interval-bug-in-rename-independent-subregs.mir (+32-32)
- (modified) llvm/test/CodeGen/AMDGPU/load-global-i16.ll (+88-88)
- (modified) llvm/test/CodeGen/AMDGPU/load-global-i32.ll (+179-178)
- (modified) llvm/test/CodeGen/AMDGPU/memcpy-libcall.ll (+228-206)
- (modified) llvm/test/CodeGen/AMDGPU/memcpy-param-combinations.ll (+72-72)
- (modified) llvm/test/CodeGen/AMDGPU/memmove-param-combinations.ll (+54-54)
- (modified) llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll (+4-4)
- (modified) llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll (+1-1)
- (modified) llvm/test/CodeGen/AMDGPU/select.f16.ll (+51-54)
- (modified) llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll (+7-7)
- (modified) llvm/test/CodeGen/AMDGPU/wqm.ll (+7-7)
``````````diff
diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index 65c5788ac5cc9f..240037687ab133 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -1574,6 +1574,16 @@ class TargetInstrInfo : public MCInstrInfo {
llvm_unreachable("target did not implement shouldClusterMemOps()");
}
+ /// Returns true if the two given memory operations can be reordered
+ /// while clustering.
+ /// Will only be queried if ReorderWhileClustering is enabled and
+ /// shouldClusterMemOps already returned true for the same operation pair.
+ virtual bool
+ canReorderClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
+ ArrayRef<const MachineOperand *> BaseOps2) const {
+ llvm_unreachable("target did not implement canReorderClusterMemOps()");
+ }
+
/// Reverses the branch condition of the specified condition list,
/// returning false on success and true if it cannot be reversed.
virtual bool
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index 4e6d34346b1d80..876189a5d15621 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -1891,7 +1891,9 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
SUnit *SUa = MemOpa.SU;
SUnit *SUb = MemOpb.SU;
- if (!ReorderWhileClustering && SUa->NodeNum > SUb->NodeNum)
+ if (SUa->NodeNum > SUb->NodeNum &&
+ (!ReorderWhileClustering ||
+ !TII->canReorderClusterMemOps(MemOpa.BaseOps, MemOpb.BaseOps)))
std::swap(SUa, SUb);
// FIXME: Is this check really required?
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 9c9c5051393730..1aa07c705e8218 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -393,6 +393,11 @@ static cl::opt<bool>
cl::desc("Enable AMDGPUAttributorPass"),
cl::init(true), cl::Hidden);
+static cl::opt<bool> ReorderWhileLoadClustering(
+ "amdgpu-reorder-while-load-clustering",
+ cl::desc("Enable reordering during load clustering"), cl::init(true),
+ cl::Hidden);
+
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
// Register the target
RegisterTargetMachine<R600TargetMachine> X(getTheR600Target());
@@ -483,12 +488,20 @@ static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
return new SIScheduleDAGMI(C);
}
+static bool getReorderWhileLoadClustering(const MachineFunction *MF) {
+ if (!ReorderWhileLoadClustering)
+ return false;
+ return !!MF->getFunction().getFnAttributeAsParsedInteger(
+ "amdgpu-reorder-loads-while-clustering", 1);
+}
+
static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
ScheduleDAGMILive *DAG =
new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
- DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
+ DAG->addMutation(createLoadClusterDAGMutation(
+ DAG->TII, DAG->TRI, getReorderWhileLoadClustering(C->MF)));
if (ST.shouldClusterStores())
DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
@@ -510,7 +523,8 @@ createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
auto DAG = new GCNIterativeScheduler(C,
GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
- DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
+ DAG->addMutation(createLoadClusterDAGMutation(
+ DAG->TII, DAG->TRI, getReorderWhileLoadClustering(C->MF)));
if (ST.shouldClusterStores())
DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
return DAG;
@@ -526,7 +540,8 @@ createIterativeILPMachineScheduler(MachineSchedContext *C) {
const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
auto DAG = new GCNIterativeScheduler(C,
GCNIterativeScheduler::SCHEDULE_ILP);
- DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
+ DAG->addMutation(createLoadClusterDAGMutation(
+ DAG->TII, DAG->TRI, getReorderWhileLoadClustering(C->MF)));
if (ST.shouldClusterStores())
DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
@@ -967,7 +982,8 @@ class GCNPassConfig final : public AMDGPUPassConfig {
C, std::make_unique<PostGenericScheduler>(C),
/*RemoveKillFlags=*/true);
const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
- DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
+ DAG->addMutation(createLoadClusterDAGMutation(
+ DAG->TII, DAG->TRI, getReorderWhileLoadClustering(C->MF)));
if (ST.shouldClusterStores())
DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
DAG->addMutation(ST.createFillMFMAShadowMutation(DAG->TII));
@@ -1207,7 +1223,8 @@ llvm::ScheduleDAGInstrs *
AMDGPUPassConfig::createMachineScheduler(MachineSchedContext *C) const {
const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
ScheduleDAGMILive *DAG = createGenericSchedLive(C);
- DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
+ DAG->addMutation(createLoadClusterDAGMutation(
+ DAG->TII, DAG->TRI, getReorderWhileLoadClustering(C->MF)));
if (ST.shouldClusterStores())
DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
return DAG;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index c6f28af1e5e731..c8e14a8b47c8c5 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -581,6 +581,23 @@ bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
return NumDWORDs <= 8;
}
+bool SIInstrInfo::canReorderClusterMemOps(
+ ArrayRef<const MachineOperand *> BaseOps1,
+ ArrayRef<const MachineOperand *> BaseOps2) const {
+ const MachineInstr *FirstLdSt =
+ !BaseOps1.empty() ? BaseOps1.front()->getParent() : nullptr;
+ const MachineInstr *SecondLdSt =
+ !BaseOps2.empty() ? BaseOps2.front()->getParent() : nullptr;
+
+ if (!FirstLdSt || !isVMEM(*FirstLdSt))
+ return false;
+ if (!SecondLdSt || !isVMEM(*SecondLdSt))
+ return false;
+
+ return true;
+}
+
// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into 2 16 store batches.
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 71432510fdee4f..70fbb7966c6bfd 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -250,6 +250,10 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
unsigned ClusterSize,
unsigned NumBytes) const override;
+ bool canReorderClusterMemOps(
+ ArrayRef<const MachineOperand *> BaseOps1,
+ ArrayRef<const MachineOperand *> BaseOps2) const override;
+
bool shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, int64_t Offset0,
int64_t Offset1, unsigned NumLoads) const override;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 457db9b9860d00..28b69a7e645c66 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -179,6 +179,12 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
unsigned ClusterSize,
unsigned NumBytes) const override;
+ bool canReorderClusterMemOps(
+ ArrayRef<const MachineOperand *> BaseOps1,
+ ArrayRef<const MachineOperand *> BaseOps2) const override {
+ return true;
+ }
+
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
const MachineOperand *&BaseOp,
int64_t &Offset, LocationSize &Width,
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index d9ce1e4efe0e50..869f5f0e8c2f6c 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -1865,21 +1865,19 @@ define void @v_store_global_v64bf16(<64 x bfloat> %val, ptr addrspace(1) %ptr) {
; GFX7-NEXT: v_alignbit_b32 v7, v0, v1, 16
; GFX7-NEXT: s_waitcnt vmcnt(9)
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v37
-; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v28
-; GFX7-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:64
; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: s_waitcnt vmcnt(9)
-; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v38
-; GFX7-NEXT: v_alignbit_b32 v4, v33, v4, 16
; GFX7-NEXT: s_waitcnt vmcnt(8)
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v38
+; GFX7-NEXT: s_waitcnt vmcnt(7)
; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v39
; GFX7-NEXT: v_alignbit_b32 v36, v0, v1, 16
-; GFX7-NEXT: s_waitcnt vmcnt(6)
+; GFX7-NEXT: s_waitcnt vmcnt(5)
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v49
+; GFX7-NEXT: v_alignbit_b32 v4, v33, v4, 16
; GFX7-NEXT: v_lshrrev_b32_e32 v18, 16, v18
; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v48
; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: s_waitcnt vmcnt(5)
+; GFX7-NEXT: s_waitcnt vmcnt(4)
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v50
; GFX7-NEXT: v_alignbit_b32 v35, v18, v19, 16
; GFX7-NEXT: v_alignbit_b32 v34, v0, v1, 16
@@ -1888,11 +1886,14 @@ define void @v_store_global_v64bf16(<64 x bfloat> %val, ptr addrspace(1) %ptr) {
; GFX7-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:24
; GFX7-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:20
; GFX7-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:16
+; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v28
; GFX7-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:12
-; GFX7-NEXT: s_waitcnt vmcnt(8)
-; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
+; GFX7-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:64
+; GFX7-NEXT: s_waitcnt vmcnt(9)
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX7-NEXT: s_waitcnt vmcnt(8)
+; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_alignbit_b32 v33, v6, v14, 16
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v17
; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6
@@ -1900,18 +1901,18 @@ define void @v_store_global_v64bf16(<64 x bfloat> %val, ptr addrspace(1) %ptr) {
; GFX7-NEXT: v_alignbit_b32 v6, v6, v14, 16
; GFX7-NEXT: s_waitcnt vmcnt(7)
; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v15
-; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v14
; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v30
-; GFX7-NEXT: buffer_store_dwordx4 v[33:36], v[31:32], s[4:7], 0 addr64 offset:96
; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v29
+; GFX7-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:8
+; GFX7-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:4
+; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX7-NEXT: buffer_store_dwordx4 v[33:36], v[31:32], s[4:7], 0 addr64 offset:96
; GFX7-NEXT: v_alignbit_b32 v17, v14, v15, 16
; GFX7-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:52
; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v27
; GFX7-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:48
; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v26
; GFX7-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:44
-; GFX7-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:8
-; GFX7-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:4
; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60
; GFX7-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:56
; GFX7-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:40
@@ -1929,28 +1930,27 @@ define void @v_store_global_v64bf16(<64 x bfloat> %val, ptr addrspace(1) %ptr) {
; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_alignbit_b32 v21, v0, v1, 16
-; GFX7-NEXT: s_waitcnt vmcnt(13)
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v18
; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: s_waitcnt vmcnt(12)
+; GFX7-NEXT: s_waitcnt vmcnt(13)
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v19
; GFX7-NEXT: v_alignbit_b32 v20, v0, v1, 16
-; GFX7-NEXT: s_waitcnt vmcnt(11)
+; GFX7-NEXT: s_waitcnt vmcnt(12)
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v22
; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: s_waitcnt vmcnt(10)
+; GFX7-NEXT: s_waitcnt vmcnt(11)
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v23
; GFX7-NEXT: v_alignbit_b32 v19, v0, v1, 16
-; GFX7-NEXT: s_waitcnt vmcnt(8)
-; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v35
-; GFX7-NEXT: s_waitcnt vmcnt(5)
+; GFX7-NEXT: s_waitcnt vmcnt(9)
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v29
; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: s_waitcnt vmcnt(4)
+; GFX7-NEXT: s_waitcnt vmcnt(8)
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v30
; GFX7-NEXT: v_alignbit_b32 v18, v0, v1, 16
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v28
; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX7-NEXT: s_waitcnt vmcnt(6)
+; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v35
; GFX7-NEXT: s_waitcnt vmcnt(3)
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v33
; GFX7-NEXT: s_waitcnt vmcnt(2)
@@ -35787,15 +35787,15 @@ define <16 x bfloat> @v_select_v16bf16(i1 %cond, <16 x bfloat> %a, <16 x bfloat>
; GFX7-NEXT: v_alignbit_b32 v8, v8, v17, 16
; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v10
; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v25
-; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GFX7-NEXT: v_alignbit_b32 v10, v10, v17, 16
+; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
+; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v28
; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v12
; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
-; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v28
-; GFX7-NEXT: v_alignbit_b32 v11, v12, v11, 16
-; GFX7-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:4
; GFX7-NEXT: v_lshrrev_b32_e32 v17, 16, v17
; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v27
+; GFX7-NEXT: v_alignbit_b32 v11, v12, v11, 16
+; GFX7-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:4
; GFX7-NEXT: v_alignbit_b32 v17, v17, v18, 16
; GFX7-NEXT: buffer_load_dword v18, off, s[0:3], s32
; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
@@ -36178,17 +36178,17 @@ define <32 x bfloat> @v_select_v32bf16(i1 %cond, <32 x bfloat> %a, <32 x bfloat>
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v10
; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v9
-; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GFX7-NEXT: v_alignbit_b32 v5, v5, v6, 16
; GFX7-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:12
; GFX7-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16
; GFX7-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:24
; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40
+; GFX7-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:8
+; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
; GFX7-NEXT: v_lshrrev_b32_e32 v18, 16, v18
; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
; GFX7-NEXT: v_alignbit_b32 v17, v18, v17, 16
; GFX7-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:76
-; GFX7-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:8
; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v14
@@ -36225,6 +36225,7 @@ define <32 x bfloat> @v_select_v32bf16(i1 %cond, <32 x bfloat> %a, <32 x bfloat>
; GFX7-NEXT: v_lshrrev_b32_e32 v30, 16, v30
; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
; GFX7-NEXT: v_alignbit_b32 v29, v30, v29, 16
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32
; GFX7-NEXT: v_and_b32_e32 v0, 1, v0
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GFX7-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32
@@ -36238,52 +36239,50 @@ define <32 x bfloat> @v_select_v32bf16(i1 %cond, <32 x bfloat> %a, <32 x bfloat>
; GFX7-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:108
; GFX7-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:124
; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:128
-; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32
; GFX7-NEXT: s_waitcnt vmcnt(14)
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6
; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
-; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
; GFX7-NEXT: s_waitcnt vmcnt(13)
-; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
-; GFX7-NEXT: s_waitcnt vmcnt(12)
; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
; GFX7-NEXT: v_alignbit_b32 v6, v6, v7, 16
; GFX7-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:20
-; GFX7-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
+; GFX7-NEXT: s_waitcnt vmcnt(13)
+; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
+; GFX7-NEXT: v_lshrrev_b32_e32 v18, 16, v18
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v1
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX7-NEXT: s_waitcnt vmcnt(12)
-; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
; GFX7-NEXT: s_waitcnt vmcnt(11)
+; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
+; GFX7-NEXT: s_waitcnt vmcnt(10)
; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX7-NEXT: s_waitcnt vmcnt(9)
+; GFX7-NEXT: s_waitcnt vmcnt(8)
; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX7-NEXT: s_waitcnt vmcnt(7)
+; GFX7-NEXT: s_waitcnt vmcnt(6)
; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
; GFX7-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX7-NEXT: s_waitcnt vmcnt(6)
+; GFX7-NEXT: s_waitcnt vmcnt(5)
; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
; GFX7-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX7-NEXT: s_waitcnt vmcnt(5)
+; GFX7-NEXT: s_waitcnt vmcnt(4)
; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
; GFX7-NEXT: v_lshrrev_b32_e32 v22, 16, v22
; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
; GFX7-NEXT: v_lshrrev_b32_e32 v24, 16, v24
-; GFX7-NEXT: s_waitcnt vmcnt(4)
+; GFX7-NEXT: s_waitcnt vmcnt(3)
; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
; GFX7-NEXT: v_lshrrev_b32_e32 v26, 16, v26
; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
; GFX7-NEXT: v_lshrrev_b32_e32 v28, 16, v28
-; GFX7-NEXT: s_waitcnt vmcnt(3)
+; GFX7-NEXT: s_waitcnt vmcnt(2)
; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
; GFX7-NEXT: v_lshrrev_b32_e32 v30, 16, v30
; GFX7-NEXT: s_waitcnt vmcnt(1)
-; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
@@ -36384,6 +36383,7 @@ define <32 x bfloat> @v_select_v32bf16(i1 %cond, <32 x bfloat> %a, <32 x bfloat>
; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
; GFX7-NEXT: v_alignbit_b32 v30, v30, v31, 16
; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
; GFX7-NEXT: v_cndmask_b32_e32 v29, v30, v29, vcc
; GFX7-NEXT: v_lshlrev_b32_e32 v28, 16, v29
; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
@@ -37633,27 +37633,27 @@ define <16 x bfloat> @v_vselect_v16bf16(<16 x i1> %cond, <16 x bfloat> %a, <16 x
; GFX7-NEXT: v_cmp_eq_u32_e64 s[24:25], 1, v0
; GFX7-NEXT: buffer_load_dword v0, off, s[0:3], s32
; GFX7-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:64
-; GFX7-NEXT: v_and_b32_e32 v2, 1, v12
; GFX7-NEXT: v_writelane_b32 v31, s30, 0
-; GFX7-NEXT: v_cmp_eq_u32_e64 s[26:27], 1, v2
-; GFX7-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:60
-; GFX7-NEXT: v_and_b32_e32 v3, 1, v13
; GFX7-NEXT: v_writelane_b32 v31, s31, 1
-; GFX7-NEXT: v_cmp_eq_u32_e64 s[28:29], 1, v3
-; GFX7-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:5...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/107986