[llvm] [AMDGPU][AtomicOptimizer] Fix DT update for divergent values with Iterative strategy (PR #87605)
Pierre van Houtryve via llvm-commits
llvm-commits at lists.llvm.org
Sun Apr 7 23:13:57 PDT 2024
https://github.com/Pierre-vh updated https://github.com/llvm/llvm-project/pull/87605
From 83d1f8a4c3442a0030cd7d150af470b5e6d3e197 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Thu, 4 Apr 2024 08:36:54 +0200
Subject: [PATCH 1/2] [AMDGPU][AtomicOptimizer] Fix DT update for Iterative +
Divergent values
We take the terminator from EntryBB and put it in ComputeEnd. Make sure we also move the
DT edges; previously we only updated them assuming an unconditional branch.
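In pattern form, the fix amounts to re-homing every successor edge of the moved
terminator. Below is a minimal sketch of the DomTreeUpdater usage (OldBB/NewBB are
illustrative stand-ins for EntryBB/ComputeEnd; it restates the hunk below in isolation):

// Sketch: when a terminator is moved from OldBB to NewBB, every one of its
// successor edges must move with it in the dominator tree. The old code
// deleted only a single hard-coded edge, which is wrong for a conditional
// branch with two successors.
SmallVector<DominatorTree::UpdateType, 6> Updates(
    {{DominatorTree::Insert, OldBB, ComputeLoop},
     {DominatorTree::Insert, ComputeLoop, NewBB}});
for (BasicBlock *Succ : Terminator->successors()) {
  Updates.push_back({DominatorTree::Insert, NewBB, Succ});
  Updates.push_back({DominatorTree::Delete, OldBB, Succ});
}
DTU.applyUpdates(Updates);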
---
.../Target/AMDGPU/AMDGPUAtomicOptimizer.cpp | 18 ++-
.../atomic_optimization_split_dt_update.ll | 153 ++++++++++++++++++
2 files changed, 166 insertions(+), 5 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/atomic_optimization_split_dt_update.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
index dbb3de76b4ddae..8d3b2e82817052 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -743,7 +743,7 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
Function *F = I.getFunction();
LLVMContext &C = F->getContext();
-
+
// For atomic sub, perform scan with add operation and allow one lane to
// subtract the reduced value later.
AtomicRMWInst::BinOp ScanOp = Op;
@@ -876,7 +876,7 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
BasicBlock *Predecessor = nullptr;
if (ValDivergent && ScanImpl == ScanOptions::Iterative) {
// Move terminator from I's block to ComputeEnd block.
- Instruction *Terminator = EntryBB->getTerminator();
+ BranchInst *Terminator = cast<BranchInst>(EntryBB->getTerminator());
B.SetInsertPoint(ComputeEnd);
Terminator->removeFromParent();
B.Insert(Terminator);
@@ -887,10 +887,18 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
B.CreateBr(ComputeLoop);
// Update the dominator tree for new control flow.
- DTU.applyUpdates(
+ SmallVector<DominatorTree::UpdateType, 6> DomTreeUpdates(
{{DominatorTree::Insert, EntryBB, ComputeLoop},
- {DominatorTree::Insert, ComputeLoop, ComputeEnd},
- {DominatorTree::Delete, EntryBB, SingleLaneTerminator->getParent()}});
+ {DominatorTree::Insert, ComputeLoop, ComputeEnd}});
+
+  // We're moving the terminator from EntryBB to ComputeEnd, so make sure
+  // we move the DT edges as well.
+ for (auto *Succ : Terminator->successors()) {
+ DomTreeUpdates.push_back({DominatorTree::Insert, ComputeEnd, Succ});
+ DomTreeUpdates.push_back({DominatorTree::Delete, EntryBB, Succ});
+ }
+
+ DTU.applyUpdates(DomTreeUpdates);
Predecessor = ComputeEnd;
} else {
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimization_split_dt_update.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimization_split_dt_update.ll
new file mode 100644
index 00000000000000..3c99c2909c2101
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimization_split_dt_update.ll
@@ -0,0 +1,153 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -O3 -mtriple=amdgcn-- -mcpu=gfx908 %s -o - -verify-dom-info | FileCheck %s
+
+; Check we're properly adding an edge from ComputeEnd to the "End" block added by
+; SplitBlockAndInsertIfThen.
+;
+; If the edge is not added, domtree verification will fail.
+
+declare i32 @quux()
+
+define amdgpu_kernel void @ham(ptr addrspace(4) %arg) {
+; CHECK-LABEL: ham:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; CHECK-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; CHECK-NEXT: s_mov_b32 s50, -1
+; CHECK-NEXT: s_mov_b32 s51, 0xe00000
+; CHECK-NEXT: s_add_u32 s48, s48, s9
+; CHECK-NEXT: s_addc_u32 s49, s49, 0
+; CHECK-NEXT: s_mov_b64 s[36:37], s[2:3]
+; CHECK-NEXT: s_mov_b32 s33, s8
+; CHECK-NEXT: s_add_u32 s8, s36, 44
+; CHECK-NEXT: s_addc_u32 s9, s37, 0
+; CHECK-NEXT: s_mov_b64 s[38:39], s[0:1]
+; CHECK-NEXT: s_getpc_b64 s[0:1]
+; CHECK-NEXT: s_add_u32 s0, s0, quux@gotpcrel32@lo+4
+; CHECK-NEXT: s_addc_u32 s1, s1, quux@gotpcrel32@hi+12
+; CHECK-NEXT: s_load_dwordx2 s[42:43], s[0:1], 0x0
+; CHECK-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; CHECK-NEXT: s_mov_b64 s[34:35], s[4:5]
+; CHECK-NEXT: v_or3_b32 v40, v0, v1, v2
+; CHECK-NEXT: s_mov_b64 s[0:1], s[48:49]
+; CHECK-NEXT: s_mov_b64 s[4:5], s[38:39]
+; CHECK-NEXT: s_mov_b64 s[10:11], s[34:35]
+; CHECK-NEXT: s_mov_b32 s12, s6
+; CHECK-NEXT: s_mov_b32 s13, s7
+; CHECK-NEXT: s_mov_b32 s14, s33
+; CHECK-NEXT: v_mov_b32_e32 v31, v40
+; CHECK-NEXT: s_mov_b64 s[2:3], s[50:51]
+; CHECK-NEXT: s_mov_b32 s32, 0
+; CHECK-NEXT: s_mov_b32 s40, s7
+; CHECK-NEXT: s_mov_b32 s41, s6
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[42:43]
+; CHECK-NEXT: v_mov_b32_e32 v41, v0
+; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 0, v41
+; CHECK-NEXT: s_and_saveexec_b64 s[44:45], vcc
+; CHECK-NEXT: s_cbranch_execz .LBB0_2
+; CHECK-NEXT: ; %bb.1: ; %bb1
+; CHECK-NEXT: s_add_u32 s8, s36, 44
+; CHECK-NEXT: s_mov_b64 s[0:1], s[48:49]
+; CHECK-NEXT: s_addc_u32 s9, s37, 0
+; CHECK-NEXT: s_mov_b64 s[4:5], s[38:39]
+; CHECK-NEXT: s_mov_b64 s[10:11], s[34:35]
+; CHECK-NEXT: s_mov_b32 s12, s41
+; CHECK-NEXT: s_mov_b32 s13, s40
+; CHECK-NEXT: s_mov_b32 s14, s33
+; CHECK-NEXT: v_mov_b32_e32 v31, v40
+; CHECK-NEXT: s_mov_b64 s[2:3], s[50:51]
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[42:43]
+; CHECK-NEXT: v_mov_b32_e32 v41, v0
+; CHECK-NEXT: .LBB0_2: ; %bb3
+; CHECK-NEXT: s_or_b64 exec, exec, s[44:45]
+; CHECK-NEXT: s_load_dwordx2 s[42:43], s[36:37], 0x24
+; CHECK-NEXT: s_mov_b64 s[44:45], 0
+; CHECK-NEXT: v_mov_b32_e32 v42, 0
+; CHECK-NEXT: s_mov_b64 s[46:47], 0
+; CHECK-NEXT: .LBB0_3: ; %bb4
+; CHECK-NEXT: ; =>This Loop Header: Depth=1
+; CHECK-NEXT: ; Child Loop BB0_5 Depth 2
+; CHECK-NEXT: s_add_u32 s8, s36, 44
+; CHECK-NEXT: s_addc_u32 s9, s37, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_getpc_b64 s[0:1]
+; CHECK-NEXT: s_add_u32 s0, s0, quux@gotpcrel32@lo+4
+; CHECK-NEXT: s_addc_u32 s1, s1, quux@gotpcrel32@hi+12
+; CHECK-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; CHECK-NEXT: s_mov_b64 s[0:1], s[48:49]
+; CHECK-NEXT: s_mov_b64 s[4:5], s[38:39]
+; CHECK-NEXT: s_mov_b64 s[10:11], s[34:35]
+; CHECK-NEXT: s_mov_b32 s12, s41
+; CHECK-NEXT: s_mov_b32 s13, s40
+; CHECK-NEXT: s_mov_b32 s14, s33
+; CHECK-NEXT: v_mov_b32_e32 v31, v40
+; CHECK-NEXT: s_mov_b64 s[2:3], s[50:51]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; CHECK-NEXT: s_or_b64 s[46:47], vcc, s[46:47]
+; CHECK-NEXT: s_andn2_b64 exec, exec, s[46:47]
+; CHECK-NEXT: s_cbranch_execnz .LBB0_3
+; CHECK-NEXT: ; %bb.4: ; %bb7
+; CHECK-NEXT: ; in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: s_or_b64 exec, exec, s[46:47]
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[42:43], 0x0
+; CHECK-NEXT: s_mov_b64 s[2:3], exec
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB0_5: ; %ComputeLoop
+; CHECK-NEXT: ; Parent Loop BB0_3 Depth=1
+; CHECK-NEXT: ; => This Inner Loop Header: Depth=2
+; CHECK-NEXT: s_ff1_i32_b64 s5, s[2:3]
+; CHECK-NEXT: v_readlane_b32 s8, v41, s5
+; CHECK-NEXT: s_lshl_b64 s[6:7], 1, s5
+; CHECK-NEXT: s_add_i32 s4, s4, s8
+; CHECK-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
+; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB0_5
+; CHECK-NEXT: ; %bb.6: ; %ComputeEnd
+; CHECK-NEXT: ; in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; CHECK-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; CHECK-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; CHECK-NEXT: s_mov_b64 s[46:47], 0
+; CHECK-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; CHECK-NEXT: s_cbranch_execnz .LBB0_3
+; CHECK-NEXT: ; %bb.7: ; in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: s_or_b64 exec, exec, s[44:45]
+; CHECK-NEXT: v_mov_b32_e32 v0, s4
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_atomic_add v42, v0, s[0:1]
+; CHECK-NEXT: s_mov_b64 s[44:45], 0
+; CHECK-NEXT: s_branch .LBB0_3
+; CHECK-NEXT: ; %bb.8: ; %DummyReturnBlock
+; CHECK-NEXT: s_endpgm
+bb:
+ %call = tail call i32 @quux()
+ %icmp = icmp eq i32 %call, 0
+ br i1 %icmp, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %call2 = tail call i32 @quux()
+ br label %bb3
+
+bb3: ; preds = %bb1, %bb
+ %phi = phi i32 [ %call2, %bb1 ], [ %call, %bb ]
+ br label %bb4
+
+bb4: ; preds = %bb8, %bb3
+ %call5 = tail call i32 @quux()
+ %icmp6 = icmp eq i32 %call5, 0
+ br i1 %icmp6, label %bb8, label %bb7
+
+bb7: ; preds = %bb4
+ %load = load ptr, ptr addrspace(4) %arg, align 8
+ %addrspacecast = addrspacecast ptr %load to ptr addrspace(1)
+ %atomicrmw = atomicrmw add ptr addrspace(1) %addrspacecast, i32 %phi syncscope("agent-one-as") monotonic, align 4
+ br label %bb8
+
+bb8: ; preds = %bb7, %bb4
+ br label %bb4
+}
From 224a6686b10ca498a8704695144fdcf8408adb7b Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Mon, 8 Apr 2024 08:13:32 +0200
Subject: [PATCH 2/2] fix comments
---
.../Target/AMDGPU/AMDGPUAtomicOptimizer.cpp | 15 +-
.../atomic_optimization_split_dt_update.ll | 169 ++++++------------
2 files changed, 61 insertions(+), 123 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
index 8d3b2e82817052..ad98f4f743ae00 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -855,7 +855,7 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
Value *const Cond = B.CreateICmpEQ(Mbcnt, B.getInt32(0));
// Store I's original basic block before we split the block.
- BasicBlock *const EntryBB = I.getParent();
+ BasicBlock *const OriginalBB = I.getParent();
// We need to introduce some new control flow to force a single lane to be
// active. We do this by splitting I's basic block at I, and introducing the
@@ -876,33 +876,36 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
BasicBlock *Predecessor = nullptr;
if (ValDivergent && ScanImpl == ScanOptions::Iterative) {
// Move terminator from I's block to ComputeEnd block.
- BranchInst *Terminator = cast<BranchInst>(EntryBB->getTerminator());
+ //
+ // OriginalBB is known to have a branch as terminator because
+ // SplitBlockAndInsertIfThen will have inserted one.
+ BranchInst *Terminator = cast<BranchInst>(OriginalBB->getTerminator());
B.SetInsertPoint(ComputeEnd);
Terminator->removeFromParent();
B.Insert(Terminator);
// Branch to the ComputeLoop block unconditionally from I's block for the
// iterative approach.
- B.SetInsertPoint(EntryBB);
+ B.SetInsertPoint(OriginalBB);
B.CreateBr(ComputeLoop);
// Update the dominator tree for new control flow.
SmallVector<DominatorTree::UpdateType, 6> DomTreeUpdates(
- {{DominatorTree::Insert, EntryBB, ComputeLoop},
+ {{DominatorTree::Insert, OriginalBB, ComputeLoop},
{DominatorTree::Insert, ComputeLoop, ComputeEnd}});
// We're moving the terminator from EntryBB to ComputeEnd, so make sure
// we move the DT edges as well.
for (auto *Succ : Terminator->successors()) {
DomTreeUpdates.push_back({DominatorTree::Insert, ComputeEnd, Succ});
- DomTreeUpdates.push_back({DominatorTree::Delete, EntryBB, Succ});
+ DomTreeUpdates.push_back({DominatorTree::Delete, OriginalBB, Succ});
}
DTU.applyUpdates(DomTreeUpdates);
Predecessor = ComputeEnd;
} else {
- Predecessor = EntryBB;
+ Predecessor = OriginalBB;
}
// Move the IR builder into single_lane next.
B.SetInsertPoint(SingleLaneTerminator);
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimization_split_dt_update.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimization_split_dt_update.ll
index 3c99c2909c2101..c07cd4e493b9ad 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimization_split_dt_update.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimization_split_dt_update.ll
@@ -1,5 +1,5 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -O3 -mtriple=amdgcn-- -mcpu=gfx908 %s -o - -verify-dom-info | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx908 -passes="amdgpu-atomic-optimizer,verify<domtree>" %s -S -o - | FileCheck %s
; Check we're properly adding an edge from ComputeEnd to the "End" block added by
; SplitBlockAndInsertIfThen.
@@ -9,121 +9,56 @@
declare i32 @quux()
define amdgpu_kernel void @ham(ptr addrspace(4) %arg) {
-; CHECK-LABEL: ham:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; CHECK-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; CHECK-NEXT: s_mov_b32 s50, -1
-; CHECK-NEXT: s_mov_b32 s51, 0xe00000
-; CHECK-NEXT: s_add_u32 s48, s48, s9
-; CHECK-NEXT: s_addc_u32 s49, s49, 0
-; CHECK-NEXT: s_mov_b64 s[36:37], s[2:3]
-; CHECK-NEXT: s_mov_b32 s33, s8
-; CHECK-NEXT: s_add_u32 s8, s36, 44
-; CHECK-NEXT: s_addc_u32 s9, s37, 0
-; CHECK-NEXT: s_mov_b64 s[38:39], s[0:1]
-; CHECK-NEXT: s_getpc_b64 s[0:1]
-; CHECK-NEXT: s_add_u32 s0, s0, quux@gotpcrel32@lo+4
-; CHECK-NEXT: s_addc_u32 s1, s1, quux@gotpcrel32@hi+12
-; CHECK-NEXT: s_load_dwordx2 s[42:43], s[0:1], 0x0
-; CHECK-NEXT: v_lshlrev_b32_e32 v2, 20, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; CHECK-NEXT: s_mov_b64 s[34:35], s[4:5]
-; CHECK-NEXT: v_or3_b32 v40, v0, v1, v2
-; CHECK-NEXT: s_mov_b64 s[0:1], s[48:49]
-; CHECK-NEXT: s_mov_b64 s[4:5], s[38:39]
-; CHECK-NEXT: s_mov_b64 s[10:11], s[34:35]
-; CHECK-NEXT: s_mov_b32 s12, s6
-; CHECK-NEXT: s_mov_b32 s13, s7
-; CHECK-NEXT: s_mov_b32 s14, s33
-; CHECK-NEXT: v_mov_b32_e32 v31, v40
-; CHECK-NEXT: s_mov_b64 s[2:3], s[50:51]
-; CHECK-NEXT: s_mov_b32 s32, 0
-; CHECK-NEXT: s_mov_b32 s40, s7
-; CHECK-NEXT: s_mov_b32 s41, s6
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: s_swappc_b64 s[30:31], s[42:43]
-; CHECK-NEXT: v_mov_b32_e32 v41, v0
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 0, v41
-; CHECK-NEXT: s_and_saveexec_b64 s[44:45], vcc
-; CHECK-NEXT: s_cbranch_execz .LBB0_2
-; CHECK-NEXT: ; %bb.1: ; %bb1
-; CHECK-NEXT: s_add_u32 s8, s36, 44
-; CHECK-NEXT: s_mov_b64 s[0:1], s[48:49]
-; CHECK-NEXT: s_addc_u32 s9, s37, 0
-; CHECK-NEXT: s_mov_b64 s[4:5], s[38:39]
-; CHECK-NEXT: s_mov_b64 s[10:11], s[34:35]
-; CHECK-NEXT: s_mov_b32 s12, s41
-; CHECK-NEXT: s_mov_b32 s13, s40
-; CHECK-NEXT: s_mov_b32 s14, s33
-; CHECK-NEXT: v_mov_b32_e32 v31, v40
-; CHECK-NEXT: s_mov_b64 s[2:3], s[50:51]
-; CHECK-NEXT: s_swappc_b64 s[30:31], s[42:43]
-; CHECK-NEXT: v_mov_b32_e32 v41, v0
-; CHECK-NEXT: .LBB0_2: ; %bb3
-; CHECK-NEXT: s_or_b64 exec, exec, s[44:45]
-; CHECK-NEXT: s_load_dwordx2 s[42:43], s[36:37], 0x24
-; CHECK-NEXT: s_mov_b64 s[44:45], 0
-; CHECK-NEXT: v_mov_b32_e32 v42, 0
-; CHECK-NEXT: s_mov_b64 s[46:47], 0
-; CHECK-NEXT: .LBB0_3: ; %bb4
-; CHECK-NEXT: ; =>This Loop Header: Depth=1
-; CHECK-NEXT: ; Child Loop BB0_5 Depth 2
-; CHECK-NEXT: s_add_u32 s8, s36, 44
-; CHECK-NEXT: s_addc_u32 s9, s37, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: s_getpc_b64 s[0:1]
-; CHECK-NEXT: s_add_u32 s0, s0, quux@gotpcrel32@lo+4
-; CHECK-NEXT: s_addc_u32 s1, s1, quux@gotpcrel32@hi+12
-; CHECK-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; CHECK-NEXT: s_mov_b64 s[0:1], s[48:49]
-; CHECK-NEXT: s_mov_b64 s[4:5], s[38:39]
-; CHECK-NEXT: s_mov_b64 s[10:11], s[34:35]
-; CHECK-NEXT: s_mov_b32 s12, s41
-; CHECK-NEXT: s_mov_b32 s13, s40
-; CHECK-NEXT: s_mov_b32 s14, s33
-; CHECK-NEXT: v_mov_b32_e32 v31, v40
-; CHECK-NEXT: s_mov_b64 s[2:3], s[50:51]
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; CHECK-NEXT: s_or_b64 s[46:47], vcc, s[46:47]
-; CHECK-NEXT: s_andn2_b64 exec, exec, s[46:47]
-; CHECK-NEXT: s_cbranch_execnz .LBB0_3
-; CHECK-NEXT: ; %bb.4: ; %bb7
-; CHECK-NEXT: ; in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: s_or_b64 exec, exec, s[46:47]
-; CHECK-NEXT: s_load_dwordx2 s[0:1], s[42:43], 0x0
-; CHECK-NEXT: s_mov_b64 s[2:3], exec
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB0_5: ; %ComputeLoop
-; CHECK-NEXT: ; Parent Loop BB0_3 Depth=1
-; CHECK-NEXT: ; => This Inner Loop Header: Depth=2
-; CHECK-NEXT: s_ff1_i32_b64 s5, s[2:3]
-; CHECK-NEXT: v_readlane_b32 s8, v41, s5
-; CHECK-NEXT: s_lshl_b64 s[6:7], 1, s5
-; CHECK-NEXT: s_add_i32 s4, s4, s8
-; CHECK-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
-; CHECK-NEXT: s_cbranch_scc1 .LBB0_5
-; CHECK-NEXT: ; %bb.6: ; %ComputeEnd
-; CHECK-NEXT: ; in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; CHECK-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; CHECK-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; CHECK-NEXT: s_mov_b64 s[46:47], 0
-; CHECK-NEXT: s_andn2_b64 exec, exec, s[44:45]
-; CHECK-NEXT: s_cbranch_execnz .LBB0_3
-; CHECK-NEXT: ; %bb.7: ; in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: s_or_b64 exec, exec, s[44:45]
-; CHECK-NEXT: v_mov_b32_e32 v0, s4
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: global_atomic_add v42, v0, s[0:1]
-; CHECK-NEXT: s_mov_b64 s[44:45], 0
-; CHECK-NEXT: s_branch .LBB0_3
-; CHECK-NEXT: ; %bb.8: ; %DummyReturnBlock
-; CHECK-NEXT: s_endpgm
+; CHECK-LABEL: define amdgpu_kernel void @ham(
+; CHECK-SAME: ptr addrspace(4) [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @quux()
+; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i32 [[CALL]], 0
+; CHECK-NEXT: br i1 [[ICMP]], label [[BB1:%.*]], label [[BB3:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[CALL2:%.*]] = tail call i32 @quux()
+; CHECK-NEXT: br label [[BB3]]
+; CHECK: bb3:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[CALL2]], [[BB1]] ], [ [[CALL]], [[BB:%.*]] ]
+; CHECK-NEXT: br label [[BB4:%.*]]
+; CHECK: bb4:
+; CHECK-NEXT: [[CALL5:%.*]] = tail call i32 @quux()
+; CHECK-NEXT: [[ICMP6:%.*]] = icmp eq i32 [[CALL5]], 0
+; CHECK-NEXT: br i1 [[ICMP6]], label [[BB8:%.*]], label [[BB7:%.*]]
+; CHECK: bb7:
+; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr addrspace(4) [[ARG]], align 8
+; CHECK-NEXT: [[ADDRSPACECAST:%.*]] = addrspacecast ptr [[LOAD]] to ptr addrspace(1)
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
+; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP0]], 32
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP1]], i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP3]], i32 [[TMP4]])
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; CHECK-NEXT: br label [[COMPUTELOOP:%.*]]
+; CHECK: 7:
+; CHECK-NEXT: [[TMP8:%.*]] = atomicrmw add ptr addrspace(1) [[ADDRSPACECAST]], i32 [[TMP13:%.*]] syncscope("agent-one-as") monotonic, align 4
+; CHECK-NEXT: br label [[TMP9:%.*]]
+; CHECK: 9:
+; CHECK-NEXT: br label [[BB8]]
+; CHECK: bb8:
+; CHECK-NEXT: br label [[BB4]]
+; CHECK: ComputeLoop:
+; CHECK-NEXT: [[ACCUMULATOR:%.*]] = phi i32 [ 0, [[BB7]] ], [ [[TMP13]], [[COMPUTELOOP]] ]
+; CHECK-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP6]], [[BB7]] ], [ [[TMP16:%.*]], [[COMPUTELOOP]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
+; CHECK-NEXT: [[TMP11:%.*]] = trunc i64 [[TMP10]] to i32
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[PHI]], i32 [[TMP11]])
+; CHECK-NEXT: [[TMP13]] = add i32 [[ACCUMULATOR]], [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = shl i64 1, [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], -1
+; CHECK-NEXT: [[TMP16]] = and i64 [[ACTIVEBITS]], [[TMP15]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[TMP16]], 0
+; CHECK-NEXT: br i1 [[TMP17]], label [[COMPUTEEND:%.*]], label [[COMPUTELOOP]]
+; CHECK: ComputeEnd:
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i32 [[TMP5]], 0
+; CHECK-NEXT: br i1 [[TMP18]], label [[TMP7:%.*]], label [[TMP9]]
+;
bb:
%call = tail call i32 @quux()
%icmp = icmp eq i32 %call, 0