[llvm] cdea46c - [AMDGPU] Add pattern for inverse.ballot.i64 Wave32 (#132770)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Mar 24 09:30:05 PDT 2025
Author: Ana Mihajlovic
Date: 2025-03-24T17:30:02+01:00
New Revision: cdea46cc8c6d20921b47284aaf8751699efbd3a8
URL: https://github.com/llvm/llvm-project/commit/cdea46cc8c6d20921b47284aaf8751699efbd3a8
DIFF: https://github.com/llvm/llvm-project/commit/cdea46cc8c6d20921b47284aaf8751699efbd3a8.diff
LOG: [AMDGPU] Add pattern for inverse.ballot.i64 Wave32 (#132770)
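Before this change, the i64 form of llvm.amdgcn.inverse.ballot failed instruction selection on wave32 targets in both SelectionDAG and GlobalISel (see the RUN lines removed from the test below); the added GCNPat selects S_INVERSE_BALLOT_U32 on the low half (sub0) of the 64-bit mask instead. A minimal IR sketch of the case that now compiles, mirroring the updated tests (the function name here is illustrative, not from the patch):

declare i1 @llvm.amdgcn.inverse.ballot.i64(i64)

; With e.g. llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32 this now
; selects successfully instead of crashing with "cannot select".
define amdgpu_cs void @inverse_ballot_w32_sketch(i64 %mask, ptr addrspace(1) %out) {
entry:
  ; In wave32 only the low 32 bits of %mask are meaningful; the new pattern
  ; extracts sub0 of the source and feeds it to S_INVERSE_BALLOT_U32.
  %ballot = call i1 @llvm.amdgcn.inverse.ballot.i64(i64 %mask)
  %sel = select i1 %ballot, i64 1, i64 0
  store i64 %sel, ptr addrspace(1) %out
  ret void
}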
Added:
Modified:
llvm/lib/Target/AMDGPU/SIInstructions.td
llvm/test/CodeGen/AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index d444816381cfc..900aed5b3f994 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -245,6 +245,12 @@ def S_INVERSE_BALLOT_U64 : SPseudoInstSI<
>;
} // End usesCustomInserter = 1
+let WaveSizePredicate = isWave32 in
+ def : GCNPat <
+ (i1 (int_amdgcn_inverse_ballot i64:$src)),
+ (S_INVERSE_BALLOT_U32 (i32 (EXTRACT_SUBREG SReg_64:$src, sub0)))
+>;
+
// Pseudo instructions used for @llvm.fptrunc.round. The final codegen is done
// in the ModeRegister pass.
let Uses = [MODE, EXEC] in {
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll
index 7283ec88a90d8..fb755ea2e5a7f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.inverse.ballot.i64.ll
@@ -1,33 +1,45 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -mattr=+wavefrontsize64 -global-isel=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GISEL %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -mattr=+wavefrontsize64 -global-isel=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=SDAG %s
-
-; RUN: not --crash llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32 -global-isel=1 < %s 2>&1 | FileCheck -check-prefix=GISEL-ERR %s
-; RUN: not --crash llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32 -global-isel=0 < %s 2>&1 | FileCheck -check-prefix=SDAG-ERR %s
-
-; GISEL-ERR: LLVM ERROR: cannot select: {{.*}} = G_INTRINSIC intrinsic(@llvm.amdgcn.inverse.ballot)
-; SDAG-ERR: LLVM ERROR: Cannot select: intrinsic %llvm.amdgcn.inverse.ballot
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -mattr=+wavefrontsize64 -global-isel=1 -verify-machineinstrs < %s | FileCheck -check-prefix=GISEL_W64 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -mattr=+wavefrontsize64 -global-isel=0 -verify-machineinstrs < %s | FileCheck -check-prefix=SDAG_W64 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -mattr=+wavefrontsize32 -global-isel=1 -verify-machineinstrs < %s | FileCheck -check-prefix=GISEL_W32 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -mattr=+wavefrontsize32 -global-isel=0 -verify-machineinstrs < %s | FileCheck -check-prefix=SDAG_W32 %s
declare i1 @llvm.amdgcn.inverse.ballot.i64(i64)
; Test ballot(0)
define amdgpu_cs void @constant_false_inverse_ballot(ptr addrspace(1) %out) {
-; GISEL-LABEL: constant_false_inverse_ballot:
-; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: s_mov_b64 s[0:1], 0
-; GISEL-NEXT: v_mov_b32_e32 v3, 0
-; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
-; GISEL-NEXT: s_endpgm
-;
-; SDAG-LABEL: constant_false_inverse_ballot:
-; SDAG: ; %bb.0: ; %entry
-; SDAG-NEXT: s_mov_b32 s2, 0
-; SDAG-NEXT: s_mov_b64 s[0:1], 0
-; SDAG-NEXT: v_mov_b32_e32 v3, s2
-; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; SDAG-NEXT: global_store_b64 v[0:1], v[2:3], off
-; SDAG-NEXT: s_endpgm
+; GISEL_W64-LABEL: constant_false_inverse_ballot:
+; GISEL_W64: ; %bb.0: ; %entry
+; GISEL_W64-NEXT: s_mov_b64 s[0:1], 0
+; GISEL_W64-NEXT: v_mov_b32_e32 v3, 0
+; GISEL_W64-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GISEL_W64-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GISEL_W64-NEXT: s_endpgm
+;
+; SDAG_W64-LABEL: constant_false_inverse_ballot:
+; SDAG_W64: ; %bb.0: ; %entry
+; SDAG_W64-NEXT: s_mov_b32 s2, 0
+; SDAG_W64-NEXT: s_mov_b64 s[0:1], 0
+; SDAG_W64-NEXT: v_mov_b32_e32 v3, s2
+; SDAG_W64-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; SDAG_W64-NEXT: global_store_b64 v[0:1], v[2:3], off
+; SDAG_W64-NEXT: s_endpgm
+;
+; GISEL_W32-LABEL: constant_false_inverse_ballot:
+; GISEL_W32: ; %bb.0: ; %entry
+; GISEL_W32-NEXT: s_mov_b32 s0, 0
+; GISEL_W32-NEXT: v_mov_b32_e32 v3, 0
+; GISEL_W32-NEXT: v_cndmask_b32_e64 v2, 0, 1, s0
+; GISEL_W32-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GISEL_W32-NEXT: s_endpgm
+;
+; SDAG_W32-LABEL: constant_false_inverse_ballot:
+; SDAG_W32: ; %bb.0: ; %entry
+; SDAG_W32-NEXT: s_mov_b32 s0, 0
+; SDAG_W32-NEXT: v_cndmask_b32_e64 v2, 0, 1, s0
+; SDAG_W32-NEXT: v_mov_b32_e32 v3, s0
+; SDAG_W32-NEXT: global_store_b64 v[0:1], v[2:3], off
+; SDAG_W32-NEXT: s_endpgm
entry:
%ballot = call i1 @llvm.amdgcn.inverse.ballot.i64(i64 0)
%sel = select i1 %ballot, i64 1, i64 0
@@ -38,22 +50,39 @@ entry:
; Test ballot(1)
define amdgpu_cs void @constant_true_inverse_ballot(ptr addrspace(1) %out) {
-; GISEL-LABEL: constant_true_inverse_ballot:
-; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: s_mov_b64 s[0:1], -1
-; GISEL-NEXT: v_mov_b32_e32 v3, 0
-; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
-; GISEL-NEXT: s_endpgm
-;
-; SDAG-LABEL: constant_true_inverse_ballot:
-; SDAG: ; %bb.0: ; %entry
-; SDAG-NEXT: s_mov_b32 s2, 0
-; SDAG-NEXT: s_mov_b64 s[0:1], -1
-; SDAG-NEXT: v_mov_b32_e32 v3, s2
-; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; SDAG-NEXT: global_store_b64 v[0:1], v[2:3], off
-; SDAG-NEXT: s_endpgm
+; GISEL_W64-LABEL: constant_true_inverse_ballot:
+; GISEL_W64: ; %bb.0: ; %entry
+; GISEL_W64-NEXT: s_mov_b64 s[0:1], -1
+; GISEL_W64-NEXT: v_mov_b32_e32 v3, 0
+; GISEL_W64-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GISEL_W64-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GISEL_W64-NEXT: s_endpgm
+;
+; SDAG_W64-LABEL: constant_true_inverse_ballot:
+; SDAG_W64: ; %bb.0: ; %entry
+; SDAG_W64-NEXT: s_mov_b32 s2, 0
+; SDAG_W64-NEXT: s_mov_b64 s[0:1], -1
+; SDAG_W64-NEXT: v_mov_b32_e32 v3, s2
+; SDAG_W64-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; SDAG_W64-NEXT: global_store_b64 v[0:1], v[2:3], off
+; SDAG_W64-NEXT: s_endpgm
+;
+; GISEL_W32-LABEL: constant_true_inverse_ballot:
+; GISEL_W32: ; %bb.0: ; %entry
+; GISEL_W32-NEXT: s_mov_b32 s0, -1
+; GISEL_W32-NEXT: v_mov_b32_e32 v3, 0
+; GISEL_W32-NEXT: v_cndmask_b32_e64 v2, 0, 1, s0
+; GISEL_W32-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GISEL_W32-NEXT: s_endpgm
+;
+; SDAG_W32-LABEL: constant_true_inverse_ballot:
+; SDAG_W32: ; %bb.0: ; %entry
+; SDAG_W32-NEXT: s_mov_b32 s0, 0
+; SDAG_W32-NEXT: s_mov_b32 s1, -1
+; SDAG_W32-NEXT: v_mov_b32_e32 v3, s0
+; SDAG_W32-NEXT: v_cndmask_b32_e64 v2, 0, 1, s1
+; SDAG_W32-NEXT: global_store_b64 v[0:1], v[2:3], off
+; SDAG_W32-NEXT: s_endpgm
entry:
%ballot = call i1 @llvm.amdgcn.inverse.ballot.i64(i64 u0xFFFFFFFFFFFFFFFF)
%sel = select i1 %ballot, i64 1, i64 0
@@ -64,24 +93,41 @@ entry:
; Test ballot(u0x0040F8010000)
define amdgpu_cs void @constant_mask_inverse_ballot(ptr addrspace(1) %out) {
-; GISEL-LABEL: constant_mask_inverse_ballot:
-; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: s_mov_b32 s0, 0xf8010000
-; GISEL-NEXT: s_mov_b32 s1, 64
-; GISEL-NEXT: v_mov_b32_e32 v3, 0
-; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
-; GISEL-NEXT: s_endpgm
-;
-; SDAG-LABEL: constant_mask_inverse_ballot:
-; SDAG: ; %bb.0: ; %entry
-; SDAG-NEXT: s_mov_b32 s0, 0xf8010000
-; SDAG-NEXT: s_mov_b32 s1, 64
-; SDAG-NEXT: s_mov_b32 s2, 0
-; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; SDAG-NEXT: v_mov_b32_e32 v3, s2
-; SDAG-NEXT: global_store_b64 v[0:1], v[2:3], off
-; SDAG-NEXT: s_endpgm
+; GISEL_W64-LABEL: constant_mask_inverse_ballot:
+; GISEL_W64: ; %bb.0: ; %entry
+; GISEL_W64-NEXT: s_mov_b32 s0, 0xf8010000
+; GISEL_W64-NEXT: s_mov_b32 s1, 64
+; GISEL_W64-NEXT: v_mov_b32_e32 v3, 0
+; GISEL_W64-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GISEL_W64-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GISEL_W64-NEXT: s_endpgm
+;
+; SDAG_W64-LABEL: constant_mask_inverse_ballot:
+; SDAG_W64: ; %bb.0: ; %entry
+; SDAG_W64-NEXT: s_mov_b32 s0, 0xf8010000
+; SDAG_W64-NEXT: s_mov_b32 s1, 64
+; SDAG_W64-NEXT: s_mov_b32 s2, 0
+; SDAG_W64-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; SDAG_W64-NEXT: v_mov_b32_e32 v3, s2
+; SDAG_W64-NEXT: global_store_b64 v[0:1], v[2:3], off
+; SDAG_W64-NEXT: s_endpgm
+;
+; GISEL_W32-LABEL: constant_mask_inverse_ballot:
+; GISEL_W32: ; %bb.0: ; %entry
+; GISEL_W32-NEXT: s_mov_b32 s0, 0xf8010000
+; GISEL_W32-NEXT: v_mov_b32_e32 v3, 0
+; GISEL_W32-NEXT: v_cndmask_b32_e64 v2, 0, 1, s0
+; GISEL_W32-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GISEL_W32-NEXT: s_endpgm
+;
+; SDAG_W32-LABEL: constant_mask_inverse_ballot:
+; SDAG_W32: ; %bb.0: ; %entry
+; SDAG_W32-NEXT: s_mov_b32 s0, 0
+; SDAG_W32-NEXT: s_mov_b32 s1, 0xf8010000
+; SDAG_W32-NEXT: v_mov_b32_e32 v3, s0
+; SDAG_W32-NEXT: v_cndmask_b32_e64 v2, 0, 1, s1
+; SDAG_W32-NEXT: global_store_b64 v[0:1], v[2:3], off
+; SDAG_W32-NEXT: s_endpgm
entry:
%ballot = call i1 @llvm.amdgcn.inverse.ballot.i64(i64 u0x0040F8010000)
%sel = select i1 %ballot, i64 1, i64 0
@@ -92,24 +138,41 @@ entry:
; Test inverse ballot using a vgpr as input
define amdgpu_cs void @vgpr_inverse_ballot(i64 %input, ptr addrspace(1) %out) {
-; GISEL-LABEL: vgpr_inverse_ballot:
-; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: v_readfirstlane_b32 s0, v0
-; GISEL-NEXT: v_readfirstlane_b32 s1, v1
-; GISEL-NEXT: v_mov_b32_e32 v5, 0
-; GISEL-NEXT: v_cndmask_b32_e64 v4, 0, 1, s[0:1]
-; GISEL-NEXT: global_store_b64 v[2:3], v[4:5], off
-; GISEL-NEXT: s_endpgm
-;
-; SDAG-LABEL: vgpr_inverse_ballot:
-; SDAG: ; %bb.0: ; %entry
-; SDAG-NEXT: v_readfirstlane_b32 s0, v0
-; SDAG-NEXT: v_readfirstlane_b32 s1, v1
-; SDAG-NEXT: s_mov_b32 s2, 0
-; SDAG-NEXT: v_mov_b32_e32 v1, s2
-; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; SDAG-NEXT: global_store_b64 v[2:3], v[0:1], off
-; SDAG-NEXT: s_endpgm
+; GISEL_W64-LABEL: vgpr_inverse_ballot:
+; GISEL_W64: ; %bb.0: ; %entry
+; GISEL_W64-NEXT: v_readfirstlane_b32 s0, v0
+; GISEL_W64-NEXT: v_readfirstlane_b32 s1, v1
+; GISEL_W64-NEXT: v_mov_b32_e32 v5, 0
+; GISEL_W64-NEXT: v_cndmask_b32_e64 v4, 0, 1, s[0:1]
+; GISEL_W64-NEXT: global_store_b64 v[2:3], v[4:5], off
+; GISEL_W64-NEXT: s_endpgm
+;
+; SDAG_W64-LABEL: vgpr_inverse_ballot:
+; SDAG_W64: ; %bb.0: ; %entry
+; SDAG_W64-NEXT: v_readfirstlane_b32 s0, v0
+; SDAG_W64-NEXT: v_readfirstlane_b32 s1, v1
+; SDAG_W64-NEXT: s_mov_b32 s2, 0
+; SDAG_W64-NEXT: v_mov_b32_e32 v1, s2
+; SDAG_W64-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; SDAG_W64-NEXT: global_store_b64 v[2:3], v[0:1], off
+; SDAG_W64-NEXT: s_endpgm
+;
+; GISEL_W32-LABEL: vgpr_inverse_ballot:
+; GISEL_W32: ; %bb.0: ; %entry
+; GISEL_W32-NEXT: v_readfirstlane_b32 s0, v0
+; GISEL_W32-NEXT: v_mov_b32_e32 v1, 0
+; GISEL_W32-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GISEL_W32-NEXT: global_store_b64 v[2:3], v[0:1], off
+; GISEL_W32-NEXT: s_endpgm
+;
+; SDAG_W32-LABEL: vgpr_inverse_ballot:
+; SDAG_W32: ; %bb.0: ; %entry
+; SDAG_W32-NEXT: v_readfirstlane_b32 s1, v0
+; SDAG_W32-NEXT: s_mov_b32 s0, 0
+; SDAG_W32-NEXT: v_mov_b32_e32 v1, s0
+; SDAG_W32-NEXT: v_cndmask_b32_e64 v0, 0, 1, s1
+; SDAG_W32-NEXT: global_store_b64 v[2:3], v[0:1], off
+; SDAG_W32-NEXT: s_endpgm
entry:
%ballot = call i1 @llvm.amdgcn.inverse.ballot.i64(i64 %input)
%sel = select i1 %ballot, i64 1, i64 0
@@ -118,21 +181,36 @@ entry:
}
define amdgpu_cs void @sgpr_inverse_ballot(i64 inreg %input, ptr addrspace(1) %out) {
-; GISEL-LABEL: sgpr_inverse_ballot:
-; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; GISEL-NEXT: v_mov_b32_e32 v3, 0
-; GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
-; GISEL-NEXT: s_endpgm
-;
-; SDAG-LABEL: sgpr_inverse_ballot:
-; SDAG: ; %bb.0: ; %entry
-; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; SDAG-NEXT: s_mov_b32 s0, 0
-; SDAG-NEXT: s_waitcnt_depctr 0xfffe
-; SDAG-NEXT: v_mov_b32_e32 v3, s0
-; SDAG-NEXT: global_store_b64 v[0:1], v[2:3], off
-; SDAG-NEXT: s_endpgm
+; GISEL_W64-LABEL: sgpr_inverse_ballot:
+; GISEL_W64: ; %bb.0: ; %entry
+; GISEL_W64-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GISEL_W64-NEXT: v_mov_b32_e32 v3, 0
+; GISEL_W64-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GISEL_W64-NEXT: s_endpgm
+;
+; SDAG_W64-LABEL: sgpr_inverse_ballot:
+; SDAG_W64: ; %bb.0: ; %entry
+; SDAG_W64-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; SDAG_W64-NEXT: s_mov_b32 s0, 0
+; SDAG_W64-NEXT: s_waitcnt_depctr 0xfffe
+; SDAG_W64-NEXT: v_mov_b32_e32 v3, s0
+; SDAG_W64-NEXT: global_store_b64 v[0:1], v[2:3], off
+; SDAG_W64-NEXT: s_endpgm
+;
+; GISEL_W32-LABEL: sgpr_inverse_ballot:
+; GISEL_W32: ; %bb.0: ; %entry
+; GISEL_W32-NEXT: v_cndmask_b32_e64 v2, 0, 1, s0
+; GISEL_W32-NEXT: v_mov_b32_e32 v3, 0
+; GISEL_W32-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GISEL_W32-NEXT: s_endpgm
+;
+; SDAG_W32-LABEL: sgpr_inverse_ballot:
+; SDAG_W32: ; %bb.0: ; %entry
+; SDAG_W32-NEXT: v_cndmask_b32_e64 v2, 0, 1, s0
+; SDAG_W32-NEXT: s_mov_b32 s0, 0
+; SDAG_W32-NEXT: v_mov_b32_e32 v3, s0
+; SDAG_W32-NEXT: global_store_b64 v[0:1], v[2:3], off
+; SDAG_W32-NEXT: s_endpgm
entry:
%ballot = call i1 @llvm.amdgcn.inverse.ballot.i64(i64 %input)
%sel = select i1 %ballot, i64 1, i64 0
@@ -142,32 +220,57 @@ entry:
; Test ballot after phi
define amdgpu_cs void @phi_uniform(i64 inreg %s0_1, i64 inreg %s2, ptr addrspace(1) %out) {
-; GISEL-LABEL: phi_uniform:
-; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0
-; GISEL-NEXT: s_cbranch_scc1 .LBB5_2
-; GISEL-NEXT: ; %bb.1: ; %if
-; GISEL-NEXT: s_add_u32 s0, s0, 1
-; GISEL-NEXT: s_addc_u32 s1, s1, 0
-; GISEL-NEXT: .LBB5_2: ; %endif
-; GISEL-NEXT: v_mov_b32_e32 v3, 0
-; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
-; GISEL-NEXT: s_endpgm
-;
-; SDAG-LABEL: phi_uniform:
-; SDAG: ; %bb.0: ; %entry
-; SDAG-NEXT: s_cmp_lg_u64 s[2:3], 0
-; SDAG-NEXT: s_cbranch_scc1 .LBB5_2
-; SDAG-NEXT: ; %bb.1: ; %if
-; SDAG-NEXT: s_add_u32 s0, s0, 1
-; SDAG-NEXT: s_addc_u32 s1, s1, 0
-; SDAG-NEXT: .LBB5_2: ; %endif
-; SDAG-NEXT: s_mov_b32 s2, 0
-; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; SDAG-NEXT: v_mov_b32_e32 v3, s2
-; SDAG-NEXT: global_store_b64 v[0:1], v[2:3], off
-; SDAG-NEXT: s_endpgm
+; GISEL_W64-LABEL: phi_uniform:
+; GISEL_W64: ; %bb.0: ; %entry
+; GISEL_W64-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GISEL_W64-NEXT: s_cbranch_scc1 .LBB5_2
+; GISEL_W64-NEXT: ; %bb.1: ; %if
+; GISEL_W64-NEXT: s_add_u32 s0, s0, 1
+; GISEL_W64-NEXT: s_addc_u32 s1, s1, 0
+; GISEL_W64-NEXT: .LBB5_2: ; %endif
+; GISEL_W64-NEXT: v_mov_b32_e32 v3, 0
+; GISEL_W64-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GISEL_W64-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GISEL_W64-NEXT: s_endpgm
+;
+; SDAG_W64-LABEL: phi_uniform:
+; SDAG_W64: ; %bb.0: ; %entry
+; SDAG_W64-NEXT: s_cmp_lg_u64 s[2:3], 0
+; SDAG_W64-NEXT: s_cbranch_scc1 .LBB5_2
+; SDAG_W64-NEXT: ; %bb.1: ; %if
+; SDAG_W64-NEXT: s_add_u32 s0, s0, 1
+; SDAG_W64-NEXT: s_addc_u32 s1, s1, 0
+; SDAG_W64-NEXT: .LBB5_2: ; %endif
+; SDAG_W64-NEXT: s_mov_b32 s2, 0
+; SDAG_W64-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; SDAG_W64-NEXT: v_mov_b32_e32 v3, s2
+; SDAG_W64-NEXT: global_store_b64 v[0:1], v[2:3], off
+; SDAG_W64-NEXT: s_endpgm
+;
+; GISEL_W32-LABEL: phi_uniform:
+; GISEL_W32: ; %bb.0: ; %entry
+; GISEL_W32-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GISEL_W32-NEXT: s_cbranch_scc1 .LBB5_2
+; GISEL_W32-NEXT: ; %bb.1: ; %if
+; GISEL_W32-NEXT: s_add_u32 s0, s0, 1
+; GISEL_W32-NEXT: .LBB5_2: ; %endif
+; GISEL_W32-NEXT: v_mov_b32_e32 v3, 0
+; GISEL_W32-NEXT: v_cndmask_b32_e64 v2, 0, 1, s0
+; GISEL_W32-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GISEL_W32-NEXT: s_endpgm
+;
+; SDAG_W32-LABEL: phi_uniform:
+; SDAG_W32: ; %bb.0: ; %entry
+; SDAG_W32-NEXT: s_cmp_lg_u64 s[2:3], 0
+; SDAG_W32-NEXT: s_cbranch_scc1 .LBB5_2
+; SDAG_W32-NEXT: ; %bb.1: ; %if
+; SDAG_W32-NEXT: s_add_u32 s0, s0, 1
+; SDAG_W32-NEXT: .LBB5_2: ; %endif
+; SDAG_W32-NEXT: s_mov_b32 s1, 0
+; SDAG_W32-NEXT: v_cndmask_b32_e64 v2, 0, 1, s0
+; SDAG_W32-NEXT: v_mov_b32_e32 v3, s1
+; SDAG_W32-NEXT: global_store_b64 v[0:1], v[2:3], off
+; SDAG_W32-NEXT: s_endpgm
entry:
%cc = icmp ne i64 %s2, 0
br i1 %cc, label %endif, label %if
@@ -186,39 +289,65 @@ endif:
}
; Test for branching
-; GISel implementation is currently incorrect.
; The change in the branch affects all lanes, not just the branching ones.
; This test will be fixed once GISel correctly takes uniformity analysis into account.
define amdgpu_cs void @inverse_ballot_branch(i64 inreg %s0_1, i64 inreg %s2, ptr addrspace(1) %out) {
-; GISEL-LABEL: inverse_ballot_branch:
-; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: s_xor_b64 s[4:5], s[2:3], -1
-; GISEL-NEXT: s_and_saveexec_b64 s[2:3], s[4:5]
-; GISEL-NEXT: ; %bb.1: ; %if
-; GISEL-NEXT: s_add_u32 s0, s0, 1
-; GISEL-NEXT: s_addc_u32 s1, s1, 0
-; GISEL-NEXT: ; %bb.2: ; %endif
-; GISEL-NEXT: s_or_b64 exec, exec, s[2:3]
-; GISEL-NEXT: v_mov_b32_e32 v3, s1
-; GISEL-NEXT: v_mov_b32_e32 v2, s0
-; GISEL-NEXT: global_store_b64 v[0:1], v[2:3], off
-; GISEL-NEXT: s_endpgm
-;
-; SDAG-LABEL: inverse_ballot_branch:
-; SDAG: ; %bb.0: ; %entry
-; SDAG-NEXT: v_mov_b32_e32 v3, s1
-; SDAG-NEXT: v_mov_b32_e32 v2, s0
-; SDAG-NEXT: s_xor_b64 s[4:5], s[2:3], -1
-; SDAG-NEXT: s_and_saveexec_b64 s[2:3], s[4:5]
-; SDAG-NEXT: ; %bb.1: ; %if
-; SDAG-NEXT: s_add_u32 s0, s0, 1
-; SDAG-NEXT: s_addc_u32 s1, s1, 0
-; SDAG-NEXT: v_mov_b32_e32 v3, s1
-; SDAG-NEXT: v_mov_b32_e32 v2, s0
-; SDAG-NEXT: ; %bb.2: ; %endif
-; SDAG-NEXT: s_or_b64 exec, exec, s[2:3]
-; SDAG-NEXT: global_store_b64 v[0:1], v[2:3], off
-; SDAG-NEXT: s_endpgm
+; GISEL_W64-LABEL: inverse_ballot_branch:
+; GISEL_W64: ; %bb.0: ; %entry
+; GISEL_W64-NEXT: s_xor_b64 s[4:5], s[2:3], -1
+; GISEL_W64-NEXT: s_and_saveexec_b64 s[2:3], s[4:5]
+; GISEL_W64-NEXT: ; %bb.1: ; %if
+; GISEL_W64-NEXT: s_add_u32 s0, s0, 1
+; GISEL_W64-NEXT: s_addc_u32 s1, s1, 0
+; GISEL_W64-NEXT: ; %bb.2: ; %endif
+; GISEL_W64-NEXT: s_or_b64 exec, exec, s[2:3]
+; GISEL_W64-NEXT: v_mov_b32_e32 v3, s1
+; GISEL_W64-NEXT: v_mov_b32_e32 v2, s0
+; GISEL_W64-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GISEL_W64-NEXT: s_endpgm
+;
+; SDAG_W64-LABEL: inverse_ballot_branch:
+; SDAG_W64: ; %bb.0: ; %entry
+; SDAG_W64-NEXT: v_mov_b32_e32 v3, s1
+; SDAG_W64-NEXT: v_mov_b32_e32 v2, s0
+; SDAG_W64-NEXT: s_xor_b64 s[4:5], s[2:3], -1
+; SDAG_W64-NEXT: s_and_saveexec_b64 s[2:3], s[4:5]
+; SDAG_W64-NEXT: ; %bb.1: ; %if
+; SDAG_W64-NEXT: s_add_u32 s0, s0, 1
+; SDAG_W64-NEXT: s_addc_u32 s1, s1, 0
+; SDAG_W64-NEXT: v_mov_b32_e32 v3, s1
+; SDAG_W64-NEXT: v_mov_b32_e32 v2, s0
+; SDAG_W64-NEXT: ; %bb.2: ; %endif
+; SDAG_W64-NEXT: s_or_b64 exec, exec, s[2:3]
+; SDAG_W64-NEXT: global_store_b64 v[0:1], v[2:3], off
+; SDAG_W64-NEXT: s_endpgm
+;
+; GISEL_W32-LABEL: inverse_ballot_branch:
+; GISEL_W32: ; %bb.0: ; %entry
+; GISEL_W32-NEXT: s_xor_b32 s3, s2, -1
+; GISEL_W32-NEXT: s_and_saveexec_b32 s2, s3
+; GISEL_W32-NEXT: ; %bb.1: ; %if
+; GISEL_W32-NEXT: s_add_u32 s0, s0, 1
+; GISEL_W32-NEXT: s_addc_u32 s1, s1, 0
+; GISEL_W32-NEXT: ; %bb.2: ; %endif
+; GISEL_W32-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GISEL_W32-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GISEL_W32-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GISEL_W32-NEXT: s_endpgm
+;
+; SDAG_W32-LABEL: inverse_ballot_branch:
+; SDAG_W32: ; %bb.0: ; %entry
+; SDAG_W32-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; SDAG_W32-NEXT: s_xor_b32 s3, s2, -1
+; SDAG_W32-NEXT: s_and_saveexec_b32 s2, s3
+; SDAG_W32-NEXT: ; %bb.1: ; %if
+; SDAG_W32-NEXT: s_add_u32 s0, s0, 1
+; SDAG_W32-NEXT: s_addc_u32 s1, s1, 0
+; SDAG_W32-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; SDAG_W32-NEXT: ; %bb.2: ; %endif
+; SDAG_W32-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; SDAG_W32-NEXT: global_store_b64 v[0:1], v[2:3], off
+; SDAG_W32-NEXT: s_endpgm
entry:
%ballot = call i1 @llvm.amdgcn.inverse.ballot.i64(i64 %s2)
br i1 %ballot, label %endif, label %if