[llvm] [AMDGPU] Simplify cond branch if condition is known (PR #180081)
Jay Foad via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 17 02:32:28 PST 2026
================
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a < %s | FileCheck %s
+
+define amdgpu_kernel void @_start(ptr %0) {
+; CHECK-LABEL: _start:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; CHECK-NEXT: s_add_u32 flat_scratch_lo, s12, s17
+; CHECK-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
+; CHECK-NEXT: s_mov_b64 s[2:3], 0
+; CHECK-NEXT: ; %bb.1: ; %dynamic-memcpy-expansion-main-body.preheader
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: .LBB0_2: ; %dynamic-memcpy-expansion-main-body
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v0, s[2:3]
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_add_u32 s4, s0, s2
+; CHECK-NEXT: s_addc_u32 s5, s1, s3
+; CHECK-NEXT: s_add_u32 s2, s2, 16
+; CHECK-NEXT: s_addc_u32 s3, s3, 0
+; CHECK-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
+; CHECK-NEXT: v_cmp_lt_u64_e64 s[4:5], s[2:3], 16
+; CHECK-NEXT: s_and_b64 vcc, exec, s[4:5]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_dwordx4 v[2:3], v[4:7]
+; CHECK-NEXT: s_cbranch_vccnz .LBB0_2
+; CHECK-NEXT: ; %bb.3: ; %dynamic-memcpy-expansion-residual-body.preheader
+; CHECK-NEXT: s_sub_u32 s2, 29, 13
+; CHECK-NEXT: s_subb_u32 s3, 0, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_add_u32 s2, s0, s2
+; CHECK-NEXT: s_addc_u32 s3, s1, s3
+; CHECK-NEXT: s_sub_u32 s4, 0, 13
+; CHECK-NEXT: s_mov_b64 s[0:1], 0
+; CHECK-NEXT: s_subb_u32 s5, 0, 0
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: .LBB0_4: ; %dynamic-memcpy-expansion-residual-body
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_add_u32 s6, s4, s0
+; CHECK-NEXT: s_addc_u32 s7, s5, s1
+; CHECK-NEXT: global_load_ubyte v1, v0, s[6:7] offset:29
+; CHECK-NEXT: s_add_u32 s6, s2, s0
+; CHECK-NEXT: s_addc_u32 s7, s3, s1
+; CHECK-NEXT: s_add_u32 s0, s0, 1
+; CHECK-NEXT: s_addc_u32 s1, s1, 0
+; CHECK-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1]
+; CHECK-NEXT: v_cmp_lt_u64_e64 s[6:7], s[0:1], 13
+; CHECK-NEXT: s_and_b64 vcc, exec, s[6:7]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_store_byte v[2:3], v1
+; CHECK-NEXT: s_cbranch_vccnz .LBB0_4
+; CHECK-NEXT: ; %bb.5: ; %dynamic-memcpy-post-expansion
+; CHECK-NEXT: s_endpgm
+ call void @llvm.memcpy.p0.p4.i64(ptr %0, ptr addrspace(4) null, i64 add (i64 sub (i64 16, i64 ptrtoint (ptr addrspacecast (ptr addrspace(4) null to ptr) to i64)), i64 13), i1 false)
----------------
jayfoad wrote:
I still think this should be optimized at the IR level if possible. I don't know where exactly, but there must be some place where we have enough target knowledge to optimize this into a simple constant `i64 29`.
https://github.com/llvm/llvm-project/pull/180081
More information about the llvm-commits
mailing list