[llvm] 66bd845 - [AMDGPU][TargetLowering] Allow forming overflow op if it is legal (#156266)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 25 06:25:18 PDT 2025
Author: AZero13
Date: 2025-09-25T22:25:14+09:00
New Revision: 66bd8456fccf677bb1cab1d8ba3095006eb73709
URL: https://github.com/llvm/llvm-project/commit/66bd8456fccf677bb1cab1d8ba3095006eb73709
DIFF: https://github.com/llvm/llvm-project/commit/66bd8456fccf677bb1cab1d8ba3095006eb73709.diff
LOG: [AMDGPU][TargetLowering] Allow forming overflow op if it is legal (#156266)
Because usubo and uaddo are legal in AMDGPU in 32 bits, we want to use
it whenever possible.
Added:
Modified:
llvm/include/llvm/CodeGen/TargetLowering.h
llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll
llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
llvm/test/CodeGen/AMDGPU/sad.ll
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 75c073bb3871c..c45e03a7bdad8 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -3455,6 +3455,10 @@ class LLVM_ABI TargetLoweringBase {
/// matching of other patterns.
virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
bool MathUsed) const {
+ // Form it if it is legal.
+ if (isOperationLegal(Opcode, VT))
+ return true;
+
// TODO: The default logic is inherited from code in CodeGenPrepare.
// The opcode should not make a difference by default?
if (Opcode != ISD::UADDO)
diff --git a/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll b/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
index e6f02295e67d5..3f699a5ca218b 100644
--- a/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
+++ b/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
@@ -98,14 +98,12 @@ define amdgpu_kernel void @s_add_co_br_user(i32 %i) {
; GFX7-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; GFX7-NEXT: s_mov_b32 flat_scratch_lo, s13
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: s_add_i32 s0, s2, s2
-; GFX7-NEXT: s_cmp_lt_u32 s0, s2
-; GFX7-NEXT: s_cselect_b64 s[0:1], -1, 0
-; GFX7-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX7-NEXT: v_add_i32_e64 v0, s[0:1], s2, s2
; GFX7-NEXT: s_or_b32 s0, s0, s1
; GFX7-NEXT: s_cmp_lg_u32 s0, 0
; GFX7-NEXT: s_addc_u32 s0, s2, 0
-; GFX7-NEXT: v_cmp_ge_u32_e32 vcc, s0, v0
+; GFX7-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX7-NEXT: s_andn2_b64 vcc, exec, s[0:1]
; GFX7-NEXT: s_cbranch_vccnz .LBB1_2
; GFX7-NEXT: ; %bb.1: ; %bb0
; GFX7-NEXT: v_mov_b32_e32 v0, 0
@@ -125,13 +123,11 @@ define amdgpu_kernel void @s_add_co_br_user(i32 %i) {
; GFX9: ; %bb.0: ; %bb
; GFX9-NEXT: s_load_dword s2, s[8:9], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_add_i32 s0, s2, s2
-; GFX9-NEXT: s_cmp_lt_u32 s0, s2
-; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: v_add_co_u32_e64 v0, s[0:1], s2, s2
; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX9-NEXT: s_addc_u32 s0, s2, 0
-; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, s0, v0
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[0:1]
; GFX9-NEXT: s_cbranch_vccnz .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %bb0
; GFX9-NEXT: v_mov_b32_e32 v0, 0
@@ -151,13 +147,11 @@ define amdgpu_kernel void @s_add_co_br_user(i32 %i) {
; GFX10: ; %bb.0: ; %bb
; GFX10-NEXT: s_load_dword s0, s[8:9], 0x0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_add_i32 s1, s0, s0
-; GFX10-NEXT: s_cmp_lt_u32 s1, s0
-; GFX10-NEXT: s_cselect_b32 s1, -1, 0
-; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s1
+; GFX10-NEXT: v_add_co_u32 v0, s1, s0, s0
; GFX10-NEXT: s_cmp_lg_u32 s1, 0
; GFX10-NEXT: s_addc_u32 s0, s0, 0
-; GFX10-NEXT: v_cmp_ge_u32_e32 vcc_lo, s0, v0
+; GFX10-NEXT: s_cselect_b32 s0, -1, 0
+; GFX10-NEXT: s_andn2_b32 vcc_lo, exec_lo, s0
; GFX10-NEXT: s_cbranch_vccnz .LBB1_2
; GFX10-NEXT: ; %bb.1: ; %bb0
; GFX10-NEXT: v_mov_b32_e32 v0, 0
@@ -177,15 +171,12 @@ define amdgpu_kernel void @s_add_co_br_user(i32 %i) {
; GFX11: ; %bb.0: ; %bb
; GFX11-NEXT: s_load_b32 s0, s[4:5], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_add_i32 s1, s0, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_cmp_lt_u32 s1, s0
-; GFX11-NEXT: s_cselect_b32 s1, -1, 0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s1
+; GFX11-NEXT: v_add_co_u32 v0, s1, s0, s0
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_addc_u32 s0, s0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_cmp_ge_u32_e32 vcc_lo, s0, v0
+; GFX11-NEXT: s_cselect_b32 s0, -1, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %bb0
; GFX11-NEXT: v_mov_b32_e32 v0, 0
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll
index e74ad3d62bea4..47161954cc332 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll
@@ -8946,8 +8946,7 @@ define void @flat_atomic_udec_wrap_i32_noret(ptr %ptr, i32 %in) {
; GCN1-NEXT: .LBB141_1: ; %atomicrmw.start
; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, -1, v4
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GCN1-NEXT: v_subrev_i32_e32 v3, vcc, 1, v4
; GCN1-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; GCN1-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GCN1-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
@@ -8971,8 +8970,7 @@ define void @flat_atomic_udec_wrap_i32_noret(ptr %ptr, i32 %in) {
; GCN2-NEXT: .LBB141_1: ; %atomicrmw.start
; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, -1, v4
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GCN2-NEXT: v_subrev_u32_e32 v3, vcc, 1, v4
; GCN2-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; GCN2-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GCN2-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
@@ -8996,9 +8994,8 @@ define void @flat_atomic_udec_wrap_i32_noret(ptr %ptr, i32 %in) {
; GCN3-NEXT: .LBB141_1: ; %atomicrmw.start
; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GCN3-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4
; GCN3-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; GCN3-NEXT: v_add_u32_e32 v3, -1, v4
; GCN3-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GCN3-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
@@ -9027,8 +9024,7 @@ define void @flat_atomic_udec_wrap_i32_noret_offset(ptr %out, i32 %in) {
; GCN1-NEXT: .LBB142_1: ; %atomicrmw.start
; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, -1, v4
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GCN1-NEXT: v_subrev_i32_e32 v3, vcc, 1, v4
; GCN1-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; GCN1-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GCN1-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
@@ -9054,8 +9050,7 @@ define void @flat_atomic_udec_wrap_i32_noret_offset(ptr %out, i32 %in) {
; GCN2-NEXT: .LBB142_1: ; %atomicrmw.start
; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, -1, v4
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GCN2-NEXT: v_subrev_u32_e32 v3, vcc, 1, v4
; GCN2-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; GCN2-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GCN2-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
@@ -9079,9 +9074,8 @@ define void @flat_atomic_udec_wrap_i32_noret_offset(ptr %out, i32 %in) {
; GCN3-NEXT: .LBB142_1: ; %atomicrmw.start
; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GCN3-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4
; GCN3-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; GCN3-NEXT: v_add_u32_e32 v3, -1, v4
; GCN3-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GCN3-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
@@ -9110,8 +9104,7 @@ define i32 @flat_atomic_udec_wrap_i32_ret(ptr %ptr, i32 %in) {
; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v4, v3
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, -1, v4
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GCN1-NEXT: v_subrev_i32_e32 v3, vcc, 1, v4
; GCN1-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; GCN1-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GCN1-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
@@ -9136,8 +9129,7 @@ define i32 @flat_atomic_udec_wrap_i32_ret(ptr %ptr, i32 %in) {
; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v4, v3
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, -1, v4
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GCN2-NEXT: v_subrev_u32_e32 v3, vcc, 1, v4
; GCN2-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; GCN2-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GCN2-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
@@ -9162,9 +9154,8 @@ define i32 @flat_atomic_udec_wrap_i32_ret(ptr %ptr, i32 %in) {
; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GCN3-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4
; GCN3-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; GCN3-NEXT: v_add_u32_e32 v3, -1, v4
; GCN3-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GCN3-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
@@ -9194,8 +9185,7 @@ define i32 @flat_atomic_udec_wrap_i32_ret_offset(ptr %out, i32 %in) {
; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, -1, v1
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN1-NEXT: v_subrev_i32_e32 v0, vcc, 1, v1
; GCN1-NEXT: v_cmp_gt_u32_e64 s[4:5], v1, v2
; GCN1-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GCN1-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
@@ -9221,8 +9211,7 @@ define i32 @flat_atomic_udec_wrap_i32_ret_offset(ptr %out, i32 %in) {
; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, -1, v1
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN2-NEXT: v_subrev_u32_e32 v0, vcc, 1, v1
; GCN2-NEXT: v_cmp_gt_u32_e64 s[4:5], v1, v2
; GCN2-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GCN2-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
@@ -9246,9 +9235,8 @@ define i32 @flat_atomic_udec_wrap_i32_ret_offset(ptr %out, i32 %in) {
; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GCN3-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4
; GCN3-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; GCN3-NEXT: v_add_u32_e32 v3, -1, v4
; GCN3-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GCN3-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
@@ -9279,8 +9267,7 @@ define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_scalar(ptr inreg %ptr, i
; GCN1-NEXT: .LBB145_1: ; %atomicrmw.start
; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v2, vcc, -1, v3
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; GCN1-NEXT: v_subrev_i32_e32 v2, vcc, 1, v3
; GCN1-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3
; GCN1-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GCN1-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
@@ -9307,8 +9294,7 @@ define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_scalar(ptr inreg %ptr, i
; GCN2-NEXT: .LBB145_1: ; %atomicrmw.start
; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v2, vcc, -1, v3
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; GCN2-NEXT: v_subrev_u32_e32 v2, vcc, 1, v3
; GCN2-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3
; GCN2-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GCN2-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
@@ -9335,9 +9321,8 @@ define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_scalar(ptr inreg %ptr, i
; GCN3-NEXT: .LBB145_1: ; %atomicrmw.start
; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; GCN3-NEXT: v_subrev_co_u32_e32 v2, vcc, 1, v3
; GCN3-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3
-; GCN3-NEXT: v_add_u32_e32 v2, -1, v3
; GCN3-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GCN3-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GCN3-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
@@ -9369,8 +9354,7 @@ define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_offset_scalar(ptr inreg
; GCN1-NEXT: .LBB146_1: ; %atomicrmw.start
; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v2, vcc, -1, v3
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; GCN1-NEXT: v_subrev_i32_e32 v2, vcc, 1, v3
; GCN1-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3
; GCN1-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GCN1-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
@@ -9399,8 +9383,7 @@ define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_offset_scalar(ptr inreg
; GCN2-NEXT: .LBB146_1: ; %atomicrmw.start
; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v2, vcc, -1, v3
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; GCN2-NEXT: v_subrev_u32_e32 v2, vcc, 1, v3
; GCN2-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3
; GCN2-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GCN2-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
@@ -9427,9 +9410,8 @@ define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_offset_scalar(ptr inreg
; GCN3-NEXT: .LBB146_1: ; %atomicrmw.start
; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; GCN3-NEXT: v_subrev_co_u32_e32 v2, vcc, 1, v3
; GCN3-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3
-; GCN3-NEXT: v_add_u32_e32 v2, -1, v3
; GCN3-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GCN3-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GCN3-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] offset:16 glc
@@ -9463,8 +9445,7 @@ define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_scalar(ptr inreg %ptr, i32
; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v5, v0
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, -1, v5
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GCN1-NEXT: v_subrev_i32_e32 v0, vcc, 1, v5
; GCN1-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5
; GCN1-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GCN1-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc
@@ -9493,8 +9474,7 @@ define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_scalar(ptr inreg %ptr, i32
; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v5, v0
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, -1, v5
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GCN2-NEXT: v_subrev_u32_e32 v0, vcc, 1, v5
; GCN2-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5
; GCN2-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GCN2-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc
@@ -9523,9 +9503,8 @@ define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_scalar(ptr inreg %ptr, i32
; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v5, v0
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GCN3-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v5
; GCN3-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5
-; GCN3-NEXT: v_add_u32_e32 v0, -1, v5
; GCN3-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GCN3-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc
; GCN3-NEXT: flat_atomic_cmpswap v0, v[1:2], v[4:5] glc
@@ -9557,8 +9536,7 @@ define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_offset_scalar(ptr inreg %ou
; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v5, v0
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, -1, v5
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GCN1-NEXT: v_subrev_i32_e32 v0, vcc, 1, v5
; GCN1-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5
; GCN1-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GCN1-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc
@@ -9587,8 +9565,7 @@ define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_offset_scalar(ptr inreg %ou
; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v5, v0
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, -1, v5
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GCN2-NEXT: v_subrev_u32_e32 v0, vcc, 1, v5
; GCN2-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5
; GCN2-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GCN2-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc
@@ -9617,9 +9594,8 @@ define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_offset_scalar(ptr inreg %ou
; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v5, v0
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GCN3-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v5
; GCN3-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5
-; GCN3-NEXT: v_add_u32_e32 v0, -1, v5
; GCN3-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GCN3-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc
; GCN3-NEXT: flat_atomic_cmpswap v0, v[1:2], v[4:5] offset:16 glc
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
index ffab56847edca..1a45bd978ccc1 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
@@ -10195,8 +10195,7 @@ define void @global_atomic_udec_wrap_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; SI-NEXT: .LBB144_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v4
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; SI-NEXT: v_subrev_i32_e32 v3, vcc, 1, v4
; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; SI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; SI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
@@ -10224,8 +10223,7 @@ define void @global_atomic_udec_wrap_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; VI-NEXT: .LBB144_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, -1, v4
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; VI-NEXT: v_subrev_u32_e32 v3, vcc, 1, v4
; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; VI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; VI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
@@ -10249,9 +10247,8 @@ define void @global_atomic_udec_wrap_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; GFX9-NEXT: .LBB144_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4
; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; GFX9-NEXT: v_add_u32_e32 v3, -1, v4
; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
@@ -10282,8 +10279,7 @@ define void @global_atomic_udec_wrap_i32_noret_offset(ptr addrspace(1) %out, i32
; SI-NEXT: .LBB145_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v4
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; SI-NEXT: v_subrev_i32_e32 v3, vcc, 1, v4
; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; SI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; SI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
@@ -10313,8 +10309,7 @@ define void @global_atomic_udec_wrap_i32_noret_offset(ptr addrspace(1) %out, i32
; VI-NEXT: .LBB145_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, -1, v4
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; VI-NEXT: v_subrev_u32_e32 v3, vcc, 1, v4
; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; VI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; VI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
@@ -10338,9 +10333,8 @@ define void @global_atomic_udec_wrap_i32_noret_offset(ptr addrspace(1) %out, i32
; GFX9-NEXT: .LBB145_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4
; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; GFX9-NEXT: v_add_u32_e32 v3, -1, v4
; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
@@ -10374,8 +10368,7 @@ define i32 @global_atomic_udec_wrap_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v5
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; SI-NEXT: v_subrev_i32_e32 v3, vcc, 1, v5
; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v5, v2
; SI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; SI-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc
@@ -10403,8 +10396,7 @@ define i32 @global_atomic_udec_wrap_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v3
-; VI-NEXT: v_add_u32_e32 v3, vcc, -1, v4
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; VI-NEXT: v_subrev_u32_e32 v3, vcc, 1, v4
; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; VI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; VI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
@@ -10429,9 +10421,8 @@ define i32 @global_atomic_udec_wrap_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4
; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; GFX9-NEXT: v_add_u32_e32 v3, -1, v4
; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
@@ -10464,8 +10455,7 @@ define i32 @global_atomic_udec_wrap_i32_ret_offset(ptr addrspace(1) %out, i32 %i
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v5
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; SI-NEXT: v_subrev_i32_e32 v3, vcc, 1, v5
; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v5, v2
; SI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; SI-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc
@@ -10495,8 +10485,7 @@ define i32 @global_atomic_udec_wrap_i32_ret_offset(ptr addrspace(1) %out, i32 %i
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, -1, v1
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; VI-NEXT: v_subrev_u32_e32 v0, vcc, 1, v1
; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v1, v2
; VI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
@@ -10520,9 +10509,8 @@ define i32 @global_atomic_udec_wrap_i32_ret_offset(ptr addrspace(1) %out, i32 %i
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4
; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; GFX9-NEXT: v_add_u32_e32 v3, -1, v4
; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
@@ -10560,8 +10548,7 @@ define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_scalar(ptr addrspace(1
; SI-NEXT: .LBB148_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; SI-NEXT: v_subrev_i32_e32 v0, vcc, 1, v1
; SI-NEXT: v_cmp_lt_u32_e64 s[36:37], s34, v1
; SI-NEXT: s_or_b64 vcc, vcc, s[36:37]
; SI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
@@ -10597,8 +10584,7 @@ define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_scalar(ptr addrspace(1
; VI-NEXT: .LBB148_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v2, vcc, -1, v3
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; VI-NEXT: v_subrev_u32_e32 v2, vcc, 1, v3
; VI-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3
; VI-NEXT: s_or_b64 vcc, vcc, s[34:35]
; VI-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
@@ -10624,9 +10610,8 @@ define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_scalar(ptr addrspace(1
; GFX9-NEXT: .LBB148_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v1
; GFX9-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v1
-; GFX9-NEXT: v_add_u32_e32 v0, -1, v1
; GFX9-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
@@ -10663,8 +10648,7 @@ define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_offset_scalar(ptr addr
; SI-NEXT: .LBB149_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; SI-NEXT: v_subrev_i32_e32 v0, vcc, 1, v1
; SI-NEXT: v_cmp_lt_u32_e64 s[36:37], s34, v1
; SI-NEXT: s_or_b64 vcc, vcc, s[36:37]
; SI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
@@ -10702,8 +10686,7 @@ define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_offset_scalar(ptr addr
; VI-NEXT: .LBB149_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v2, vcc, -1, v3
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; VI-NEXT: v_subrev_u32_e32 v2, vcc, 1, v3
; VI-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3
; VI-NEXT: s_or_b64 vcc, vcc, s[34:35]
; VI-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
@@ -10729,9 +10712,8 @@ define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_offset_scalar(ptr addr
; GFX9-NEXT: .LBB149_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v1
; GFX9-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v1
-; GFX9-NEXT: v_add_u32_e32 v0, -1, v1
; GFX9-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
@@ -10771,8 +10753,7 @@ define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_scalar(ptr addrspace(1) i
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; SI-NEXT: v_subrev_i32_e32 v0, vcc, 1, v4
; SI-NEXT: v_cmp_lt_u32_e64 s[36:37], s34, v4
; SI-NEXT: s_or_b64 vcc, vcc, s[36:37]
; SI-NEXT: v_cndmask_b32_e32 v3, v0, v2, vcc
@@ -10809,8 +10790,7 @@ define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_scalar(ptr addrspace(1) i
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v5, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, -1, v5
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; VI-NEXT: v_subrev_u32_e32 v0, vcc, 1, v5
; VI-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5
; VI-NEXT: s_or_b64 vcc, vcc, s[34:35]
; VI-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc
@@ -10836,9 +10816,8 @@ define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_scalar(ptr addrspace(1) i
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v4
; GFX9-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v4
-; GFX9-NEXT: v_add_u32_e32 v0, -1, v4
; GFX9-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GFX9-NEXT: v_cndmask_b32_e32 v3, v0, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[3:4], s[4:5] glc
@@ -10876,8 +10855,7 @@ define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_offset_scalar(ptr addrspa
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; SI-NEXT: v_subrev_i32_e32 v0, vcc, 1, v4
; SI-NEXT: v_cmp_lt_u32_e64 s[36:37], s34, v4
; SI-NEXT: s_or_b64 vcc, vcc, s[36:37]
; SI-NEXT: v_cndmask_b32_e32 v3, v0, v2, vcc
@@ -10914,8 +10892,7 @@ define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_offset_scalar(ptr addrspa
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v5, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, -1, v5
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; VI-NEXT: v_subrev_u32_e32 v0, vcc, 1, v5
; VI-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5
; VI-NEXT: s_or_b64 vcc, vcc, s[34:35]
; VI-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc
@@ -10941,9 +10918,8 @@ define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_offset_scalar(ptr addrspa
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v4
; GFX9-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v4
-; GFX9-NEXT: v_add_u32_e32 v0, -1, v4
; GFX9-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GFX9-NEXT: v_cndmask_b32_e32 v3, v0, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[3:4], s[4:5] offset:16 glc
diff --git a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
index c572185e7bbf6..4ea58a5890d35 100644
--- a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
@@ -619,8 +619,7 @@ define i32 @atomicrmw_dec_private_i32(ptr addrspace(5) %ptr) {
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_add_i32_e32 v2, vcc, -1, v1
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT: v_subrev_i32_e32 v2, vcc, 1, v1
; GCN-NEXT: v_cmp_lt_u32_e64 s[4:5], 4, v1
; GCN-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GCN-NEXT: v_cndmask_b32_e64 v2, v2, 4, s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
index 83c521043025c..aa131ed6c9db1 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
@@ -377,63 +377,63 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX8-NEXT: v_mov_b32_e32 v10, 0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: v_mov_b32_e32 v11, 0
-; GFX8-NEXT: s_movk_i32 s0, 0x7f
+; GFX8-NEXT: v_mov_b32_e32 v13, 0x7f
; GFX8-NEXT: .LBB1_1: ; %for.cond.preheader
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB1_2 Depth 2
; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_mov_b32 s0, 0
; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: s_mov_b32 s1, 0
; GFX8-NEXT: .LBB1_2: ; %for.body
; GFX8-NEXT: ; Parent Loop BB1_1 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0xffffb000, v2
; GFX8-NEXT: v_addc_u32_e32 v5, vcc, -1, v3, vcc
-; GFX8-NEXT: flat_load_dwordx2 v[13:14], v[4:5]
+; GFX8-NEXT: flat_load_dwordx2 v[14:15], v[4:5]
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0xffffb800, v2
; GFX8-NEXT: v_addc_u32_e32 v7, vcc, -1, v3, vcc
-; GFX8-NEXT: flat_load_dwordx2 v[15:16], v[6:7]
+; GFX8-NEXT: flat_load_dwordx2 v[16:17], v[6:7]
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0xffffc000, v2
; GFX8-NEXT: v_addc_u32_e32 v5, vcc, -1, v3, vcc
-; GFX8-NEXT: flat_load_dwordx2 v[17:18], v[4:5]
+; GFX8-NEXT: flat_load_dwordx2 v[18:19], v[4:5]
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0xffffc800, v2
; GFX8-NEXT: v_addc_u32_e32 v7, vcc, -1, v3, vcc
; GFX8-NEXT: flat_load_dwordx2 v[6:7], v[6:7]
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0xffffd000, v2
; GFX8-NEXT: v_addc_u32_e32 v5, vcc, -1, v3, vcc
-; GFX8-NEXT: v_add_u32_e32 v19, vcc, 0xffffd800, v2
-; GFX8-NEXT: v_addc_u32_e32 v20, vcc, -1, v3, vcc
-; GFX8-NEXT: v_add_u32_e32 v21, vcc, 0xffffe000, v2
-; GFX8-NEXT: v_addc_u32_e32 v22, vcc, -1, v3, vcc
+; GFX8-NEXT: v_add_u32_e32 v20, vcc, 0xffffd800, v2
+; GFX8-NEXT: v_addc_u32_e32 v21, vcc, -1, v3, vcc
+; GFX8-NEXT: v_add_u32_e32 v22, vcc, 0xffffe000, v2
+; GFX8-NEXT: v_addc_u32_e32 v23, vcc, -1, v3, vcc
; GFX8-NEXT: flat_load_dwordx2 v[8:9], v[4:5]
-; GFX8-NEXT: flat_load_dwordx2 v[4:5], v[19:20]
-; GFX8-NEXT: s_addk_i32 s1, 0x2000
-; GFX8-NEXT: s_cmp_gt_u32 s1, 0x3fffff
+; GFX8-NEXT: flat_load_dwordx2 v[4:5], v[20:21]
+; GFX8-NEXT: s_addk_i32 s0, 0x2000
+; GFX8-NEXT: s_cmp_gt_u32 s0, 0x3fffff
; GFX8-NEXT: s_waitcnt vmcnt(5)
-; GFX8-NEXT: v_add_u32_e32 v23, vcc, v13, v10
-; GFX8-NEXT: v_addc_u32_e32 v24, vcc, v14, v11, vcc
+; GFX8-NEXT: v_add_u32_e32 v24, vcc, v14, v10
+; GFX8-NEXT: v_addc_u32_e32 v25, vcc, v15, v11, vcc
; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0xffffe800, v2
; GFX8-NEXT: v_addc_u32_e32 v11, vcc, -1, v3, vcc
-; GFX8-NEXT: v_add_u32_e32 v13, vcc, 0xfffff000, v2
-; GFX8-NEXT: flat_load_dwordx2 v[19:20], v[21:22]
+; GFX8-NEXT: v_add_u32_e32 v14, vcc, 0xfffff000, v2
+; GFX8-NEXT: flat_load_dwordx2 v[20:21], v[22:23]
; GFX8-NEXT: flat_load_dwordx2 v[10:11], v[10:11]
-; GFX8-NEXT: v_addc_u32_e32 v14, vcc, -1, v3, vcc
+; GFX8-NEXT: v_addc_u32_e32 v15, vcc, -1, v3, vcc
; GFX8-NEXT: s_waitcnt vmcnt(6)
-; GFX8-NEXT: v_add_u32_e32 v21, vcc, v15, v23
-; GFX8-NEXT: v_addc_u32_e32 v22, vcc, v16, v24, vcc
-; GFX8-NEXT: v_add_u32_e32 v15, vcc, 0xfffff800, v2
-; GFX8-NEXT: flat_load_dwordx2 v[13:14], v[13:14]
-; GFX8-NEXT: v_addc_u32_e32 v16, vcc, -1, v3, vcc
-; GFX8-NEXT: flat_load_dwordx2 v[15:16], v[15:16]
+; GFX8-NEXT: v_add_u32_e32 v22, vcc, v16, v24
+; GFX8-NEXT: v_addc_u32_e32 v23, vcc, v17, v25, vcc
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, 0xfffff800, v2
+; GFX8-NEXT: flat_load_dwordx2 v[14:15], v[14:15]
+; GFX8-NEXT: v_addc_u32_e32 v17, vcc, -1, v3, vcc
+; GFX8-NEXT: flat_load_dwordx2 v[16:17], v[16:17]
; GFX8-NEXT: s_waitcnt vmcnt(7)
-; GFX8-NEXT: v_add_u32_e32 v21, vcc, v17, v21
-; GFX8-NEXT: v_addc_u32_e32 v22, vcc, v18, v22, vcc
-; GFX8-NEXT: flat_load_dwordx2 v[17:18], v[2:3]
+; GFX8-NEXT: v_add_u32_e32 v22, vcc, v18, v22
+; GFX8-NEXT: v_addc_u32_e32 v23, vcc, v19, v23, vcc
+; GFX8-NEXT: flat_load_dwordx2 v[18:19], v[2:3]
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x10000, v2
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GFX8-NEXT: s_waitcnt vmcnt(7)
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v21
-; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v22, vcc
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v22
+; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v23, vcc
; GFX8-NEXT: s_waitcnt vmcnt(6)
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v8, v6
; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v9, v7, vcc
@@ -441,30 +441,27 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v6
; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v5, v7, vcc
; GFX8-NEXT: s_waitcnt vmcnt(4)
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v19, v4
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v20, v5, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v20, v4
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v21, v5, vcc
; GFX8-NEXT: s_waitcnt vmcnt(3)
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v10, v4
; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v11, v5, vcc
; GFX8-NEXT: s_waitcnt vmcnt(2)
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v13, v4
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v14, v5, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v14, v4
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v15, v5, vcc
; GFX8-NEXT: s_waitcnt vmcnt(1)
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v15, v4
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v16, v5, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v16, v4
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v17, v5, vcc
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, v17, v4
-; GFX8-NEXT: v_addc_u32_e32 v11, vcc, v18, v5, vcc
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, v18, v4
+; GFX8-NEXT: v_addc_u32_e32 v11, vcc, v19, v5, vcc
; GFX8-NEXT: s_cbranch_scc0 .LBB1_2
; GFX8-NEXT: ; %bb.3: ; %while.cond.loopexit
; GFX8-NEXT: ; in Loop: Header=BB1_1 Depth=1
-; GFX8-NEXT: s_add_i32 s1, s0, -1
-; GFX8-NEXT: s_cmp_eq_u32 s0, 0
-; GFX8-NEXT: s_cbranch_scc1 .LBB1_5
-; GFX8-NEXT: ; %bb.4: ; in Loop: Header=BB1_1 Depth=1
-; GFX8-NEXT: s_mov_b32 s0, s1
-; GFX8-NEXT: s_branch .LBB1_1
-; GFX8-NEXT: .LBB1_5: ; %while.end
+; GFX8-NEXT: v_subrev_u32_e32 v13, vcc, 1, v13
+; GFX8-NEXT: s_and_b64 vcc, exec, vcc
+; GFX8-NEXT: s_cbranch_vccz .LBB1_1
+; GFX8-NEXT: ; %bb.4: ; %while.end
; GFX8-NEXT: v_mov_b32_e32 v1, s35
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s34, v12
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
@@ -503,7 +500,7 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX900-NEXT: v_mov_b32_e32 v4, 0
; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX900-NEXT: v_mov_b32_e32 v5, 0
-; GFX900-NEXT: s_movk_i32 s5, 0x7f
+; GFX900-NEXT: v_mov_b32_e32 v7, 0x7f
; GFX900-NEXT: s_movk_i32 s2, 0xd000
; GFX900-NEXT: s_movk_i32 s3, 0xe000
; GFX900-NEXT: s_movk_i32 s4, 0xf000
@@ -511,77 +508,74 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX900-NEXT: ; =>This Loop Header: Depth=1
; GFX900-NEXT: ; Child Loop BB1_2 Depth 2
; GFX900-NEXT: v_mov_b32_e32 v3, v1
+; GFX900-NEXT: s_mov_b32 s5, 0
; GFX900-NEXT: v_mov_b32_e32 v2, v0
-; GFX900-NEXT: s_mov_b32 s6, 0
; GFX900-NEXT: .LBB1_2: ; %for.body
; GFX900-NEXT: ; Parent Loop BB1_1 Depth=1
; GFX900-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, 0xffffb000, v2
-; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, -1, v3, vcc
-; GFX900-NEXT: global_load_dwordx2 v[9:10], v[2:3], off offset:-4096
-; GFX900-NEXT: global_load_dwordx2 v[11:12], v[2:3], off offset:-2048
-; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, 0xffffc000, v2
-; GFX900-NEXT: global_load_dwordx2 v[7:8], v[7:8], off
-; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, -1, v3, vcc
-; GFX900-NEXT: global_load_dwordx2 v[17:18], v[13:14], off offset:-2048
-; GFX900-NEXT: global_load_dwordx2 v[19:20], v[13:14], off
-; GFX900-NEXT: v_add_co_u32_e32 v15, vcc, s2, v2
-; GFX900-NEXT: v_addc_co_u32_e32 v16, vcc, -1, v3, vcc
-; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, s3, v2
-; GFX900-NEXT: global_load_dwordx2 v[15:16], v[15:16], off offset:-2048
-; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, -1, v3, vcc
-; GFX900-NEXT: s_addk_i32 s6, 0x2000
-; GFX900-NEXT: s_cmp_gt_u32 s6, 0x3fffff
+; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, 0xffffb000, v2
+; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, -1, v3, vcc
+; GFX900-NEXT: global_load_dwordx2 v[10:11], v[2:3], off offset:-4096
+; GFX900-NEXT: global_load_dwordx2 v[12:13], v[2:3], off offset:-2048
+; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, 0xffffc000, v2
+; GFX900-NEXT: global_load_dwordx2 v[8:9], v[8:9], off
+; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, -1, v3, vcc
+; GFX900-NEXT: global_load_dwordx2 v[18:19], v[14:15], off offset:-2048
+; GFX900-NEXT: global_load_dwordx2 v[20:21], v[14:15], off
+; GFX900-NEXT: v_add_co_u32_e32 v16, vcc, s2, v2
+; GFX900-NEXT: v_addc_co_u32_e32 v17, vcc, -1, v3, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, s3, v2
+; GFX900-NEXT: global_load_dwordx2 v[16:17], v[16:17], off offset:-2048
+; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, -1, v3, vcc
+; GFX900-NEXT: s_addk_i32 s5, 0x2000
+; GFX900-NEXT: s_cmp_gt_u32 s5, 0x3fffff
; GFX900-NEXT: s_waitcnt vmcnt(3)
-; GFX900-NEXT: v_add_co_u32_e32 v21, vcc, v7, v4
-; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v8, v5, vcc
-; GFX900-NEXT: global_load_dwordx2 v[7:8], v[13:14], off offset:-4096
+; GFX900-NEXT: v_add_co_u32_e32 v22, vcc, v8, v4
+; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v9, v5, vcc
+; GFX900-NEXT: global_load_dwordx2 v[8:9], v[14:15], off offset:-4096
; GFX900-NEXT: s_waitcnt vmcnt(3)
-; GFX900-NEXT: v_add_co_u32_e64 v23, s[0:1], v17, v21
-; GFX900-NEXT: v_addc_co_u32_e64 v24, s[0:1], v18, v5, s[0:1]
-; GFX900-NEXT: global_load_dwordx2 v[17:18], v[13:14], off offset:-2048
-; GFX900-NEXT: global_load_dwordx2 v[21:22], v[13:14], off
+; GFX900-NEXT: v_add_co_u32_e64 v24, s[0:1], v18, v22
+; GFX900-NEXT: v_addc_co_u32_e64 v25, s[0:1], v19, v5, s[0:1]
+; GFX900-NEXT: global_load_dwordx2 v[18:19], v[14:15], off offset:-2048
+; GFX900-NEXT: global_load_dwordx2 v[22:23], v[14:15], off
; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, s4, v2
; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, -1, v3, vcc
; GFX900-NEXT: global_load_dwordx2 v[4:5], v[4:5], off offset:-2048
; GFX900-NEXT: s_waitcnt vmcnt(5)
-; GFX900-NEXT: v_add_co_u32_e32 v19, vcc, v19, v23
-; GFX900-NEXT: global_load_dwordx2 v[13:14], v[2:3], off
-; GFX900-NEXT: v_addc_co_u32_e32 v20, vcc, v20, v24, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v20, vcc, v20, v24
+; GFX900-NEXT: global_load_dwordx2 v[14:15], v[2:3], off
+; GFX900-NEXT: v_addc_co_u32_e32 v21, vcc, v21, v25, vcc
; GFX900-NEXT: v_add_co_u32_e32 v2, vcc, 0x10000, v2
; GFX900-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX900-NEXT: s_waitcnt vmcnt(5)
-; GFX900-NEXT: v_add_co_u32_e32 v15, vcc, v15, v19
-; GFX900-NEXT: v_addc_co_u32_e32 v16, vcc, v16, v20, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v16, vcc, v16, v20
+; GFX900-NEXT: v_addc_co_u32_e32 v17, vcc, v17, v21, vcc
; GFX900-NEXT: s_waitcnt vmcnt(4)
-; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, v7, v15
-; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, v8, v16, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, v8, v16
+; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, v9, v17, vcc
; GFX900-NEXT: s_waitcnt vmcnt(3)
-; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, v17, v7
-; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, v18, v8, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, v18, v8
+; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, v19, v9, vcc
; GFX900-NEXT: s_waitcnt vmcnt(2)
-; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, v21, v7
-; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, v22, v8, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, v22, v8
+; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, v23, v9, vcc
; GFX900-NEXT: s_waitcnt vmcnt(1)
-; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v4, v7
-; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v8, vcc
-; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v9, v4
-; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v10, v5, vcc
-; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v11, v4
-; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v12, v5, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8
+; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v9, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4
+; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v11, v5, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v12, v4
+; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v13, v5, vcc
; GFX900-NEXT: s_waitcnt vmcnt(0)
-; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v13, v4
-; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v14, v5, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v14, v4
+; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc
; GFX900-NEXT: s_cbranch_scc0 .LBB1_2
; GFX900-NEXT: ; %bb.3: ; %while.cond.loopexit
; GFX900-NEXT: ; in Loop: Header=BB1_1 Depth=1
-; GFX900-NEXT: s_add_i32 s0, s5, -1
-; GFX900-NEXT: s_cmp_eq_u32 s5, 0
-; GFX900-NEXT: s_cbranch_scc1 .LBB1_5
-; GFX900-NEXT: ; %bb.4: ; in Loop: Header=BB1_1 Depth=1
-; GFX900-NEXT: s_mov_b32 s5, s0
-; GFX900-NEXT: s_branch .LBB1_1
-; GFX900-NEXT: .LBB1_5: ; %while.end
+; GFX900-NEXT: v_subrev_co_u32_e32 v7, vcc, 1, v7
+; GFX900-NEXT: s_and_b64 vcc, exec, vcc
+; GFX900-NEXT: s_cbranch_vccz .LBB1_1
+; GFX900-NEXT: ; %bb.4: ; %while.end
; GFX900-NEXT: v_mov_b32_e32 v1, s35
; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s34, v6
; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
@@ -612,7 +606,7 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX10-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: v_mov_b32_e32 v3, 0
-; GFX10-NEXT: s_movk_i32 s1, 0x7f
+; GFX10-NEXT: v_mov_b32_e32 v7, 0x7f
; GFX10-NEXT: v_and_b32_e32 v6, 0xfe000000, v1
; GFX10-NEXT: v_lshl_or_b32 v0, v0, 3, v6
; GFX10-NEXT: v_add_co_u32 v0, s0, v0, s34
@@ -624,77 +618,74 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX10-NEXT: ; Child Loop BB1_2 Depth 2
; GFX10-NEXT: v_mov_b32_e32 v5, v1
; GFX10-NEXT: v_mov_b32_e32 v4, v0
-; GFX10-NEXT: s_mov_b32 s2, 0
+; GFX10-NEXT: s_mov_b32 s1, 0
; GFX10-NEXT: .LBB1_2: ; %for.body
; GFX10-NEXT: ; Parent Loop BB1_1 Depth=1
; GFX10-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX10-NEXT: v_add_co_u32 v7, vcc_lo, v4, 0xffffb800
-; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, -1, v5, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v9, vcc_lo, v4, 0xffffc800
-; GFX10-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, -1, v5, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v13, vcc_lo, v4, 0xffffd800
-; GFX10-NEXT: v_add_co_ci_u32_e32 v14, vcc_lo, -1, v5, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v17, vcc_lo, v4, 0xffffe800
+; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v4, 0xffffb800
+; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, -1, v5, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v10, vcc_lo, v4, 0xffffc800
+; GFX10-NEXT: v_add_co_ci_u32_e32 v11, vcc_lo, -1, v5, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v14, vcc_lo, v4, 0xffffd800
+; GFX10-NEXT: v_add_co_ci_u32_e32 v15, vcc_lo, -1, v5, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v18, vcc_lo, v4, 0xffffe800
; GFX10-NEXT: s_clause 0x2
-; GFX10-NEXT: global_load_dwordx2 v[11:12], v[7:8], off offset:-2048
-; GFX10-NEXT: global_load_dwordx2 v[15:16], v[9:10], off offset:-2048
-; GFX10-NEXT: global_load_dwordx2 v[19:20], v[13:14], off offset:-2048
-; GFX10-NEXT: v_add_co_ci_u32_e32 v18, vcc_lo, -1, v5, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v21, vcc_lo, 0xfffff000, v4
-; GFX10-NEXT: v_add_co_ci_u32_e32 v22, vcc_lo, -1, v5, vcc_lo
+; GFX10-NEXT: global_load_dwordx2 v[12:13], v[8:9], off offset:-2048
+; GFX10-NEXT: global_load_dwordx2 v[16:17], v[10:11], off offset:-2048
+; GFX10-NEXT: global_load_dwordx2 v[20:21], v[14:15], off offset:-2048
+; GFX10-NEXT: v_add_co_ci_u32_e32 v19, vcc_lo, -1, v5, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v22, vcc_lo, 0xfffff000, v4
+; GFX10-NEXT: v_add_co_ci_u32_e32 v23, vcc_lo, -1, v5, vcc_lo
; GFX10-NEXT: s_clause 0x7
-; GFX10-NEXT: global_load_dwordx2 v[23:24], v[17:18], off offset:-2048
-; GFX10-NEXT: global_load_dwordx2 v[7:8], v[7:8], off
-; GFX10-NEXT: global_load_dwordx2 v[9:10], v[9:10], off
-; GFX10-NEXT: global_load_dwordx2 v[13:14], v[13:14], off
-; GFX10-NEXT: global_load_dwordx2 v[25:26], v[17:18], off
-; GFX10-NEXT: global_load_dwordx2 v[27:28], v[21:22], off
-; GFX10-NEXT: global_load_dwordx2 v[29:30], v[4:5], off offset:-2048
-; GFX10-NEXT: global_load_dwordx2 v[31:32], v[4:5], off
+; GFX10-NEXT: global_load_dwordx2 v[24:25], v[18:19], off offset:-2048
+; GFX10-NEXT: global_load_dwordx2 v[8:9], v[8:9], off
+; GFX10-NEXT: global_load_dwordx2 v[10:11], v[10:11], off
+; GFX10-NEXT: global_load_dwordx2 v[14:15], v[14:15], off
+; GFX10-NEXT: global_load_dwordx2 v[26:27], v[18:19], off
+; GFX10-NEXT: global_load_dwordx2 v[28:29], v[22:23], off
+; GFX10-NEXT: global_load_dwordx2 v[30:31], v[4:5], off offset:-2048
+; GFX10-NEXT: global_load_dwordx2 v[32:33], v[4:5], off
; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, 0x10000, v4
; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo
-; GFX10-NEXT: s_addk_i32 s2, 0x2000
-; GFX10-NEXT: s_cmp_gt_u32 s2, 0x3fffff
+; GFX10-NEXT: s_addk_i32 s1, 0x2000
+; GFX10-NEXT: s_cmp_gt_u32 s1, 0x3fffff
; GFX10-NEXT: s_waitcnt vmcnt(10)
-; GFX10-NEXT: v_add_co_u32 v2, s0, v11, v2
-; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v12, v3, s0
+; GFX10-NEXT: v_add_co_u32 v2, s0, v12, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v13, v3, s0
; GFX10-NEXT: s_waitcnt vmcnt(6)
-; GFX10-NEXT: v_add_co_u32 v2, s0, v7, v2
-; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v8, v3, s0
-; GFX10-NEXT: v_add_co_u32 v2, s0, v15, v2
-; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v16, v3, s0
+; GFX10-NEXT: v_add_co_u32 v2, s0, v8, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v9, v3, s0
+; GFX10-NEXT: v_add_co_u32 v2, s0, v16, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v17, v3, s0
; GFX10-NEXT: s_waitcnt vmcnt(5)
-; GFX10-NEXT: v_add_co_u32 v2, s0, v9, v2
-; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v10, v3, s0
-; GFX10-NEXT: v_add_co_u32 v2, s0, v19, v2
-; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v20, v3, s0
+; GFX10-NEXT: v_add_co_u32 v2, s0, v10, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v11, v3, s0
+; GFX10-NEXT: v_add_co_u32 v2, s0, v20, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v21, v3, s0
; GFX10-NEXT: s_waitcnt vmcnt(4)
-; GFX10-NEXT: v_add_co_u32 v2, s0, v13, v2
-; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v14, v3, s0
-; GFX10-NEXT: v_add_co_u32 v2, s0, v23, v2
-; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v24, v3, s0
+; GFX10-NEXT: v_add_co_u32 v2, s0, v14, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v15, v3, s0
+; GFX10-NEXT: v_add_co_u32 v2, s0, v24, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v25, v3, s0
; GFX10-NEXT: s_waitcnt vmcnt(3)
-; GFX10-NEXT: v_add_co_u32 v2, s0, v25, v2
-; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v26, v3, s0
+; GFX10-NEXT: v_add_co_u32 v2, s0, v26, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v27, v3, s0
; GFX10-NEXT: s_waitcnt vmcnt(2)
-; GFX10-NEXT: v_add_co_u32 v2, s0, v27, v2
-; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v28, v3, s0
+; GFX10-NEXT: v_add_co_u32 v2, s0, v28, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v29, v3, s0
; GFX10-NEXT: s_waitcnt vmcnt(1)
-; GFX10-NEXT: v_add_co_u32 v2, s0, v29, v2
-; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v30, v3, s0
+; GFX10-NEXT: v_add_co_u32 v2, s0, v30, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v31, v3, s0
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v31, v2
-; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v32, v3, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v32, v2
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v33, v3, vcc_lo
; GFX10-NEXT: s_cbranch_scc0 .LBB1_2
; GFX10-NEXT: ; %bb.3: ; %while.cond.loopexit
; GFX10-NEXT: ; in Loop: Header=BB1_1 Depth=1
-; GFX10-NEXT: s_add_i32 s0, s1, -1
-; GFX10-NEXT: s_cmp_eq_u32 s1, 0
-; GFX10-NEXT: s_cbranch_scc1 .LBB1_5
-; GFX10-NEXT: ; %bb.4: ; in Loop: Header=BB1_1 Depth=1
-; GFX10-NEXT: s_mov_b32 s1, s0
-; GFX10-NEXT: s_branch .LBB1_1
-; GFX10-NEXT: .LBB1_5: ; %while.end
+; GFX10-NEXT: v_sub_co_u32 v7, s0, v7, 1
+; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s0
+; GFX10-NEXT: s_cbranch_vccz .LBB1_1
+; GFX10-NEXT: ; %bb.4: ; %while.end
; GFX10-NEXT: v_add_co_u32 v0, s0, s34, v6
; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, s35, 0, s0
; GFX10-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
@@ -731,15 +722,15 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, s0, v1
; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], 0, 0
-; GFX90A-NEXT: s_movk_i32 s3, 0x7f
+; GFX90A-NEXT: v_mov_b32_e32 v1, 0x7f
; GFX90A-NEXT: s_movk_i32 s0, 0xd000
; GFX90A-NEXT: s_movk_i32 s1, 0xe000
; GFX90A-NEXT: s_movk_i32 s2, 0xf000
; GFX90A-NEXT: .LBB1_1: ; %for.cond.preheader
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB1_2 Depth 2
+; GFX90A-NEXT: s_mov_b32 s3, 0
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[2:3], v[2:3] op_sel:[0,1]
-; GFX90A-NEXT: s_mov_b32 s4, 0
; GFX90A-NEXT: .LBB1_2: ; %for.body
; GFX90A-NEXT: ; Parent Loop BB1_1 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
@@ -766,49 +757,46 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX90A-NEXT: global_load_dwordx2 v[30:31], v[6:7], off
; GFX90A-NEXT: v_add_co_u32_e32 v6, vcc, 0x10000, v6
; GFX90A-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v7, vcc
-; GFX90A-NEXT: s_addk_i32 s4, 0x2000
-; GFX90A-NEXT: s_cmp_gt_u32 s4, 0x3fffff
+; GFX90A-NEXT: s_addk_i32 s3, 0x2000
+; GFX90A-NEXT: s_cmp_gt_u32 s3, 0x3fffff
; GFX90A-NEXT: s_waitcnt vmcnt(8)
-; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v12, v4
-; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v13, v5, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v12, v4
+; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v13, v5, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(7)
-; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v18, v1
-; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v19, v4, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v18, v4
+; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v19, v5, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(6)
-; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v20, v1
-; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v21, v4, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v20, v4
+; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v21, v5, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(5)
-; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v16, v1
-; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v17, v4, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v16, v4
+; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v17, v5, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(4)
-; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v24, v1
-; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v25, v4, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v24, v4
+; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v25, v5, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(3)
-; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v26, v1
-; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v27, v4, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v26, v4
+; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v27, v5, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(2)
-; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v28, v1
-; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v29, v4, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v28, v4
+; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v29, v5, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(1)
-; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v14, v1
-; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v15, v4, vcc
-; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v8, v1
-; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v9, v4, vcc
-; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v10, v1
-; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v11, v4, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v14, v4
+; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v8, v4
+; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v9, v5, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4
+; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v11, v5, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v30, v1
+; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v30, v4
; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v31, v5, vcc
; GFX90A-NEXT: s_cbranch_scc0 .LBB1_2
; GFX90A-NEXT: ; %bb.3: ; %while.cond.loopexit
; GFX90A-NEXT: ; in Loop: Header=BB1_1 Depth=1
-; GFX90A-NEXT: s_add_i32 s4, s3, -1
-; GFX90A-NEXT: s_cmp_eq_u32 s3, 0
-; GFX90A-NEXT: s_cbranch_scc1 .LBB1_5
-; GFX90A-NEXT: ; %bb.4: ; in Loop: Header=BB1_1 Depth=1
-; GFX90A-NEXT: s_mov_b32 s3, s4
-; GFX90A-NEXT: s_branch .LBB1_1
-; GFX90A-NEXT: .LBB1_5: ; %while.end
+; GFX90A-NEXT: v_subrev_co_u32_e32 v1, vcc, 1, v1
+; GFX90A-NEXT: s_and_b64 vcc, exec, vcc
+; GFX90A-NEXT: s_cbranch_vccz .LBB1_1
+; GFX90A-NEXT: ; %bb.4: ; %while.end
; GFX90A-NEXT: v_mov_b32_e32 v1, s35
; GFX90A-NEXT: v_add_co_u32_e32 v0, vcc, s34, v0
; GFX90A-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
@@ -828,8 +816,8 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX11-NEXT: s_swappc_b64 s[30:31], s[0:1]
; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 17, v0
; GFX11-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_and_b32 v0, 0xff, v0
-; GFX11-NEXT: s_movk_i32 s1, 0x7f
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v7, 0x7f
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_and_b32_e32 v6, 0xfe000000, v1
; GFX11-NEXT: v_lshl_or_b32 v0, v0, 3, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -843,95 +831,92 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX11-NEXT: ; Child Loop BB1_2 Depth 2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX11-NEXT: s_mov_b32 s2, 0
+; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: .LBB1_2: ; %for.body
; GFX11-NEXT: ; Parent Loop BB1_1 Depth=1
; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v7, vcc_lo, v4, 0xffffc000
-; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, -1, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v9, vcc_lo, 0xffffc000, v4
+; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v4, 0xffffc000
+; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v5, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, 0xffffc000, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v10, null, -1, v5, vcc_lo
-; GFX11-NEXT: global_load_b64 v[13:14], v[7:8], off offset:-4096
-; GFX11-NEXT: v_add_co_u32 v11, vcc_lo, 0xffffd000, v4
-; GFX11-NEXT: v_add_co_ci_u32_e64 v12, null, -1, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v15, vcc_lo, v4, 0xffffe000
-; GFX11-NEXT: global_load_b64 v[9:10], v[9:10], off offset:-2048
-; GFX11-NEXT: v_add_co_ci_u32_e64 v16, null, -1, v5, vcc_lo
-; GFX11-NEXT: global_load_b64 v[11:12], v[11:12], off offset:-2048
-; GFX11-NEXT: v_add_co_u32 v17, vcc_lo, 0xffffe000, v4
+; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v5, vcc_lo
+; GFX11-NEXT: global_load_b64 v[14:15], v[8:9], off offset:-4096
+; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, 0xffffd000, v4
+; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, -1, v5, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v4, 0xffffe000
+; GFX11-NEXT: global_load_b64 v[10:11], v[10:11], off offset:-2048
+; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, -1, v5, vcc_lo
+; GFX11-NEXT: global_load_b64 v[12:13], v[12:13], off offset:-2048
+; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, 0xffffe000, v4
; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: global_load_b64 v[19:20], v[15:16], off offset:-4096
-; GFX11-NEXT: global_load_b64 v[7:8], v[7:8], off
-; GFX11-NEXT: v_add_co_ci_u32_e64 v18, null, -1, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v21, vcc_lo, 0xfffff000, v4
+; GFX11-NEXT: global_load_b64 v[20:21], v[16:17], off offset:-4096
+; GFX11-NEXT: global_load_b64 v[8:9], v[8:9], off
+; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, -1, v5, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, 0xfffff000, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v22, null, -1, v5, vcc_lo
+; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, -1, v5, vcc_lo
; GFX11-NEXT: s_clause 0x5
-; GFX11-NEXT: global_load_b64 v[17:18], v[17:18], off offset:-2048
-; GFX11-NEXT: global_load_b64 v[15:16], v[15:16], off
-; GFX11-NEXT: global_load_b64 v[21:22], v[21:22], off offset:-2048
-; GFX11-NEXT: global_load_b64 v[23:24], v[4:5], off offset:-4096
-; GFX11-NEXT: global_load_b64 v[25:26], v[4:5], off offset:-2048
-; GFX11-NEXT: global_load_b64 v[27:28], v[4:5], off
+; GFX11-NEXT: global_load_b64 v[18:19], v[18:19], off offset:-2048
+; GFX11-NEXT: global_load_b64 v[16:17], v[16:17], off
+; GFX11-NEXT: global_load_b64 v[22:23], v[22:23], off offset:-2048
+; GFX11-NEXT: global_load_b64 v[24:25], v[4:5], off offset:-4096
+; GFX11-NEXT: global_load_b64 v[26:27], v[4:5], off offset:-2048
+; GFX11-NEXT: global_load_b64 v[28:29], v[4:5], off
; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x10000, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo
-; GFX11-NEXT: s_addk_i32 s2, 0x2000
-; GFX11-NEXT: s_cmp_gt_u32 s2, 0x3fffff
+; GFX11-NEXT: s_addk_i32 s1, 0x2000
+; GFX11-NEXT: s_cmp_gt_u32 s1, 0x3fffff
; GFX11-NEXT: s_waitcnt vmcnt(10)
-; GFX11-NEXT: v_add_co_u32 v2, s0, v13, v2
+; GFX11-NEXT: v_add_co_u32 v2, s0, v14, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v14, v3, s0
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v15, v3, s0
; GFX11-NEXT: s_waitcnt vmcnt(9)
-; GFX11-NEXT: v_add_co_u32 v2, s0, v9, v2
+; GFX11-NEXT: v_add_co_u32 v2, s0, v10, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v10, v3, s0
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v11, v3, s0
; GFX11-NEXT: s_waitcnt vmcnt(6)
-; GFX11-NEXT: v_add_co_u32 v2, s0, v7, v2
+; GFX11-NEXT: v_add_co_u32 v2, s0, v8, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v8, v3, s0
-; GFX11-NEXT: v_add_co_u32 v2, s0, v11, v2
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v9, v3, s0
+; GFX11-NEXT: v_add_co_u32 v2, s0, v12, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v12, v3, s0
-; GFX11-NEXT: v_add_co_u32 v2, s0, v19, v2
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v13, v3, s0
+; GFX11-NEXT: v_add_co_u32 v2, s0, v20, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v20, v3, s0
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v21, v3, s0
; GFX11-NEXT: s_waitcnt vmcnt(5)
-; GFX11-NEXT: v_add_co_u32 v2, s0, v17, v2
+; GFX11-NEXT: v_add_co_u32 v2, s0, v18, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v18, v3, s0
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v19, v3, s0
; GFX11-NEXT: s_waitcnt vmcnt(4)
-; GFX11-NEXT: v_add_co_u32 v2, s0, v15, v2
+; GFX11-NEXT: v_add_co_u32 v2, s0, v16, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v16, v3, s0
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v17, v3, s0
; GFX11-NEXT: s_waitcnt vmcnt(3)
-; GFX11-NEXT: v_add_co_u32 v2, s0, v21, v2
+; GFX11-NEXT: v_add_co_u32 v2, s0, v22, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v22, v3, s0
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v23, v3, s0
; GFX11-NEXT: s_waitcnt vmcnt(2)
-; GFX11-NEXT: v_add_co_u32 v2, s0, v23, v2
+; GFX11-NEXT: v_add_co_u32 v2, s0, v24, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v24, v3, s0
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v25, v3, s0
; GFX11-NEXT: s_waitcnt vmcnt(1)
-; GFX11-NEXT: v_add_co_u32 v2, s0, v25, v2
+; GFX11-NEXT: v_add_co_u32 v2, s0, v26, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v26, v3, s0
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v27, v3, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v27, v2
+; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v28, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v28, v3, vcc_lo
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v29, v3, vcc_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.3: ; %while.cond.loopexit
; GFX11-NEXT: ; in Loop: Header=BB1_1 Depth=1
-; GFX11-NEXT: s_add_i32 s0, s1, -1
-; GFX11-NEXT: s_cmp_eq_u32 s1, 0
-; GFX11-NEXT: s_cbranch_scc1 .LBB1_5
-; GFX11-NEXT: ; %bb.4: ; in Loop: Header=BB1_1 Depth=1
-; GFX11-NEXT: s_mov_b32 s1, s0
-; GFX11-NEXT: s_branch .LBB1_1
-; GFX11-NEXT: .LBB1_5: ; %while.end
+; GFX11-NEXT: v_sub_co_u32 v7, s0, v7, 1
+; GFX11-NEXT: s_and_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB1_1
+; GFX11-NEXT: ; %bb.4: ; %while.end
; GFX11-NEXT: v_add_co_u32 v0, s0, s34, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s35, 0, s0
diff --git a/llvm/test/CodeGen/AMDGPU/sad.ll b/llvm/test/CodeGen/AMDGPU/sad.ll
index 0b58b328bbfb6..68c33487b0596 100644
--- a/llvm/test/CodeGen/AMDGPU/sad.ll
+++ b/llvm/test/CodeGen/AMDGPU/sad.ll
@@ -67,9 +67,9 @@ define amdgpu_kernel void @v_sad_u32_pat2(ptr addrspace(1) %out, i32 %a, i32 %b,
; GCN-NEXT: s_mov_b32 flat_scratch_lo, s13
; GCN-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mov_b32_e32 v0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s2
-; GCN-NEXT: v_sad_u32 v2, s0, v0, v1
+; GCN-NEXT: v_sad_u32 v2, s1, v0, v1
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v1, s5
; GCN-NEXT: flat_store_dword v[0:1], v2
@@ -249,10 +249,10 @@ define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat2(ptr addrspace(1) %out, i
; GCN-NEXT: s_addc_u32 s21, s21, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_sub_i32 s3, s0, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s2
; GCN-NEXT: v_mov_b32_e32 v2, s3
-; GCN-NEXT: v_sad_u32 v3, s0, v0, v1
+; GCN-NEXT: v_sad_u32 v3, s1, v0, v1
; GCN-NEXT: buffer_store_dword v2, v0, s[20:23], 0 offen
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s4
@@ -284,8 +284,8 @@ define amdgpu_kernel void @v_sad_u32_multi_use_select_pat2(ptr addrspace(1) %out
; GCN-NEXT: s_add_u32 s20, s20, s17
; GCN-NEXT: s_addc_u32 s21, s21, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_min_u32 s3, s0, s1
-; GCN-NEXT: s_max_u32 s0, s0, s1
+; GCN-NEXT: s_min_u32 s3, s1, s0
+; GCN-NEXT: s_max_u32 s0, s1, s0
; GCN-NEXT: s_sub_i32 s0, s0, s3
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v2, s0
@@ -583,17 +583,17 @@ define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat2(ptr addrspace(1) %
; GCN-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
; GCN-NEXT: s_add_i32 s12, s12, s17
-; GCN-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; GCN-NEXT: s_mov_b32 flat_scratch_lo, s13
+; GCN-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_sub_i32 s3, s0, s3
-; GCN-NEXT: s_sub_i32 s6, s1, s0
-; GCN-NEXT: s_cmp_lt_u32 s1, s0
-; GCN-NEXT: s_cselect_b32 s0, s3, s6
-; GCN-NEXT: s_add_i32 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_sub_i32 s0, s0, s3
+; GCN-NEXT: v_mov_b32_e32 v1, s0
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, s1, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT: v_add_i32_e32 v2, vcc, s2, v0
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v1, s5
-; GCN-NEXT: v_mov_b32_e32 v2, s0
; GCN-NEXT: flat_store_dword v[0:1], v2
; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i32 %a, %b
More information about the llvm-commits
mailing list