[llvm] AMDGPU: Add tests for atomics with AGPR operands (PR #155820)
Pierre van Houtryve via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 28 05:01:16 PDT 2025
================
@@ -0,0 +1,1067 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx90a < %s | FileCheck %s
+
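+; Test naming convention: <op>_<type>_ret_<data0>_<data1>__<result>, where
+; "a" constrains the value to an AGPR (inline asm constraint "a"), "v" to a
+; VGPR ("v"), and "av" to either register bank ("^VA").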
+;---------------------------------------------------------------------
+; i32 cases
+;---------------------------------------------------------------------
+
+define void @flat_atomic_cmpxchg_i32_ret_av_av__av(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i32_ret_av_av__av:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v3
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v2
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap v0, v[0:1], v[2:3] offset:40 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i32], ptr %ptr, i32 0, i32 10
+ %data0 = call i32 asm "; def $0", "=^VA"()
+ %data1 = call i32 asm "; def $0", "=^VA"()
+ %pair = cmpxchg ptr %gep.0, i32 %data0, i32 %data1 seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ call void asm "; use $0", "^VA"(i32 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i32_ret_av_av__v(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i32_ret_av_av__v:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v3
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v2
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap v0, v[0:1], v[2:3] offset:40 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i32], ptr %ptr, i32 0, i32 10
+ %data0 = call i32 asm "; def $0", "=^VA"()
+ %data1 = call i32 asm "; def $0", "=^VA"()
+ %pair = cmpxchg ptr %gep.0, i32 %data0, i32 %data1 seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ call void asm "; use $0", "v"(i32 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i32_ret_av_av__a(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i32_ret_av_av__a:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v3
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v2
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap v0, v[0:1], v[2:3] offset:40 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i32], ptr %ptr, i32 0, i32 10
+ %data0 = call i32 asm "; def $0", "=^VA"()
+ %data1 = call i32 asm "; def $0", "=^VA"()
+ %pair = cmpxchg ptr %gep.0, i32 %data0, i32 %data1 seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ call void asm "; use $0", "a"(i32 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i32_ret_a_a__a(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i32_ret_a_a__a:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a1
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_accvgpr_read_b32 v2, a1
+; CHECK-NEXT: v_accvgpr_read_b32 v3, a0
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap v0, v[0:1], v[2:3] offset:40 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i32], ptr %ptr, i32 0, i32 10
+ %data0 = call i32 asm "; def $0", "=a"()
+ %data1 = call i32 asm "; def $0", "=a"()
+ %pair = cmpxchg ptr %gep.0, i32 %data0, i32 %data1 seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ call void asm "; use $0", "a"(i32 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i32_ret_a_a__v(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i32_ret_a_a__v:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a1
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_accvgpr_read_b32 v2, a1
+; CHECK-NEXT: v_accvgpr_read_b32 v3, a0
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap v0, v[0:1], v[2:3] offset:40 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i32], ptr %ptr, i32 0, i32 10
+ %data0 = call i32 asm "; def $0", "=a"()
+ %data1 = call i32 asm "; def $0", "=a"()
+ %pair = cmpxchg ptr %gep.0, i32 %data0, i32 %data1 seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ call void asm "; use $0", "v"(i32 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i32_ret_v_a__v(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i32_ret_v_a__v:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_accvgpr_read_b32 v2, a0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v3
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap v0, v[0:1], v[2:3] offset:40 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i32], ptr %ptr, i32 0, i32 10
+ %data0 = call i32 asm "; def $0", "=v"()
+ %data1 = call i32 asm "; def $0", "=a"()
+ %pair = cmpxchg ptr %gep.0, i32 %data0, i32 %data1 seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ call void asm "; use $0", "v"(i32 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i32_ret_a_v__v(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i32_ret_a_v__v:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_accvgpr_read_b32 v3, a0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v2
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap v0, v[0:1], v[2:3] offset:40 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i32], ptr %ptr, i32 0, i32 10
+ %data0 = call i32 asm "; def $0", "=a"()
+ %data1 = call i32 asm "; def $0", "=v"()
+ %pair = cmpxchg ptr %gep.0, i32 %data0, i32 %data1 seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ call void asm "; use $0", "v"(i32 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i32_ret_v_v__a(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i32_ret_v_v__a:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v3
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v2
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap v0, v[0:1], v[2:3] offset:40 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i32], ptr %ptr, i32 0, i32 10
+ %data0 = call i32 asm "; def $0", "=v"()
+ %data1 = call i32 asm "; def $0", "=v"()
+ %pair = cmpxchg ptr %gep.0, i32 %data0, i32 %data1 seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ call void asm "; use $0", "a"(i32 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i32_ret_av_v__av(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i32_ret_av_v__av:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v3
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v2
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap v0, v[0:1], v[2:3] offset:40 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i32], ptr %ptr, i32 0, i32 10
+ %data0 = call i32 asm "; def $0", "=^VA"()
+ %data1 = call i32 asm "; def $0", "=v"()
+ %pair = cmpxchg ptr %gep.0, i32 %data0, i32 %data1 seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ call void asm "; use $0", "^VA"(i32 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i32_ret_v_av__av(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i32_ret_v_av__av:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v3
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v2
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap v0, v[0:1], v[2:3] offset:40 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i32], ptr %ptr, i32 0, i32 10
+ %data0 = call i32 asm "; def $0", "=v"()
+ %data1 = call i32 asm "; def $0", "=^VA"()
+ %pair = cmpxchg ptr %gep.0, i32 %data0, i32 %data1 seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ call void asm "; use $0", "^VA"(i32 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i32_ret_av_a__av(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i32_ret_av_a__av:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_accvgpr_read_b32 v2, a0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v3
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap v0, v[0:1], v[2:3] offset:40 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i32], ptr %ptr, i32 0, i32 10
+ %data0 = call i32 asm "; def $0", "=^VA"()
+ %data1 = call i32 asm "; def $0", "=a"()
+ %pair = cmpxchg ptr %gep.0, i32 %data0, i32 %data1 seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ call void asm "; use $0", "^VA"(i32 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i32_ret_a_av__av(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i32_ret_a_av__av:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_accvgpr_read_b32 v3, a0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v2
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap v0, v[0:1], v[2:3] offset:40 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v0
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i32], ptr %ptr, i32 0, i32 10
+ %data0 = call i32 asm "; def $0", "=a"()
+ %data1 = call i32 asm "; def $0", "=^VA"()
+ %pair = cmpxchg ptr %gep.0, i32 %data0, i32 %data1 seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ call void asm "; use $0", "^VA"(i32 %result)
+ ret void
+}
+
+;---------------------------------------------------------------------
+; i64 cases
+;---------------------------------------------------------------------
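+; Unlike the i32 cases, the 64-bit flat cmpxchg is expanded with a runtime
+; address-space check against src_private_base: the global path issues
+; flat_atomic_cmpswap_x2, while the private path is lowered to buffer
+; loads/stores with v_cndmask selects.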
+
+define void @flat_atomic_cmpxchg_i64_ret_av_av__av(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i64_ret_av_av__av:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, 0x50, v0
+; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v1, vcc
+; CHECK-NEXT: s_mov_b64 s[4:5], src_private_base
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, s5, v7
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[2:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
+; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB12_2
+; CHECK-NEXT: ; %bb.1: ; %atomicrmw.global
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[6:7], v[0:3] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
+; CHECK-NEXT: .LBB12_2: ; %Flow
+; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB12_4
+; CHECK-NEXT: ; %bb.3: ; %atomicrmw.private
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc
+; CHECK-NEXT: buffer_load_dword v4, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; CHECK-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: .LBB12_4: ; %atomicrmw.phi
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[4:5]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i64], ptr %ptr, i64 0, i64 10
+ %data0 = call i64 asm "; def $0", "=^VA"()
+ %data1 = call i64 asm "; def $0", "=^VA"()
+ %pair = cmpxchg ptr %gep.0, i64 %data0, i64 %data1 seq_cst monotonic
+ %result = extractvalue { i64, i1 } %pair, 0
+ call void asm "; use $0", "^VA"(i64 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i64_ret_av_av__v(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i64_ret_av_av__v:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, 0x50, v0
+; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v1, vcc
+; CHECK-NEXT: s_mov_b64 s[4:5], src_private_base
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, s5, v7
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[2:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
+; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB13_2
+; CHECK-NEXT: ; %bb.1: ; %atomicrmw.global
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[6:7], v[0:3] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
+; CHECK-NEXT: .LBB13_2: ; %Flow
+; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB13_4
+; CHECK-NEXT: ; %bb.3: ; %atomicrmw.private
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc
+; CHECK-NEXT: buffer_load_dword v4, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; CHECK-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: .LBB13_4: ; %atomicrmw.phi
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[4:5]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i64], ptr %ptr, i64 0, i64 10
+ %data0 = call i64 asm "; def $0", "=^VA"()
+ %data1 = call i64 asm "; def $0", "=^VA"()
+ %pair = cmpxchg ptr %gep.0, i64 %data0, i64 %data1 seq_cst monotonic
+ %result = extractvalue { i64, i1 } %pair, 0
+ call void asm "; use $0", "v"(i64 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i64_ret_av_av__a(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i64_ret_av_av__a:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_add_co_u32_e32 v4, vcc, 0x50, v0
+; CHECK-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v1, vcc
+; CHECK-NEXT: s_mov_b64 s[4:5], src_private_base
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, s5, v5
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[2:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ; implicit-def: $agpr0_agpr1
+; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB14_2
+; CHECK-NEXT: ; %bb.1: ; %atomicrmw.global
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v0
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v1
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
+; CHECK-NEXT: .LBB14_2: ; %Flow
+; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB14_4
+; CHECK-NEXT: ; %bb.3: ; %atomicrmw.private
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc
+; CHECK-NEXT: buffer_load_dword v4, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v5
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; CHECK-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen
+; CHECK-NEXT: .LBB14_4: ; %atomicrmw.phi
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i64], ptr %ptr, i64 0, i64 10
+ %data0 = call i64 asm "; def $0", "=^VA"()
+ %data1 = call i64 asm "; def $0", "=^VA"()
+ %pair = cmpxchg ptr %gep.0, i64 %data0, i64 %data1 seq_cst monotonic
+ %result = extractvalue { i64, i1 } %pair, 0
+ call void asm "; use $0", "a"(i64 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i64_ret_a_a__a(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i64_ret_a_a__a:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_add_co_u32_e32 v4, vcc, 0x50, v0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_accvgpr_read_b32 v3, a1
+; CHECK-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v1, vcc
+; CHECK-NEXT: v_accvgpr_read_b32 v2, a0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_accvgpr_read_b32 v0, a0
+; CHECK-NEXT: s_mov_b64 s[4:5], src_private_base
+; CHECK-NEXT: v_accvgpr_read_b32 v1, a1
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, s5, v5
+; CHECK-NEXT: ; implicit-def: $agpr0_agpr1
+; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB15_2
+; CHECK-NEXT: ; %bb.1: ; %atomicrmw.global
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v0
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v1
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
+; CHECK-NEXT: .LBB15_2: ; %Flow
+; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB15_4
+; CHECK-NEXT: ; %bb.3: ; %atomicrmw.private
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc
+; CHECK-NEXT: buffer_load_dword v4, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v5
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; CHECK-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen
+; CHECK-NEXT: .LBB15_4: ; %atomicrmw.phi
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i64], ptr %ptr, i64 0, i64 10
+ %data0 = call i64 asm "; def $0", "=a"()
+ %data1 = call i64 asm "; def $0", "=a"()
+ %pair = cmpxchg ptr %gep.0, i64 %data0, i64 %data1 seq_cst monotonic
+ %result = extractvalue { i64, i1 } %pair, 0
+ call void asm "; use $0", "a"(i64 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i64_ret_a_a__v(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i64_ret_a_a__v:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, 0x50, v0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_accvgpr_read_b32 v3, a1
+; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v1, vcc
+; CHECK-NEXT: v_accvgpr_read_b32 v2, a0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_accvgpr_read_b32 v0, a0
+; CHECK-NEXT: s_mov_b64 s[4:5], src_private_base
+; CHECK-NEXT: v_accvgpr_read_b32 v1, a1
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, s5, v7
+; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
+; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB16_2
+; CHECK-NEXT: ; %bb.1: ; %atomicrmw.global
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[6:7], v[0:3] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
+; CHECK-NEXT: .LBB16_2: ; %Flow
+; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB16_4
+; CHECK-NEXT: ; %bb.3: ; %atomicrmw.private
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc
+; CHECK-NEXT: buffer_load_dword v4, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; CHECK-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: .LBB16_4: ; %atomicrmw.phi
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[4:5]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i64], ptr %ptr, i64 0, i64 10
+ %data0 = call i64 asm "; def $0", "=a"()
+ %data1 = call i64 asm "; def $0", "=a"()
+ %pair = cmpxchg ptr %gep.0, i64 %data0, i64 %data1 seq_cst monotonic
+ %result = extractvalue { i64, i1 } %pair, 0
+ call void asm "; use $0", "v"(i64 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i64_ret_v_a__v(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i64_ret_v_a__v:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, 0x50, v0
+; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v1, vcc
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_accvgpr_read_b32 v0, a0
+; CHECK-NEXT: s_mov_b64 s[4:5], src_private_base
+; CHECK-NEXT: v_accvgpr_read_b32 v1, a1
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, s5, v7
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[2:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
+; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB17_2
+; CHECK-NEXT: ; %bb.1: ; %atomicrmw.global
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[6:7], v[0:3] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
+; CHECK-NEXT: .LBB17_2: ; %Flow
+; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB17_4
+; CHECK-NEXT: ; %bb.3: ; %atomicrmw.private
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc
+; CHECK-NEXT: buffer_load_dword v4, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; CHECK-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: .LBB17_4: ; %atomicrmw.phi
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[4:5]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i64], ptr %ptr, i64 0, i64 10
+ %data0 = call i64 asm "; def $0", "=v"()
+ %data1 = call i64 asm "; def $0", "=a"()
+ %pair = cmpxchg ptr %gep.0, i64 %data0, i64 %data1 seq_cst monotonic
+ %result = extractvalue { i64, i1 } %pair, 0
+ call void asm "; use $0", "v"(i64 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i64_ret_a_v__v(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i64_ret_a_v__v:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, 0x50, v0
+; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v1, vcc
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_accvgpr_read_b32 v3, a1
+; CHECK-NEXT: s_mov_b64 s[4:5], src_private_base
+; CHECK-NEXT: v_accvgpr_read_b32 v2, a0
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, s5, v7
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
+; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB18_2
+; CHECK-NEXT: ; %bb.1: ; %atomicrmw.global
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[6:7], v[0:3] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
+; CHECK-NEXT: .LBB18_2: ; %Flow
+; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB18_4
+; CHECK-NEXT: ; %bb.3: ; %atomicrmw.private
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc
+; CHECK-NEXT: buffer_load_dword v4, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; CHECK-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: .LBB18_4: ; %atomicrmw.phi
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[4:5]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i64], ptr %ptr, i64 0, i64 10
+ %data0 = call i64 asm "; def $0", "=a"()
+ %data1 = call i64 asm "; def $0", "=v"()
+ %pair = cmpxchg ptr %gep.0, i64 %data0, i64 %data1 seq_cst monotonic
+ %result = extractvalue { i64, i1 } %pair, 0
+ call void asm "; use $0", "v"(i64 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i64_ret_v_v__a(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i64_ret_v_v__a:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_add_co_u32_e32 v4, vcc, 0x50, v0
+; CHECK-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v1, vcc
+; CHECK-NEXT: s_mov_b64 s[4:5], src_private_base
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, s5, v5
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[2:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ; implicit-def: $agpr0_agpr1
+; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB19_2
+; CHECK-NEXT: ; %bb.1: ; %atomicrmw.global
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v0
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v1
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
+; CHECK-NEXT: .LBB19_2: ; %Flow
+; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB19_4
+; CHECK-NEXT: ; %bb.3: ; %atomicrmw.private
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc
+; CHECK-NEXT: buffer_load_dword v4, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v5
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; CHECK-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen
+; CHECK-NEXT: .LBB19_4: ; %atomicrmw.phi
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i64], ptr %ptr, i64 0, i64 10
+ %data0 = call i64 asm "; def $0", "=v"()
+ %data1 = call i64 asm "; def $0", "=v"()
+ %pair = cmpxchg ptr %gep.0, i64 %data0, i64 %data1 seq_cst monotonic
+ %result = extractvalue { i64, i1 } %pair, 0
+ call void asm "; use $0", "a"(i64 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i64_ret_av_v__av(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i64_ret_av_v__av:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, 0x50, v0
+; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v1, vcc
+; CHECK-NEXT: s_mov_b64 s[4:5], src_private_base
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, s5, v7
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[2:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
+; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB20_2
+; CHECK-NEXT: ; %bb.1: ; %atomicrmw.global
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[6:7], v[0:3] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
+; CHECK-NEXT: .LBB20_2: ; %Flow
+; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB20_4
+; CHECK-NEXT: ; %bb.3: ; %atomicrmw.private
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc
+; CHECK-NEXT: buffer_load_dword v4, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; CHECK-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: .LBB20_4: ; %atomicrmw.phi
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[4:5]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i64], ptr %ptr, i64 0, i64 10
+ %data0 = call i64 asm "; def $0", "=^VA"()
+ %data1 = call i64 asm "; def $0", "=v"()
+ %pair = cmpxchg ptr %gep.0, i64 %data0, i64 %data1 seq_cst monotonic
+ %result = extractvalue { i64, i1 } %pair, 0
+ call void asm "; use $0", "^VA"(i64 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i64_ret_v_av__av(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i64_ret_v_av__av:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, 0x50, v0
+; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v1, vcc
+; CHECK-NEXT: s_mov_b64 s[4:5], src_private_base
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, s5, v7
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[2:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
+; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB21_2
+; CHECK-NEXT: ; %bb.1: ; %atomicrmw.global
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[6:7], v[0:3] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
+; CHECK-NEXT: .LBB21_2: ; %Flow
+; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB21_4
+; CHECK-NEXT: ; %bb.3: ; %atomicrmw.private
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc
+; CHECK-NEXT: buffer_load_dword v4, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; CHECK-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: .LBB21_4: ; %atomicrmw.phi
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[4:5]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i64], ptr %ptr, i64 0, i64 10
+ %data0 = call i64 asm "; def $0", "=v"()
+ %data1 = call i64 asm "; def $0", "=^VA"()
+ %pair = cmpxchg ptr %gep.0, i64 %data0, i64 %data1 seq_cst monotonic
+ %result = extractvalue { i64, i1 } %pair, 0
+ call void asm "; use $0", "^VA"(i64 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i64_ret_av_a__av(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i64_ret_av_a__av:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, 0x50, v0
+; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v1, vcc
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_accvgpr_read_b32 v0, a0
+; CHECK-NEXT: s_mov_b64 s[4:5], src_private_base
+; CHECK-NEXT: v_accvgpr_read_b32 v1, a1
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, s5, v7
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[2:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
+; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB22_2
+; CHECK-NEXT: ; %bb.1: ; %atomicrmw.global
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[6:7], v[0:3] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
+; CHECK-NEXT: .LBB22_2: ; %Flow
+; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB22_4
+; CHECK-NEXT: ; %bb.3: ; %atomicrmw.private
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc
+; CHECK-NEXT: buffer_load_dword v4, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; CHECK-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: .LBB22_4: ; %atomicrmw.phi
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[4:5]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i64], ptr %ptr, i64 0, i64 10
+ %data0 = call i64 asm "; def $0", "=^VA"()
+ %data1 = call i64 asm "; def $0", "=a"()
+ %pair = cmpxchg ptr %gep.0, i64 %data0, i64 %data1 seq_cst monotonic
+ %result = extractvalue { i64, i1 } %pair, 0
+ call void asm "; use $0", "^VA"(i64 %result)
+ ret void
+}
+
+define void @flat_atomic_cmpxchg_i64_ret_a_av__av(ptr %ptr) #0 {
+; CHECK-LABEL: flat_atomic_cmpxchg_i64_ret_a_av__av:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, 0x50, v0
+; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v1, vcc
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def a[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_accvgpr_read_b32 v3, a1
+; CHECK-NEXT: s_mov_b64 s[4:5], src_private_base
+; CHECK-NEXT: v_accvgpr_read_b32 v2, a0
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, s5, v7
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[0:1]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
+; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB23_2
+; CHECK-NEXT: ; %bb.1: ; %atomicrmw.global
+; CHECK-NEXT: buffer_wbl2
+; CHECK-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[6:7], v[0:3] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_invl2
+; CHECK-NEXT: buffer_wbinvl1_vol
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
+; CHECK-NEXT: .LBB23_2: ; %Flow
+; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; CHECK-NEXT: s_cbranch_execz .LBB23_4
+; CHECK-NEXT: ; %bb.3: ; %atomicrmw.private
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc
+; CHECK-NEXT: buffer_load_dword v4, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; CHECK-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen
+; CHECK-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; CHECK-NEXT: .LBB23_4: ; %atomicrmw.phi
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[4:5]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %gep.0 = getelementptr inbounds [512 x i64], ptr %ptr, i64 0, i64 10
+ %data0 = call i64 asm "; def $0", "=a"()
+ %data1 = call i64 asm "; def $0", "=^VA"()
+ %pair = cmpxchg ptr %gep.0, i64 %data0, i64 %data1 seq_cst monotonic
+ %result = extractvalue { i64, i1 } %pair, 0
+ call void asm "; use $0", "^VA"(i64 %result)
+ ret void
+}
+
+attributes #0 = { nounwind "amdgpu-waves-per-eu"="10,10" }
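+; "amdgpu-waves-per-eu"="10,10" requests an occupancy of exactly 10 waves per
+; EU, shrinking the per-wave register budget (presumably to make the AGPR/VGPR
+; allocation decisions in these tests observable).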
+
----------------
Pierre-vh wrote:
Lots of trailing blank lines here.
https://github.com/llvm/llvm-project/pull/155820