[clang] [libc] [llvm] [mlir] [OpenMP 6.0] Allow only `byref` arguments with `need_device_addr` modifier on `adjust_args` clause (PR #149573)
Fazlay Rabbi via cfe-commits
cfe-commits at lists.llvm.org
Fri Jul 18 12:49:53 PDT 2025
https://github.com/mdfazlay updated https://github.com/llvm/llvm-project/pull/149573
>From 1eb5ac333408ca71ab5f5fcc40c8d093cf3ff94f Mon Sep 17 00:00:00 2001
From: Fazlay Rabbi <fazlay.rabbi at intel.com>
Date: Fri, 18 Jul 2025 12:29:08 -0700
Subject: [PATCH 01/12] [OpenMP 6.0] Allow only `byref` arguments with
`need_device_addr` modifier on `adjust_args` clause
If the `need_device_addr` adjust-op modifier is present, each list item
that appears in the clause must refer to an argument in the declaration of
the function variant that has a reference type.
Reference:
OpenMP 6.0 [Sec 9.6.2, page 332, line 31-33, adjust_args clause, Restrictions]
---
.../clang/Basic/DiagnosticParseKinds.td | 3 +++
clang/lib/Sema/SemaOpenMP.cpp | 15 +++++++++++
.../declare_variant_clauses_ast_print.cpp | 26 +++++++++----------
.../declare_variant_clauses_messages.cpp | 12 +++++++++
4 files changed, 43 insertions(+), 13 deletions(-)
diff --git a/clang/include/clang/Basic/DiagnosticParseKinds.td b/clang/include/clang/Basic/DiagnosticParseKinds.td
index 35903af998fbe..995684f472385 100644
--- a/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -1594,6 +1594,9 @@ def err_omp_unknown_adjust_args_op
def err_omp_declare_variant_wrong_clause : Error<
"expected %select{'match'|'match', 'adjust_args', or 'append_args'}0 clause "
"on 'omp declare variant' directive">;
+def err_omp_non_by_ref_need_device_addr_modifier_argument
+ : Error<"expected reference type argument on 'adjust_args' clause with "
+ "'need_device_addr' modifier">;
def err_omp_declare_variant_duplicate_nested_trait : Error<
"nested OpenMP context selector contains duplicated trait '%0'"
" in selector '%1' and set '%2' with different score">;
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 4ecc9b0d4c5c8..a26f8f1691435 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -7612,6 +7612,21 @@ void SemaOpenMP::ActOnOpenMPDeclareVariantDirective(
return;
}
+ // OpenMP 6.0 [Sec 9.6.2, page 332, line 31-33, adjust_args clause, Restrictions]
+ // If the `need_device_addr` adjust-op modifier is present, each list item
+ // that appears in the clause must refer to an argument in the declaration of
+ // the function variant that has a reference type
+ for (Expr *E : AdjustArgsNeedDeviceAddr) {
+ E = E->IgnoreParenImpCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (!VD->getType()->isReferenceType())
+ Diag(E->getExprLoc(),
+ diag::err_omp_non_by_ref_need_device_addr_modifier_argument);
+ }
+ }
+ }
+
auto *NewAttr = OMPDeclareVariantAttr::CreateImplicit(
getASTContext(), VariantRef, &TI,
const_cast<Expr **>(AdjustArgsNothing.data()), AdjustArgsNothing.size(),
diff --git a/clang/test/OpenMP/declare_variant_clauses_ast_print.cpp b/clang/test/OpenMP/declare_variant_clauses_ast_print.cpp
index c14e19cc8b7ec..e98a23e865cf8 100644
--- a/clang/test/OpenMP/declare_variant_clauses_ast_print.cpp
+++ b/clang/test/OpenMP/declare_variant_clauses_ast_print.cpp
@@ -38,11 +38,11 @@
#ifndef HEADER
#define HEADER
-void foo_v1(float *AAA, float *BBB, int *I) {return;}
-void foo_v2(float *AAA, float *BBB, int *I) {return;}
-void foo_v3(float *AAA, float *BBB, int *I) {return;}
+void foo_v1(float *AAA, float *BBB, int &CCC, int *I) {return;}
+void foo_v2(float *AAA, float *BBB, int &CCC, int *I) {return;}
+void foo_v3(float *AAA, float *BBB, int &CCC, int *I) {return;}
-//DUMP: FunctionDecl{{.*}} foo 'void (float *, float *, int *)'
+//DUMP: FunctionDecl{{.*}} foo 'void (float *, float *, int &, int *)'
//DUMP: OMPDeclareVariantAttr{{.*}}device={arch(x86, x86_64)}
//DUMP: DeclRefExpr{{.*}}Function{{.*}}foo_v3
//DUMP: DeclRefExpr{{.*}}ParmVar{{.*}}'I'
@@ -54,9 +54,9 @@ void foo_v3(float *AAA, float *BBB, int *I) {return;}
//DUMP: DeclRefExpr{{.*}}Function{{.*}}foo_v1
//DUMP: DeclRefExpr{{.*}}ParmVar{{.*}}'AAA'
//DUMP: DeclRefExpr{{.*}}ParmVar{{.*}}'BBB'
-//PRINT: #pragma omp declare variant(foo_v3) match(construct={dispatch}, device={arch(x86, x86_64)}) adjust_args(nothing:I) adjust_args(need_device_ptr:BBB) adjust_args(need_device_addr:AAA)
+//PRINT: #pragma omp declare variant(foo_v3) match(construct={dispatch}, device={arch(x86, x86_64)}) adjust_args(nothing:I) adjust_args(need_device_ptr:BBB) adjust_args(need_device_addr:CCC)
-//PRINT: #pragma omp declare variant(foo_v2) match(construct={dispatch}, device={arch(ppc)}) adjust_args(need_device_ptr:AAA) adjust_args(need_device_addr:BBB)
+//PRINT: #pragma omp declare variant(foo_v2) match(construct={dispatch}, device={arch(ppc)}) adjust_args(need_device_ptr:AAA) adjust_args(need_device_addr:CCC)
//PRINT: omp declare variant(foo_v1) match(construct={dispatch}, device={arch(arm)}) adjust_args(need_device_ptr:AAA,BBB)
@@ -67,33 +67,33 @@ void foo_v3(float *AAA, float *BBB, int *I) {return;}
#pragma omp declare variant(foo_v2) \
match(construct={dispatch}, device={arch(ppc)}), \
adjust_args(need_device_ptr:AAA) \
- adjust_args(need_device_addr:BBB)
+ adjust_args(need_device_addr:CCC)
#pragma omp declare variant(foo_v3) \
adjust_args(need_device_ptr:BBB) adjust_args(nothing:I) \
- adjust_args(need_device_addr:AAA) \
+ adjust_args(need_device_addr:CCC) \
match(construct={dispatch}, device={arch(x86,x86_64)})
-void foo(float *AAA, float *BBB, int *I) {return;}
+void foo(float *AAA, float *BBB, int &CCC, int *I) {return;}
-void Foo_Var(float *AAA, float *BBB, float *CCC) {return;}
+void Foo_Var(float *AAA, float *BBB, float *&CCC) {return;}
#pragma omp declare variant(Foo_Var) \
match(construct={dispatch}, device={arch(x86_64)}) \
adjust_args(need_device_ptr:AAA) adjust_args(nothing:BBB) \
adjust_args(need_device_addr:CCC)
template<typename T>
-void Foo(T *AAA, T *BBB, T *CCC) {return;}
+void Foo(T *AAA, T *BBB, T *&CCC) {return;}
//PRINT: #pragma omp declare variant(Foo_Var) match(construct={dispatch}, device={arch(x86_64)}) adjust_args(nothing:BBB) adjust_args(need_device_ptr:AAA) adjust_args(need_device_addr:CCC)
-//DUMP: FunctionDecl{{.*}} Foo 'void (T *, T *, T *)'
+//DUMP: FunctionDecl{{.*}} Foo 'void (T *, T *, T *&)'
//DUMP: OMPDeclareVariantAttr{{.*}}device={arch(x86_64)}
//DUMP: DeclRefExpr{{.*}}Function{{.*}}Foo_Var
//DUMP: DeclRefExpr{{.*}}ParmVar{{.*}}'BBB'
//DUMP: DeclRefExpr{{.*}}ParmVar{{.*}}'AAA'
//DUMP: DeclRefExpr{{.*}}ParmVar{{.*}}'CCC'
//
-//DUMP: FunctionDecl{{.*}} Foo 'void (float *, float *, float *)'
+//DUMP: FunctionDecl{{.*}} Foo 'void (float *, float *, float *&)'
//DUMP: OMPDeclareVariantAttr{{.*}}device={arch(x86_64)}
//DUMP: DeclRefExpr{{.*}}Function{{.*}}Foo_Var
//DUMP: DeclRefExpr{{.*}}ParmVar{{.*}}'BBB'
diff --git a/clang/test/OpenMP/declare_variant_clauses_messages.cpp b/clang/test/OpenMP/declare_variant_clauses_messages.cpp
index bca91481220ff..916d15fde9ff2 100644
--- a/clang/test/OpenMP/declare_variant_clauses_messages.cpp
+++ b/clang/test/OpenMP/declare_variant_clauses_messages.cpp
@@ -91,6 +91,7 @@ void foo_v1(float *AAA, float *BBB, int *I) { return; }
void foo_v2(float *AAA, float *BBB, int *I) { return; }
void foo_v3(float *AAA, float *BBB, int *I) { return; }
void foo_v4(float *AAA, float *BBB, int *I, omp_interop_t IOp) { return; }
+void foo_v5(float *AAA, float *BBB, int I) { return; }
#if _OPENMP >= 202011 // At least OpenMP 5.1
void vararg_foo(const char *fmt, omp_interop_t it, ...);
@@ -129,6 +130,11 @@ void vararg_bar2(const char *fmt) { return; }
adjust_args(nothing:J) \
match(construct={dispatch}, device={arch(x86,x86_64)})
+// expected-error at +2 {{expected reference type argument on 'adjust_args' clause with 'need_device_addr' modifier}}
+#pragma omp declare variant(foo_v1) \
+ adjust_args(need_device_addr:AAA) \
+ match(construct={dispatch}, device={arch(x86,x86_64)})
+
// expected-error at +2 {{expected reference to one of the parameters of function 'foo'}}
#pragma omp declare variant(foo_v3) \
adjust_args(nothing:Other) \
@@ -218,6 +224,12 @@ void vararg_bar2(const char *fmt) { return; }
void foo(float *AAA, float *BBB, int *I) { return; }
+// expected-error at +2 {{expected reference type argument on 'adjust_args' clause with 'need_device_addr' modifier}}
+#pragma omp declare variant(foo_v5) \
+ adjust_args(need_device_addr:I) \
+ match(construct={dispatch}, device={arch(x86,x86_64)})
+void foo5(float *AAA, float *BBB, int I) { return; }
+
#endif // NO_INTEROP_T_DEF
#ifdef C
>From e20c4053522793ca078d6ec0a5e61dfb59529e23 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Fri, 18 Jul 2025 10:33:59 -0700
Subject: [PATCH 02/12] [RISCV][IA] Precommit tests for deinterleaveN of
masked.load
---
.../RISCV/rvv/vector-deinterleave-load.ll | 161 ++++++++++++++++++
1 file changed, 161 insertions(+)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
index 9af92aa995f1f..578b67e284c5c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -538,3 +538,164 @@ define { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x
%res7 = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } %res6, <vscale x 8 x i8> %t7, 7
ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } %res7
}
+
+define {<vscale x 16 x i8>, <vscale x 16 x i8>} @masked_load_factor2(ptr %p) {
+; CHECK-LABEL: masked_load_factor2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vl4r.v v12, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vnsrl.wi v10, v12, 8
+; CHECK-NEXT: ret
+ %vec = call <vscale x 32 x i8> @llvm.masked.load(ptr %p, i32 4, <vscale x 32 x i1> splat (i1 true), <vscale x 32 x i8> poison)
+ %deinterleaved.results = call {<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> %vec)
+ ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %deinterleaved.results
+}
+
+define {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @masked_loat_factor4(ptr %p) {
+; CHECK-LABEL: masked_loat_factor4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 2
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; CHECK-NEXT: vl4r.v v8, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vlseg4e8.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %vec = call <vscale x 32 x i8> @llvm.masked.load(ptr %p, i32 4, <vscale x 32 x i1> splat (i1 true), <vscale x 32 x i8> poison)
+ %deinterleaved.results = call {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @llvm.vector.deinterleave4.nxv32i8(<vscale x 32 x i8> %vec)
+ ret {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} %deinterleaved.results
+}
+
+define {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @masked_loat_factor4_mask(ptr %p, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: masked_loat_factor4_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: add a3, a1, a2
+; CHECK-NEXT: vmv.v.v v9, v8
+; CHECK-NEXT: srli a4, a2, 2
+; CHECK-NEXT: vmv.v.v v10, v8
+; CHECK-NEXT: srli a5, a2, 3
+; CHECK-NEXT: vmv.v.v v11, v8
+; CHECK-NEXT: vsseg4e8.v v8, (a1)
+; CHECK-NEXT: vl1r.v v8, (a1)
+; CHECK-NEXT: add a1, a4, a5
+; CHECK-NEXT: vl1r.v v9, (a3)
+; CHECK-NEXT: add a3, a3, a2
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: vl1r.v v10, (a3)
+; CHECK-NEXT: vl1r.v v11, (a2)
+; CHECK-NEXT: vmsne.vi v9, v9, 0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: vmsne.vi v8, v10, 0
+; CHECK-NEXT: vmsne.vi v10, v11, 0
+; CHECK-NEXT: vsetvli zero, a4, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vx v0, v9, a5
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vx v0, v8, a4
+; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vx v0, v10, a1
+; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vlseg4e8.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %interleaved.mask = tail call <vscale x 32 x i1> @llvm.vector.interleave4.nxv32i1(<vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask)
+ %vec = call <vscale x 32 x i8> @llvm.masked.load(ptr %p, i32 4, <vscale x 32 x i1> %interleaved.mask, <vscale x 32 x i8> poison)
+ %deinterleaved.results = call {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @llvm.vector.deinterleave4.nxv32i8(<vscale x 32 x i8> %vec)
+ ret {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} %deinterleaved.results
+}
+
+; Negative test - some of the deinterleaved elements might come from the
+; passthru not the load
+define {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @masked_loat_factor4_passthru(ptr %p, <vscale x 8 x i1> %mask, <vscale x 32 x i8> %passthru) {
+; CHECK-LABEL: masked_loat_factor4_passthru:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT: add a3, a1, a2
+; CHECK-NEXT: vmv.v.v v13, v12
+; CHECK-NEXT: srli a4, a2, 2
+; CHECK-NEXT: vmv.v.v v14, v12
+; CHECK-NEXT: srli a5, a2, 3
+; CHECK-NEXT: vmv.v.v v15, v12
+; CHECK-NEXT: vsseg4e8.v v12, (a1)
+; CHECK-NEXT: vl1r.v v12, (a1)
+; CHECK-NEXT: add a1, a4, a5
+; CHECK-NEXT: vl1r.v v13, (a3)
+; CHECK-NEXT: add a3, a3, a2
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: vl1r.v v14, (a3)
+; CHECK-NEXT: vl1r.v v15, (a2)
+; CHECK-NEXT: vmsne.vi v13, v13, 0
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vmsne.vi v12, v14, 0
+; CHECK-NEXT: vmsne.vi v14, v15, 0
+; CHECK-NEXT: vsetvli zero, a4, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vx v0, v13, a5
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vx v0, v12, a4
+; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vx v0, v14, a1
+; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
+; CHECK-NEXT: vle8.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vlseg4e8.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %interleaved.mask = tail call <vscale x 32 x i1> @llvm.vector.interleave4.nxv32i1(<vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask)
+ %vec = call <vscale x 32 x i8> @llvm.masked.load(ptr %p, i32 4, <vscale x 32 x i1> %interleaved.mask, <vscale x 32 x i8> %passthru)
+ %deinterleaved.results = call {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} @llvm.vector.deinterleave4.nxv32i8(<vscale x 32 x i8> %vec)
+ ret {<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>} %deinterleaved.results
+}
>From 83ae371ff6285fa265f7cad3897340ba1fdee92f Mon Sep 17 00:00:00 2001
From: Han-Chung Wang <hanhan0912 at gmail.com>
Date: Fri, 18 Jul 2025 10:42:42 -0700
Subject: [PATCH 03/12] [mlir][linalg] Allow pack consumer fusion if the tile
size is greater than dimension size. (#149438)
This happens only when you use larger tile size, which is greater than
or equal to the dimension size. In this case, it is a full slice, so it
is fusible.
The IR can be generated during the TileAndFuse process. It is hard to
fix in such driver, so we enable the naive fusion for the case.
---------
Signed-off-by: hanhanW <hanhan0912 at gmail.com>
---
.../Linalg/Transforms/TilingInterfaceImpl.cpp | 6 ++-
.../tile-and-fuse-consumer.mlir | 50 +++++++++++++++++++
2 files changed, 54 insertions(+), 2 deletions(-)
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
index b059bcc025315..28d99b130963a 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
@@ -911,14 +911,16 @@ struct PackOpTiling
// If a dimension is not tiled, it is always valid to fuse the pack op,
// even if the op has padding semantics. Because it always generates a
- // full slice along the dimension.
+ // full slice along the dimension. The tile sizes are for unpacked
+ // domain, i.e., `srcDimSize`, so `tileSize < srcDimSize` means that the
+ // dimension is tiled.
// TODO: It could be untiled if the `srcDimSize` is dynamic. It is a
// hard check to determine if a dimension is tiled or not.
int64_t srcDimSize = packOp.getSourceType().getDimSize(dim);
int64_t destDimSize = outerShapeWithoutTranspose[dim];
bool isTiled = failed(cstTileSize) ||
ShapedType::isDynamic(srcDimSize) ||
- cstTileSize.value() != srcDimSize;
+ cstTileSize.value() < srcDimSize;
if (!isTiled) {
outerDimOffsets.push_back(offsets[dim]);
if (ShapedType::isStatic(destDimSize)) {
diff --git a/mlir/test/Interfaces/TilingInterface/tile-and-fuse-consumer.mlir b/mlir/test/Interfaces/TilingInterface/tile-and-fuse-consumer.mlir
index 20164d5dfd91a..cdbca7228ded3 100644
--- a/mlir/test/Interfaces/TilingInterface/tile-and-fuse-consumer.mlir
+++ b/mlir/test/Interfaces/TilingInterface/tile-and-fuse-consumer.mlir
@@ -451,6 +451,56 @@ module attributes {transform.with_named_sequence} {
// -----
+#map = affine_map<(d0) -> (-d0 + 4, 16)>
+func.func @fuse_pack_consumer_if_single_iteration(%arg0: tensor<4x4xf32>) -> tensor<1x4x16x1xf32> {
+ %0 = tensor.empty() : tensor<1x4x16x1xf32>
+ %1 = tensor.empty() : tensor<4x4xf32>
+ %2 = scf.forall (%arg1) = (0) to (4) step (16) shared_outs(%arg2 = %1) -> (tensor<4x4xf32>) {
+ %3 = affine.min #map(%arg1)
+ %extracted_slice = tensor.extract_slice %arg0[%arg1, 0] [%3, 4] [1, 1] : tensor<4x4xf32> to tensor<?x4xf32>
+ %extracted_slice_0 = tensor.extract_slice %arg2[%arg1, 0] [%3, 4] [1, 1] : tensor<4x4xf32> to tensor<?x4xf32>
+ %4 = linalg.exp ins(%extracted_slice : tensor<?x4xf32>) outs(%extracted_slice_0 : tensor<?x4xf32>) -> tensor<?x4xf32>
+ scf.forall.in_parallel {
+ tensor.parallel_insert_slice %4 into %arg2[%arg1, 0] [%3, 4] [1, 1] : tensor<?x4xf32> into tensor<4x4xf32>
+ }
+ }
+ %cst = arith.constant 0.000000e+00 : f32
+ %pack = linalg.pack %2 padding_value(%cst : f32) outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 1] into %0 : tensor<4x4xf32> -> tensor<1x4x16x1xf32>
+ return %pack : tensor<1x4x16x1xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["tensor.parallel_insert_slice"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.match ops{["scf.forall"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %consumer, %fused_consumer = transform.test.fuse_consumer %0 in(%1) : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+ transform.yield
+ }
+}
+// CHECK: #[[MAP:.*]] = affine_map<(d0) -> (-d0 + 4, 16)>
+// CHECK: func.func @fuse_pack_consumer_if_single_iteration(
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK-DAG: %[[PACK_INIT:.*]] = tensor.empty() : tensor<1x4x16x1xf32>
+// CHECK-DAG: %[[ELEM_INIT:.*]] = tensor.empty() : tensor<4x4xf32>
+// CHECK-DAG: %[[PAD_VAL:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %{{.*}}:2 = scf.forall (%[[IV:.*]]) = (0) to (4) step (16)
+// CHECK-SAME: shared_outs(%[[ELEM_OUT_ARG:.*]] = %[[ELEM_INIT]], %[[PACK_OUT_ARG:.*]] = %[[PACK_INIT]])
+// CHECK-DAG: %[[SIZE:.+]] = affine.min #[[MAP]](%[[IV]])
+// CHECK-DAG: %[[ELEM_SRC:.*]] = tensor.extract_slice %[[ARG0]][%[[IV]], 0] [%[[SIZE]], 4] [1, 1]
+// CHECK-DAG: %[[ELEM_DEST:.*]] = tensor.extract_slice %[[ELEM_OUT_ARG]][%[[IV]], 0] [%[[SIZE]], 4] [1, 1]
+// CHECK: %[[ELEM:.*]] = linalg.exp
+// CHECK-SAME: ins(%[[ELEM_SRC]]
+// CHECK-SAME: outs(%[[ELEM_DEST]]
+// CHECK-DAG: %[[TILED_PACK_DEST:.*]] = tensor.extract_slice %[[PACK_OUT_ARG]][%[[IV]], 0, 0, 0] [1, 4, 16, 1] [1, 1, 1, 1]
+// CHECK: %[[PACK:.*]] = linalg.pack %[[ELEM]]
+// CHECK-SAME: padding_value(%[[PAD_VAL]] : f32)
+// CHECK-SAME: outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 1]
+// CHECK-SAME: into %[[TILED_PACK_DEST]]
+// CHECK: scf.forall.in_parallel {
+// CHECK: tensor.parallel_insert_slice %[[ELEM]] into %[[ELEM_OUT_ARG]][%[[IV]], 0] [%[[SIZE]], 4] [1, 1]
+// CHECK: tensor.parallel_insert_slice %[[PACK]] into %[[PACK_OUT_ARG]][%[[IV]], 0, 0, 0] [1, 4, 16, 1] [1, 1, 1, 1]
+
+// -----
func.func @fuse_perfect_tiling_pack_consumer_with_outer_dims_perm(%arg0: tensor<64x32xf32>, %arg1: tensor<64x32xf32>, %arg2: tensor<2x64x16x1xf32>) -> tensor<2x64x16x1xf32> {
%0 = scf.forall (%arg3) = (0) to (32) step (16) shared_outs(%arg4 = %arg1) -> (tensor<64x32xf32>) {
>From fd247b5418e6e248ff14bc0131b2635e4ab886aa Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Fri, 18 Jul 2025 10:43:51 -0700
Subject: [PATCH 04/12] [ADT] Use a range-based for loop instead of
llvm::for_each (NFC) (#149542)
LLVM Coding Standards discourages llvm::for_each unless we already
have a callable.
---
llvm/include/llvm/ADT/CombinationGenerator.h | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/llvm/include/llvm/ADT/CombinationGenerator.h b/llvm/include/llvm/ADT/CombinationGenerator.h
index 6100aa9812293..bbdbd9bfa1be3 100644
--- a/llvm/include/llvm/ADT/CombinationGenerator.h
+++ b/llvm/include/llvm/ADT/CombinationGenerator.h
@@ -118,10 +118,9 @@ class CombinationGenerator {
: VariablesChoices(VariablesChoices_) {
#ifndef NDEBUG
assert(!VariablesChoices.empty() && "There should be some variables.");
- llvm::for_each(VariablesChoices, [](ArrayRef<choice_type> VariableChoices) {
+ for (ArrayRef<choice_type> VariableChoices : VariablesChoices)
assert(!VariableChoices.empty() &&
"There must always be some choice, at least a placeholder one.");
- });
#endif
}
>From fd7eb46758bc549d9e0cd6cc341c1ae51afd5a9f Mon Sep 17 00:00:00 2001
From: Peter Collingbourne <peter at pcc.me.uk>
Date: Fri, 18 Jul 2025 10:48:42 -0700
Subject: [PATCH 05/12] Add section type to support CFI jump table relaxation.
For context see main pull request: #147424.
Reviewers: MaskRay
Reviewed By: MaskRay
Pull Request: https://github.com/llvm/llvm-project/pull/149259
---
llvm/docs/Extensions.rst | 20 ++++++++++++++++++++
llvm/include/llvm/BinaryFormat/ELF.h | 1 +
llvm/lib/MC/MCParser/ELFAsmParser.cpp | 4 +++-
llvm/lib/MC/MCSectionELF.cpp | 4 +++-
llvm/lib/Object/ELF.cpp | 1 +
llvm/test/MC/AsmParser/llvm_section_types.s | 17 ++++++++++++++++-
6 files changed, 44 insertions(+), 3 deletions(-)
diff --git a/llvm/docs/Extensions.rst b/llvm/docs/Extensions.rst
index bad72c6ca8295..d8fb87b6998ad 100644
--- a/llvm/docs/Extensions.rst
+++ b/llvm/docs/Extensions.rst
@@ -581,6 +581,26 @@ This section stores pairs of (jump table address, number of entries).
This information is useful for tools that need to statically reconstruct
the control flow of executables.
+``SHT_LLVM_CFI_JUMP_TABLE`` Section (CFI jump table)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+This section contains the instructions that make up a `CFI jump table`_.
+It is expected to be ``SHF_ALLOC`` and may be laid out like a normal
+section. The ``SHT_LLVM_CFI_JUMP_TABLE`` section type gives the linker
+permission to modify the section in ways that would not normally be
+permitted, in order to optimize calls via the jump table.
+
+Each ``sh_entsize`` sized slice of a section of this type containing
+exactly one relocation may be considered to be a jump table entry
+that branches to the target of the relocation. This allows the linker
+to replace the jump table entry with the function body if it is small
+enough, or if the function is the last function in the jump table.
+
+A section of this type does not have to be placed according to its
+name. The linker may place the section in whichever output section it
+sees fit (generally the section that would provide the best locality).
+
+.. _CFI jump table: https://clang.llvm.org/docs/ControlFlowIntegrityDesign.html#forward-edge-cfi-for-indirect-function-calls
+
CodeView-Dependent
------------------
diff --git a/llvm/include/llvm/BinaryFormat/ELF.h b/llvm/include/llvm/BinaryFormat/ELF.h
index 6bf2e177b5d40..e4f82ad96a084 100644
--- a/llvm/include/llvm/BinaryFormat/ELF.h
+++ b/llvm/include/llvm/BinaryFormat/ELF.h
@@ -1159,6 +1159,7 @@ enum : unsigned {
SHT_LLVM_OFFLOADING = 0x6fff4c0b, // LLVM device offloading data.
SHT_LLVM_LTO = 0x6fff4c0c, // .llvm.lto for fat LTO.
SHT_LLVM_JT_SIZES = 0x6fff4c0d, // LLVM jump tables sizes.
+ SHT_LLVM_CFI_JUMP_TABLE = 0x6fff4c0e, // LLVM CFI jump table.
// Android's experimental support for SHT_RELR sections.
// https://android.googlesource.com/platform/bionic/+/b7feec74547f84559a1467aca02708ff61346d2a/libc/include/elf.h#512
SHT_ANDROID_RELR = 0x6fffff00, // Relocation entries; only offsets.
diff --git a/llvm/lib/MC/MCParser/ELFAsmParser.cpp b/llvm/lib/MC/MCParser/ELFAsmParser.cpp
index ec8b40261a6ca..c7c3df330fc94 100644
--- a/llvm/lib/MC/MCParser/ELFAsmParser.cpp
+++ b/llvm/lib/MC/MCParser/ELFAsmParser.cpp
@@ -571,7 +571,7 @@ bool ELFAsmParser::parseSectionArguments(bool IsPush, SMLoc loc) {
return TokError("expected end of directive");
}
- if (Mergeable)
+ if (Mergeable || TypeName == "llvm_cfi_jump_table")
if (parseMergeSize(Size))
return true;
if (Flags & ELF::SHF_LINK_ORDER)
@@ -637,6 +637,8 @@ bool ELFAsmParser::parseSectionArguments(bool IsPush, SMLoc loc) {
Type = ELF::SHT_LLVM_LTO;
else if (TypeName == "llvm_jt_sizes")
Type = ELF::SHT_LLVM_JT_SIZES;
+ else if (TypeName == "llvm_cfi_jump_table")
+ Type = ELF::SHT_LLVM_CFI_JUMP_TABLE;
else if (TypeName.getAsInteger(0, Type))
return TokError("unknown section type");
}
diff --git a/llvm/lib/MC/MCSectionELF.cpp b/llvm/lib/MC/MCSectionELF.cpp
index cc7cdf2fe4d1a..299fe40706e3a 100644
--- a/llvm/lib/MC/MCSectionELF.cpp
+++ b/llvm/lib/MC/MCSectionELF.cpp
@@ -176,11 +176,13 @@ void MCSectionELF::printSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
OS << "llvm_lto";
else if (Type == ELF::SHT_LLVM_JT_SIZES)
OS << "llvm_jt_sizes";
+ else if (Type == ELF::SHT_LLVM_CFI_JUMP_TABLE)
+ OS << "llvm_cfi_jump_table";
else
OS << "0x" << Twine::utohexstr(Type);
if (EntrySize) {
- assert(Flags & ELF::SHF_MERGE);
+ assert((Flags & ELF::SHF_MERGE) || Type == ELF::SHT_LLVM_CFI_JUMP_TABLE);
OS << "," << EntrySize;
}
diff --git a/llvm/lib/Object/ELF.cpp b/llvm/lib/Object/ELF.cpp
index af073f6a1a917..788c6020a7f99 100644
--- a/llvm/lib/Object/ELF.cpp
+++ b/llvm/lib/Object/ELF.cpp
@@ -321,6 +321,7 @@ StringRef llvm::object::getELFSectionTypeName(uint32_t Machine, unsigned Type) {
STRINGIFY_ENUM_CASE(ELF, SHT_LLVM_OFFLOADING);
STRINGIFY_ENUM_CASE(ELF, SHT_LLVM_LTO);
STRINGIFY_ENUM_CASE(ELF, SHT_LLVM_JT_SIZES)
+ STRINGIFY_ENUM_CASE(ELF, SHT_LLVM_CFI_JUMP_TABLE)
STRINGIFY_ENUM_CASE(ELF, SHT_GNU_SFRAME);
STRINGIFY_ENUM_CASE(ELF, SHT_GNU_ATTRIBUTES);
STRINGIFY_ENUM_CASE(ELF, SHT_GNU_HASH);
diff --git a/llvm/test/MC/AsmParser/llvm_section_types.s b/llvm/test/MC/AsmParser/llvm_section_types.s
index 147b1499d2b88..83e5db0256647 100644
--- a/llvm/test/MC/AsmParser/llvm_section_types.s
+++ b/llvm/test/MC/AsmParser/llvm_section_types.s
@@ -1,22 +1,34 @@
-## Verify that LLVM-specific section types are correctly inferred from assembly input.
+## Verify that LLVM-specific section types are correctly inferred from assembly input and printed.
+# RUN: llvm-mc -triple i386-pc-linux %s | FileCheck --check-prefix=ASM %s
# RUN: llvm-mc -triple i386-pc-linux -filetype=obj -o %t %s
# RUN: llvm-readobj -S %t | FileCheck %s
+# ASM: .section .section1,"", at llvm_bb_addr_map
.section .section1,"", at llvm_bb_addr_map
.byte 1
+# ASM: .section .section2,"", at llvm_call_graph_profile
.section .section2,"", at llvm_call_graph_profile
.byte 1
+# ASM: .section .section3,"", at llvm_odrtab
.section .section3,"", at llvm_odrtab
.byte 1
+# ASM: .section .section4,"", at llvm_linker_options
.section .section4,"", at llvm_linker_options
.byte 1
+# ASM: .section .section5,"", at llvm_sympart
.section .section5,"", at llvm_sympart
.byte 1
+# ASM: .section .section6,"", at llvm_dependent_libraries
.section .section6,"", at llvm_dependent_libraries
.byte 1
+# ASM: .section .section7,"", at llvm_offloading
.section .section7,"", at llvm_offloading
.byte 1
+# ASM: .section .section8,"", at llvm_lto
.section .section8,"", at llvm_lto
.byte 1
+# ASM: .section .section9,"", at llvm_cfi_jump_table,1
+.section .section9,"", at llvm_cfi_jump_table,1
+.byte 1
# CHECK: Name: .section1
# CHECK-NEXT: Type: SHT_LLVM_BB_ADDR_MAP
@@ -34,3 +46,6 @@
# CHECK-NEXT: Type: SHT_LLVM_OFFLOADING
# CHECK: Name: .section8
# CHECK-NEXT: Type: SHT_LLVM_LTO
+# CHECK: Name: .section9
+# CHECK-NEXT: Type: SHT_LLVM_CFI_JUMP_TABLE
+# CHECK: EntrySize: 1
>From 44c134c24e704c226b8087bb25e039b702c4bd49 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Fri, 18 Jul 2025 11:04:18 -0700
Subject: [PATCH 06/12] [RISCV][IA] Factor out code for extracting operands
from mem insts [nfc] (#149344)
We're going to end up repeating the operand extraction four times once
all of the routines have been updated to support both plain load/store
and vp.load/vp.store. I plan to add masked.load/masked.store in the near
future, and we'd need to add that to each of the four cases. Instead,
factor out a single copy of the operand normalization.
---
.../Target/RISCV/RISCVInterleavedAccess.cpp | 138 +++++++-----------
1 file changed, 56 insertions(+), 82 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
index 38cc0ce00a352..dd68a5556cdb5 100644
--- a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
@@ -102,6 +102,56 @@ static bool isMultipleOfN(const Value *V, const DataLayout &DL, unsigned N) {
return false;
}
+/// Do the common operand retrieval and validation required by the
+/// routines below.
+static bool getMemOperands(unsigned Factor, VectorType *VTy, Type *XLenTy,
+ Instruction *I, Value *&Ptr, Value *&Mask,
+ Value *&VL, Align &Alignment) {
+
+ IRBuilder<> Builder(I);
+ const DataLayout &DL = I->getDataLayout();
+ ElementCount EC = VTy->getElementCount();
+ if (auto *LI = dyn_cast<LoadInst>(I)) {
+ assert(LI->isSimple());
+ Ptr = LI->getPointerOperand();
+ Alignment = LI->getAlign();
+ assert(!Mask && "Unexpected mask on a load");
+ Mask = Builder.getAllOnesMask(EC);
+ VL = isa<FixedVectorType>(VTy) ? Builder.CreateElementCount(XLenTy, EC)
+ : Constant::getAllOnesValue(XLenTy);
+ return true;
+ }
+ if (auto *SI = dyn_cast<StoreInst>(I)) {
+ assert(SI->isSimple());
+ Ptr = SI->getPointerOperand();
+ Alignment = SI->getAlign();
+ assert(!Mask && "Unexpected mask on a store");
+ Mask = Builder.getAllOnesMask(EC);
+ VL = isa<FixedVectorType>(VTy) ? Builder.CreateElementCount(XLenTy, EC)
+ : Constant::getAllOnesValue(XLenTy);
+ return true;
+ }
+ auto *VPLdSt = cast<VPIntrinsic>(I);
+ assert((VPLdSt->getIntrinsicID() == Intrinsic::vp_load ||
+ VPLdSt->getIntrinsicID() == Intrinsic::vp_store) &&
+ "Unexpected intrinsic");
+ Ptr = VPLdSt->getMemoryPointerParam();
+ Alignment = VPLdSt->getPointerAlignment().value_or(
+ DL.getABITypeAlign(VTy->getElementType()));
+
+ assert(Mask && "vp.load and vp.store needs a mask!");
+
+ Value *WideEVL = VPLdSt->getVectorLengthParam();
+ // Conservatively check if EVL is a multiple of factor, otherwise some
+ // (trailing) elements might be lost after the transformation.
+ if (!isMultipleOfN(WideEVL, I->getDataLayout(), Factor))
+ return false;
+
+ auto *FactorC = ConstantInt::get(WideEVL->getType(), Factor);
+ VL = Builder.CreateZExt(Builder.CreateExactUDiv(WideEVL, FactorC), XLenTy);
+ return true;
+}
+
/// Lower an interleaved load into a vlsegN intrinsic.
///
/// E.g. Lower an interleaved load (Factor = 2):
@@ -127,32 +177,8 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
Value *Ptr, *VL;
Align Alignment;
- if (auto *LI = dyn_cast<LoadInst>(Load)) {
- assert(LI->isSimple());
- Ptr = LI->getPointerOperand();
- Alignment = LI->getAlign();
- assert(!Mask && "Unexpected mask on a load\n");
- Mask = Builder.getAllOnesMask(VTy->getElementCount());
- VL = Builder.CreateElementCount(XLenTy, VTy->getElementCount());
- } else {
- auto *VPLoad = cast<VPIntrinsic>(Load);
- assert(VPLoad->getIntrinsicID() == Intrinsic::vp_load &&
- "Unexpected intrinsic");
- Ptr = VPLoad->getMemoryPointerParam();
- Alignment = VPLoad->getPointerAlignment().value_or(
- DL.getABITypeAlign(VTy->getElementType()));
-
- assert(Mask && "vp.load needs a mask!");
-
- Value *WideEVL = VPLoad->getVectorLengthParam();
- // Conservatively check if EVL is a multiple of factor, otherwise some
- // (trailing) elements might be lost after the transformation.
- if (!isMultipleOfN(WideEVL, DL, Factor))
- return false;
-
- auto *FactorC = ConstantInt::get(WideEVL->getType(), Factor);
- VL = Builder.CreateZExt(Builder.CreateExactUDiv(WideEVL, FactorC), XLenTy);
- }
+ if (!getMemOperands(Factor, VTy, XLenTy, Load, Ptr, Mask, VL, Alignment))
+ return false;
Type *PtrTy = Ptr->getType();
unsigned AS = PtrTy->getPointerAddressSpace();
@@ -296,34 +322,8 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
Value *Ptr, *VL;
Align Alignment;
- if (auto *LI = dyn_cast<LoadInst>(Load)) {
- assert(LI->isSimple());
- Ptr = LI->getPointerOperand();
- Alignment = LI->getAlign();
- assert(!Mask && "Unexpected mask on a load\n");
- Mask = Builder.getAllOnesMask(ResVTy->getElementCount());
- VL = isa<FixedVectorType>(ResVTy)
- ? Builder.CreateElementCount(XLenTy, ResVTy->getElementCount())
- : Constant::getAllOnesValue(XLenTy);
- } else {
- auto *VPLoad = cast<VPIntrinsic>(Load);
- assert(VPLoad->getIntrinsicID() == Intrinsic::vp_load &&
- "Unexpected intrinsic");
- Ptr = VPLoad->getMemoryPointerParam();
- Alignment = VPLoad->getPointerAlignment().value_or(
- DL.getABITypeAlign(ResVTy->getElementType()));
-
- assert(Mask && "vp.load needs a mask!");
-
- Value *WideEVL = VPLoad->getVectorLengthParam();
- // Conservatively check if EVL is a multiple of factor, otherwise some
- // (trailing) elements might be lost after the transformation.
- if (!isMultipleOfN(WideEVL, Load->getDataLayout(), Factor))
- return false;
-
- auto *FactorC = ConstantInt::get(WideEVL->getType(), Factor);
- VL = Builder.CreateZExt(Builder.CreateExactUDiv(WideEVL, FactorC), XLenTy);
- }
+ if (!getMemOperands(Factor, ResVTy, XLenTy, Load, Ptr, Mask, VL, Alignment))
+ return false;
Type *PtrTy = Ptr->getType();
unsigned AS = PtrTy->getPointerAddressSpace();
@@ -385,34 +385,8 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
Value *Ptr, *VL;
Align Alignment;
- if (auto *SI = dyn_cast<StoreInst>(Store)) {
- assert(SI->isSimple());
- Ptr = SI->getPointerOperand();
- Alignment = SI->getAlign();
- assert(!Mask && "Unexpected mask on a store");
- Mask = Builder.getAllOnesMask(InVTy->getElementCount());
- VL = isa<FixedVectorType>(InVTy)
- ? Builder.CreateElementCount(XLenTy, InVTy->getElementCount())
- : Constant::getAllOnesValue(XLenTy);
- } else {
- auto *VPStore = cast<VPIntrinsic>(Store);
- assert(VPStore->getIntrinsicID() == Intrinsic::vp_store &&
- "Unexpected intrinsic");
- Ptr = VPStore->getMemoryPointerParam();
- Alignment = VPStore->getPointerAlignment().value_or(
- DL.getABITypeAlign(InVTy->getElementType()));
-
- assert(Mask && "vp.store needs a mask!");
-
- Value *WideEVL = VPStore->getVectorLengthParam();
- // Conservatively check if EVL is a multiple of factor, otherwise some
- // (trailing) elements might be lost after the transformation.
- if (!isMultipleOfN(WideEVL, DL, Factor))
- return false;
-
- auto *FactorC = ConstantInt::get(WideEVL->getType(), Factor);
- VL = Builder.CreateZExt(Builder.CreateExactUDiv(WideEVL, FactorC), XLenTy);
- }
+ if (!getMemOperands(Factor, InVTy, XLenTy, Store, Ptr, Mask, VL, Alignment))
+ return false;
Type *PtrTy = Ptr->getType();
unsigned AS = Ptr->getType()->getPointerAddressSpace();
if (!isLegalInterleavedAccessType(InVTy, Factor, Alignment, AS, DL))
>From 5ec5d4fa2275d49f3b500ce3dfd5490deee59358 Mon Sep 17 00:00:00 2001
From: Daniel Chen <cdchen at ca.ibm.com>
Date: Fri, 18 Jul 2025 14:14:27 -0400
Subject: [PATCH 07/12] Static_cast std::size_t to build flang_rt in 32-bit.
(#149529)
---
flang-rt/lib/runtime/descriptor.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/flang-rt/lib/runtime/descriptor.cpp b/flang-rt/lib/runtime/descriptor.cpp
index b723acdd27bd5..e9301bd0307d2 100644
--- a/flang-rt/lib/runtime/descriptor.cpp
+++ b/flang-rt/lib/runtime/descriptor.cpp
@@ -85,7 +85,7 @@ RT_API_ATTRS void Descriptor::Establish(int characterKind,
RT_API_ATTRS void Descriptor::Establish(const typeInfo::DerivedType &dt,
void *p, int rank, const SubscriptValue *extent,
ISO::CFI_attribute_t attribute) {
- std::size_t elementBytes{dt.sizeInBytes()};
+ auto elementBytes{static_cast<std::size_t>(dt.sizeInBytes())};
ISO::EstablishDescriptor(
&raw_, p, attribute, CFI_type_struct, elementBytes, rank, extent);
if (elementBytes == 0) {
>From aa831f5a75be48f4ca04f192e9263b72098e59e4 Mon Sep 17 00:00:00 2001
From: Tobias Decking <Tobias.Decking at gmail.com>
Date: Fri, 18 Jul 2025 20:14:34 +0200
Subject: [PATCH 08/12] [X86] Ensure that bit reversals of byte vectors are
properly lowered on pure GFNI targets (#148304)
Fixes #148238.
When GFNI is present, custom bit reversal lowerings for scalar integers
become active. They work by swapping the bytes in the scalar value and
then reversing bits in a vector of bytes. However, the custom bit
reversal lowering for a vector of bytes is disabled if GFNI is present
in isolation, resulting in messed-up code.
---------
Co-authored-by: Simon Pilgrim <llvm-dev at redking.me.uk>
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 9 +-
llvm/test/CodeGen/X86/vector-bitreverse.ll | 388 ++++++++++++++++-----
2 files changed, 313 insertions(+), 84 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d91ea1ea1bb1b..62811244dcfee 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1323,11 +1323,15 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
}
- if (Subtarget.hasGFNI()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasGFNI()) {
setOperationAction(ISD::BITREVERSE, MVT::i8, Custom);
setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
setOperationAction(ISD::BITREVERSE, MVT::i64, Custom);
+
+ for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
+ setOperationAction(ISD::BITREVERSE, VT, Custom);
+ }
}
if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
@@ -32694,7 +32698,8 @@ static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
if (Subtarget.hasXOP() && !VT.is512BitVector())
return LowerBITREVERSE_XOP(Op, DAG);
- assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
+ assert((Subtarget.hasSSSE3() || Subtarget.hasGFNI()) &&
+ "SSSE3 or GFNI required for BITREVERSE");
SDValue In = Op.getOperand(0);
SDLoc DL(Op);
diff --git a/llvm/test/CodeGen/X86/vector-bitreverse.ll b/llvm/test/CodeGen/X86/vector-bitreverse.ll
index 5dcf19013f0b7..834dfd63432b0 100644
--- a/llvm/test/CodeGen/X86/vector-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/vector-bitreverse.ll
@@ -8,7 +8,8 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX,AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=ALL,XOP,XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=ALL,XOP,XOPAVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3,+gfni | FileCheck %s --check-prefixes=ALL,GFNISSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+gfni | FileCheck %s --check-prefixes=ALL,GFNISSE,GFNISSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3,+gfni | FileCheck %s --check-prefixes=ALL,GFNISSE,GFNISSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+gfni | FileCheck %s --check-prefixes=ALL,GFNIAVX,GFNIAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+gfni | FileCheck %s --check-prefixes=ALL,GFNIAVX,GFNIAVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+gfni | FileCheck %s --check-prefixes=ALL,GFNIAVX,GFNIAVX512,GFNIAVX512F
@@ -492,11 +493,20 @@ define <8 x i16> @test_bitreverse_v8i16(<8 x i16> %a) nounwind {
; XOP-NEXT: vpperm {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
;
-; GFNISSE-LABEL: test_bitreverse_v8i16:
-; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
-; GFNISSE-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; GFNISSE-NEXT: retq
+; GFNISSE2-LABEL: test_bitreverse_v8i16:
+; GFNISSE2: # %bb.0:
+; GFNISSE2-NEXT: movdqa %xmm0, %xmm1
+; GFNISSE2-NEXT: psrlw $8, %xmm1
+; GFNISSE2-NEXT: psllw $8, %xmm0
+; GFNISSE2-NEXT: por %xmm1, %xmm0
+; GFNISSE2-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; GFNISSE2-NEXT: retq
+;
+; GFNISSSE3-LABEL: test_bitreverse_v8i16:
+; GFNISSSE3: # %bb.0:
+; GFNISSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; GFNISSSE3-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; GFNISSSE3-NEXT: retq
;
; GFNIAVX-LABEL: test_bitreverse_v8i16:
; GFNIAVX: # %bb.0:
@@ -605,11 +615,25 @@ define <4 x i32> @test_bitreverse_v4i32(<4 x i32> %a) nounwind {
; XOP-NEXT: vpperm {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
;
-; GFNISSE-LABEL: test_bitreverse_v4i32:
-; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; GFNISSE-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; GFNISSE-NEXT: retq
+; GFNISSE2-LABEL: test_bitreverse_v4i32:
+; GFNISSE2: # %bb.0:
+; GFNISSE2-NEXT: pxor %xmm1, %xmm1
+; GFNISSE2-NEXT: movdqa %xmm0, %xmm2
+; GFNISSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: packuswb %xmm2, %xmm0
+; GFNISSE2-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; GFNISSE2-NEXT: retq
+;
+; GFNISSSE3-LABEL: test_bitreverse_v4i32:
+; GFNISSSE3: # %bb.0:
+; GFNISSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; GFNISSSE3-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; GFNISSSE3-NEXT: retq
;
; GFNIAVX-LABEL: test_bitreverse_v4i32:
; GFNIAVX: # %bb.0:
@@ -720,11 +744,27 @@ define <2 x i64> @test_bitreverse_v2i64(<2 x i64> %a) nounwind {
; XOP-NEXT: vpperm {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
;
-; GFNISSE-LABEL: test_bitreverse_v2i64:
-; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
-; GFNISSE-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; GFNISSE-NEXT: retq
+; GFNISSE2-LABEL: test_bitreverse_v2i64:
+; GFNISSE2: # %bb.0:
+; GFNISSE2-NEXT: pxor %xmm1, %xmm1
+; GFNISSE2-NEXT: movdqa %xmm0, %xmm2
+; GFNISSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; GFNISSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; GFNISSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: packuswb %xmm2, %xmm0
+; GFNISSE2-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; GFNISSE2-NEXT: retq
+;
+; GFNISSSE3-LABEL: test_bitreverse_v2i64:
+; GFNISSSE3: # %bb.0:
+; GFNISSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
+; GFNISSSE3-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; GFNISSSE3-NEXT: retq
;
; GFNIAVX-LABEL: test_bitreverse_v2i64:
; GFNIAVX: # %bb.0:
@@ -1042,15 +1082,30 @@ define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; GFNISSE-LABEL: test_bitreverse_v16i16:
-; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
-; GFNISSE-NEXT: pshufb %xmm2, %xmm0
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm3, %xmm0
-; GFNISSE-NEXT: pshufb %xmm2, %xmm1
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm3, %xmm1
-; GFNISSE-NEXT: retq
+; GFNISSE2-LABEL: test_bitreverse_v16i16:
+; GFNISSE2: # %bb.0:
+; GFNISSE2-NEXT: movdqa %xmm0, %xmm2
+; GFNISSE2-NEXT: psrlw $8, %xmm2
+; GFNISSE2-NEXT: psllw $8, %xmm0
+; GFNISSE2-NEXT: por %xmm2, %xmm0
+; GFNISSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm2, %xmm0
+; GFNISSE2-NEXT: movdqa %xmm1, %xmm3
+; GFNISSE2-NEXT: psrlw $8, %xmm3
+; GFNISSE2-NEXT: psllw $8, %xmm1
+; GFNISSE2-NEXT: por %xmm3, %xmm1
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm2, %xmm1
+; GFNISSE2-NEXT: retq
+;
+; GFNISSSE3-LABEL: test_bitreverse_v16i16:
+; GFNISSSE3: # %bb.0:
+; GFNISSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; GFNISSSE3-NEXT: pshufb %xmm2, %xmm0
+; GFNISSSE3-NEXT: movdqa {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm3, %xmm0
+; GFNISSSE3-NEXT: pshufb %xmm2, %xmm1
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm3, %xmm1
+; GFNISSSE3-NEXT: retq
;
; GFNIAVX1-LABEL: test_bitreverse_v16i16:
; GFNIAVX1: # %bb.0:
@@ -1241,15 +1296,39 @@ define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; GFNISSE-LABEL: test_bitreverse_v8i32:
-; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm2 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; GFNISSE-NEXT: pshufb %xmm2, %xmm0
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm3, %xmm0
-; GFNISSE-NEXT: pshufb %xmm2, %xmm1
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm3, %xmm1
-; GFNISSE-NEXT: retq
+; GFNISSE2-LABEL: test_bitreverse_v8i32:
+; GFNISSE2: # %bb.0:
+; GFNISSE2-NEXT: pxor %xmm2, %xmm2
+; GFNISSE2-NEXT: movdqa %xmm0, %xmm3
+; GFNISSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: packuswb %xmm3, %xmm0
+; GFNISSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm3, %xmm0
+; GFNISSE2-NEXT: movdqa %xmm1, %xmm4
+; GFNISSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: packuswb %xmm4, %xmm1
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm3, %xmm1
+; GFNISSE2-NEXT: retq
+;
+; GFNISSSE3-LABEL: test_bitreverse_v8i32:
+; GFNISSSE3: # %bb.0:
+; GFNISSSE3-NEXT: movdqa {{.*#+}} xmm2 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; GFNISSSE3-NEXT: pshufb %xmm2, %xmm0
+; GFNISSSE3-NEXT: movdqa {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm3, %xmm0
+; GFNISSSE3-NEXT: pshufb %xmm2, %xmm1
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm3, %xmm1
+; GFNISSSE3-NEXT: retq
;
; GFNIAVX1-LABEL: test_bitreverse_v8i32:
; GFNIAVX1: # %bb.0:
@@ -1444,15 +1523,43 @@ define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; GFNISSE-LABEL: test_bitreverse_v4i64:
-; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm2 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
-; GFNISSE-NEXT: pshufb %xmm2, %xmm0
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm3, %xmm0
-; GFNISSE-NEXT: pshufb %xmm2, %xmm1
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm3, %xmm1
-; GFNISSE-NEXT: retq
+; GFNISSE2-LABEL: test_bitreverse_v4i64:
+; GFNISSE2: # %bb.0:
+; GFNISSE2-NEXT: pxor %xmm2, %xmm2
+; GFNISSE2-NEXT: movdqa %xmm0, %xmm3
+; GFNISSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; GFNISSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; GFNISSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: packuswb %xmm3, %xmm0
+; GFNISSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm3, %xmm0
+; GFNISSE2-NEXT: movdqa %xmm1, %xmm4
+; GFNISSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; GFNISSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; GFNISSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: packuswb %xmm4, %xmm1
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm3, %xmm1
+; GFNISSE2-NEXT: retq
+;
+; GFNISSSE3-LABEL: test_bitreverse_v4i64:
+; GFNISSSE3: # %bb.0:
+; GFNISSSE3-NEXT: movdqa {{.*#+}} xmm2 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
+; GFNISSSE3-NEXT: pshufb %xmm2, %xmm0
+; GFNISSSE3-NEXT: movdqa {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm3, %xmm0
+; GFNISSSE3-NEXT: pshufb %xmm2, %xmm1
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm3, %xmm1
+; GFNISSSE3-NEXT: retq
;
; GFNIAVX1-LABEL: test_bitreverse_v4i64:
; GFNIAVX1: # %bb.0:
@@ -2035,19 +2142,44 @@ define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; XOPAVX2-NEXT: retq
;
-; GFNISSE-LABEL: test_bitreverse_v32i16:
-; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
-; GFNISSE-NEXT: pshufb %xmm4, %xmm0
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm0
-; GFNISSE-NEXT: pshufb %xmm4, %xmm1
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm1
-; GFNISSE-NEXT: pshufb %xmm4, %xmm2
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm2
-; GFNISSE-NEXT: pshufb %xmm4, %xmm3
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm3
-; GFNISSE-NEXT: retq
+; GFNISSE2-LABEL: test_bitreverse_v32i16:
+; GFNISSE2: # %bb.0:
+; GFNISSE2-NEXT: movdqa %xmm0, %xmm4
+; GFNISSE2-NEXT: psrlw $8, %xmm4
+; GFNISSE2-NEXT: psllw $8, %xmm0
+; GFNISSE2-NEXT: por %xmm4, %xmm0
+; GFNISSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm4, %xmm0
+; GFNISSE2-NEXT: movdqa %xmm1, %xmm5
+; GFNISSE2-NEXT: psrlw $8, %xmm5
+; GFNISSE2-NEXT: psllw $8, %xmm1
+; GFNISSE2-NEXT: por %xmm5, %xmm1
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm4, %xmm1
+; GFNISSE2-NEXT: movdqa %xmm2, %xmm5
+; GFNISSE2-NEXT: psrlw $8, %xmm5
+; GFNISSE2-NEXT: psllw $8, %xmm2
+; GFNISSE2-NEXT: por %xmm5, %xmm2
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm4, %xmm2
+; GFNISSE2-NEXT: movdqa %xmm3, %xmm5
+; GFNISSE2-NEXT: psrlw $8, %xmm5
+; GFNISSE2-NEXT: psllw $8, %xmm3
+; GFNISSE2-NEXT: por %xmm5, %xmm3
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm4, %xmm3
+; GFNISSE2-NEXT: retq
+;
+; GFNISSSE3-LABEL: test_bitreverse_v32i16:
+; GFNISSSE3: # %bb.0:
+; GFNISSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; GFNISSSE3-NEXT: pshufb %xmm4, %xmm0
+; GFNISSSE3-NEXT: movdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm5, %xmm0
+; GFNISSSE3-NEXT: pshufb %xmm4, %xmm1
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm5, %xmm1
+; GFNISSSE3-NEXT: pshufb %xmm4, %xmm2
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm5, %xmm2
+; GFNISSSE3-NEXT: pshufb %xmm4, %xmm3
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm5, %xmm3
+; GFNISSSE3-NEXT: retq
;
; GFNIAVX1-LABEL: test_bitreverse_v32i16:
; GFNIAVX1: # %bb.0:
@@ -2393,19 +2525,61 @@ define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; XOPAVX2-NEXT: retq
;
-; GFNISSE-LABEL: test_bitreverse_v16i32:
-; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; GFNISSE-NEXT: pshufb %xmm4, %xmm0
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm0
-; GFNISSE-NEXT: pshufb %xmm4, %xmm1
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm1
-; GFNISSE-NEXT: pshufb %xmm4, %xmm2
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm2
-; GFNISSE-NEXT: pshufb %xmm4, %xmm3
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm3
-; GFNISSE-NEXT: retq
+; GFNISSE2-LABEL: test_bitreverse_v16i32:
+; GFNISSE2: # %bb.0:
+; GFNISSE2-NEXT: pxor %xmm4, %xmm4
+; GFNISSE2-NEXT: movdqa %xmm0, %xmm5
+; GFNISSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: packuswb %xmm5, %xmm0
+; GFNISSE2-NEXT: movdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm5, %xmm0
+; GFNISSE2-NEXT: movdqa %xmm1, %xmm6
+; GFNISSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: packuswb %xmm6, %xmm1
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm5, %xmm1
+; GFNISSE2-NEXT: movdqa %xmm2, %xmm6
+; GFNISSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: packuswb %xmm6, %xmm2
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm5, %xmm2
+; GFNISSE2-NEXT: movdqa %xmm3, %xmm6
+; GFNISSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: packuswb %xmm6, %xmm3
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm5, %xmm3
+; GFNISSE2-NEXT: retq
+;
+; GFNISSSE3-LABEL: test_bitreverse_v16i32:
+; GFNISSSE3: # %bb.0:
+; GFNISSSE3-NEXT: movdqa {{.*#+}} xmm4 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; GFNISSSE3-NEXT: pshufb %xmm4, %xmm0
+; GFNISSSE3-NEXT: movdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm5, %xmm0
+; GFNISSSE3-NEXT: pshufb %xmm4, %xmm1
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm5, %xmm1
+; GFNISSSE3-NEXT: pshufb %xmm4, %xmm2
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm5, %xmm2
+; GFNISSSE3-NEXT: pshufb %xmm4, %xmm3
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm5, %xmm3
+; GFNISSSE3-NEXT: retq
;
; GFNIAVX1-LABEL: test_bitreverse_v16i32:
; GFNIAVX1: # %bb.0:
@@ -2759,19 +2933,69 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; XOPAVX2-NEXT: retq
;
-; GFNISSE-LABEL: test_bitreverse_v8i64:
-; GFNISSE: # %bb.0:
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
-; GFNISSE-NEXT: pshufb %xmm4, %xmm0
-; GFNISSE-NEXT: movdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm0
-; GFNISSE-NEXT: pshufb %xmm4, %xmm1
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm1
-; GFNISSE-NEXT: pshufb %xmm4, %xmm2
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm2
-; GFNISSE-NEXT: pshufb %xmm4, %xmm3
-; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm3
-; GFNISSE-NEXT: retq
+; GFNISSE2-LABEL: test_bitreverse_v8i64:
+; GFNISSE2: # %bb.0:
+; GFNISSE2-NEXT: pxor %xmm4, %xmm4
+; GFNISSE2-NEXT: movdqa %xmm0, %xmm5
+; GFNISSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
+; GFNISSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,0,1]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; GFNISSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: packuswb %xmm5, %xmm0
+; GFNISSE2-NEXT: movdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm5, %xmm0
+; GFNISSE2-NEXT: movdqa %xmm1, %xmm6
+; GFNISSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; GFNISSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; GFNISSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: packuswb %xmm6, %xmm1
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm5, %xmm1
+; GFNISSE2-NEXT: movdqa %xmm2, %xmm6
+; GFNISSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; GFNISSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; GFNISSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: packuswb %xmm6, %xmm2
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm5, %xmm2
+; GFNISSE2-NEXT: movdqa %xmm3, %xmm6
+; GFNISSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; GFNISSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; GFNISSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; GFNISSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
+; GFNISSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; GFNISSE2-NEXT: packuswb %xmm6, %xmm3
+; GFNISSE2-NEXT: gf2p8affineqb $0, %xmm5, %xmm3
+; GFNISSE2-NEXT: retq
+;
+; GFNISSSE3-LABEL: test_bitreverse_v8i64:
+; GFNISSSE3: # %bb.0:
+; GFNISSSE3-NEXT: movdqa {{.*#+}} xmm4 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
+; GFNISSSE3-NEXT: pshufb %xmm4, %xmm0
+; GFNISSSE3-NEXT: movdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm5, %xmm0
+; GFNISSSE3-NEXT: pshufb %xmm4, %xmm1
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm5, %xmm1
+; GFNISSSE3-NEXT: pshufb %xmm4, %xmm2
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm5, %xmm2
+; GFNISSSE3-NEXT: pshufb %xmm4, %xmm3
+; GFNISSSE3-NEXT: gf2p8affineqb $0, %xmm5, %xmm3
+; GFNISSSE3-NEXT: retq
;
; GFNIAVX1-LABEL: test_bitreverse_v8i64:
; GFNIAVX1: # %bb.0:
>From 605816634f3779fe9059a67e3c89b583decfa988 Mon Sep 17 00:00:00 2001
From: Krzysztof Parzyszek <Krzysztof.Parzyszek at amd.com>
Date: Fri, 18 Jul 2025 13:34:15 -0500
Subject: [PATCH 09/12] [STLForwardCompat] Improve category handling in
transformOptional (#149539)
The old version would prefer the "const &" overload over the "&&" one
unless the former was not allowed in the given situation. In particular,
if the function passed was "[](auto &&)" the argument would be "const &"
even if the value passed to transformOptional was an rvalue reference.
This version improves the handling of expression categories, and the
lambda argument category will reflect the argument category in the above
scenario.
---
llvm/include/llvm/ADT/STLForwardCompat.h | 22 ++++++------------
llvm/unittests/ADT/STLForwardCompatTest.cpp | 25 +++++++++++++++++++++
2 files changed, 32 insertions(+), 15 deletions(-)
diff --git a/llvm/include/llvm/ADT/STLForwardCompat.h b/llvm/include/llvm/ADT/STLForwardCompat.h
index 7bd2c8705f393..81b9a685e11d2 100644
--- a/llvm/include/llvm/ADT/STLForwardCompat.h
+++ b/llvm/include/llvm/ADT/STLForwardCompat.h
@@ -55,21 +55,13 @@ using type_identity_t // NOLINT(readability-identifier-naming)
// TODO: Remove this in favor of std::optional<T>::transform once we switch to
// C++23.
-template <typename T, typename Function>
-auto transformOptional(const std::optional<T> &O, const Function &F)
- -> std::optional<decltype(F(*O))> {
- if (O)
- return F(*O);
- return std::nullopt;
-}
-
-// TODO: Remove this in favor of std::optional<T>::transform once we switch to
-// C++23.
-template <typename T, typename Function>
-auto transformOptional(std::optional<T> &&O, const Function &F)
- -> std::optional<decltype(F(*std::move(O)))> {
- if (O)
- return F(*std::move(O));
+template <typename Optional, typename Function,
+ typename Value = typename llvm::remove_cvref_t<Optional>::value_type>
+std::optional<std::invoke_result_t<Function, Value>>
+transformOptional(Optional &&O, Function &&F) {
+ if (O) {
+ return F(*std::forward<Optional>(O));
+ }
return std::nullopt;
}
diff --git a/llvm/unittests/ADT/STLForwardCompatTest.cpp b/llvm/unittests/ADT/STLForwardCompatTest.cpp
index e3d500aa7b55a..4a8f53cf72f94 100644
--- a/llvm/unittests/ADT/STLForwardCompatTest.cpp
+++ b/llvm/unittests/ADT/STLForwardCompatTest.cpp
@@ -10,6 +10,11 @@
#include "CountCopyAndMove.h"
#include "gtest/gtest.h"
+#include <optional>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
namespace {
template <typename T>
@@ -142,6 +147,26 @@ TEST(TransformTest, MoveTransformLlvm) {
EXPECT_EQ(0, CountCopyAndMove::Destructions);
}
+TEST(TransformTest, TransformCategory) {
+ struct StructA {
+ int x;
+ };
+ struct StructB : StructA {
+ StructB(StructA &&A) : StructA(std::move(A)) {}
+ };
+
+ std::optional<StructA> A{StructA{}};
+ llvm::transformOptional(A, [](auto &&s) {
+ EXPECT_FALSE(std::is_rvalue_reference_v<decltype(s)>);
+ return StructB{std::move(s)};
+ });
+
+ llvm::transformOptional(std::move(A), [](auto &&s) {
+ EXPECT_TRUE(std::is_rvalue_reference_v<decltype(s)>);
+ return StructB{std::move(s)};
+ });
+}
+
TEST(TransformTest, ToUnderlying) {
enum E { A1 = 0, B1 = -1 };
static_assert(llvm::to_underlying(A1) == 0);
>From 4d859915e6a22f31b04cb997b2c2c8490ec53f54 Mon Sep 17 00:00:00 2001
From: Roland McGrath <mcgrathr at google.com>
Date: Fri, 18 Jul 2025 11:35:09 -0700
Subject: [PATCH 10/12] [libc] Remove trivial .h.def files (#149466)
Remove all the .h.def files whose entire contents are already
expressed in the corresponding YAML. Clean up a few YAML
files without materially changing any generated header output.
Many more .h.def files remain; those still need some conversion
work in YAML to express macro requirements and the like.
---
libc/include/dirent.h.def | 16 -------------
libc/include/dirent.yaml | 18 +++++++--------
libc/include/search.h.def | 18 ---------------
libc/include/search.yaml | 24 ++++++++++----------
libc/include/setjmp.h.def | 16 -------------
libc/include/setjmp.yaml | 10 ++++-----
libc/include/spawn.h.def | 16 -------------
libc/include/spawn.yaml | 18 +++++++--------
libc/include/string.h.def | 16 -------------
libc/include/string.yaml | 17 +++++++-------
libc/include/strings.h.def | 16 -------------
libc/include/strings.yaml | 31 +++++++++----------------
libc/include/sys/sendfile.h.def | 16 -------------
libc/include/sys/sendfile.yaml | 12 ++--------
libc/include/sys/statvfs.h.def | 16 -------------
libc/include/sys/statvfs.yaml | 10 ++++-----
libc/include/sys/types.yaml | 40 +++++++++++++++------------------
libc/include/sys/uio.h.def | 16 -------------
libc/include/sys/uio.yaml | 10 ++++-----
libc/include/sys/utsname.h.def | 16 -------------
libc/include/sys/utsname.yaml | 8 +++----
libc/include/threads.h.def | 16 -------------
libc/include/threads.yaml | 3 ++-
libc/include/uchar.h.def | 16 -------------
libc/include/uchar.yaml | 5 -----
25 files changed, 85 insertions(+), 315 deletions(-)
delete mode 100644 libc/include/dirent.h.def
delete mode 100644 libc/include/search.h.def
delete mode 100644 libc/include/setjmp.h.def
delete mode 100644 libc/include/spawn.h.def
delete mode 100644 libc/include/string.h.def
delete mode 100644 libc/include/strings.h.def
delete mode 100644 libc/include/sys/sendfile.h.def
delete mode 100644 libc/include/sys/statvfs.h.def
delete mode 100644 libc/include/sys/uio.h.def
delete mode 100644 libc/include/sys/utsname.h.def
delete mode 100644 libc/include/threads.h.def
delete mode 100644 libc/include/uchar.h.def
diff --git a/libc/include/dirent.h.def b/libc/include/dirent.h.def
deleted file mode 100644
index 6786578fbd067..0000000000000
--- a/libc/include/dirent.h.def
+++ /dev/null
@@ -1,16 +0,0 @@
-//===-- POSIX header dirent.h ---------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_DIRENT_H
-#define LLVM_LIBC_DIRENT_H
-
-#include "__llvm-libc-common.h"
-
-%%public_api()
-
-#endif // LLVM_LIBC_DIRENT_H
diff --git a/libc/include/dirent.yaml b/libc/include/dirent.yaml
index 3fc522fda80e4..66570bca6c495 100644
--- a/libc/include/dirent.yaml
+++ b/libc/include/dirent.yaml
@@ -1,47 +1,45 @@
header: dirent.h
-header_template: dirent.h.def
-macros: []
+standards:
+ - posix
types:
- type_name: struct_dirent
- type_name: DIR
- type_name: ino_t
-enums: []
-objects: []
functions:
- name: alphasort
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: const struct dirent **
- type: const struct dirent **
- name: closedir
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: DIR *
- name: dirfd
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: DIR *
- name: fdopendir
standards:
- - POSIX
+ - posix
return_type: DIR *
arguments:
- type: int
- name: opendir
standards:
- - POSIX
+ - posix
return_type: DIR *
arguments:
- type: const char *
- name: readdir
standards:
- - POSIX
+ - posix
return_type: struct dirent *
arguments:
- type: DIR *
diff --git a/libc/include/search.h.def b/libc/include/search.h.def
deleted file mode 100644
index 6301ba7b656ce..0000000000000
--- a/libc/include/search.h.def
+++ /dev/null
@@ -1,18 +0,0 @@
-//===-- POSIX header search.h ---------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SEARCH_H
-#define LLVM_LIBC_SEARCH_H
-
-#include "__llvm-libc-common.h"
-#define __need_size_t
-#include <stddef.h>
-
-%%public_api()
-
-#endif // LLVM_LIBC_SEARCH_H
diff --git a/libc/include/search.yaml b/libc/include/search.yaml
index e0247afad2cd6..8a3a0c50af60f 100644
--- a/libc/include/search.yaml
+++ b/libc/include/search.yaml
@@ -1,6 +1,6 @@
header: search.h
-header_template: search.h.def
-macros: []
+standards:
+ - posix
types:
- type_name: ACTION
- type_name: ENTRY
@@ -12,35 +12,35 @@ objects: []
functions:
- name: hcreate
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: size_t
- name: hcreate_r
- standards: GNUExtensions
+ standards: gnu
return_type: int
arguments:
- type: size_t
- type: struct hsearch_data *
- name: hdestroy
- standards: GNUExtensions
+ standards: gnu
return_type: void
arguments: []
- name: hdestroy_r
standards:
- - POSIX
+ - posix
return_type: void
arguments:
- type: struct hsearch_data *
- name: hsearch
standards:
- - POSIX
+ - posix
return_type: ENTRY *
arguments:
- type: ENTRY
- type: ACTION
- name: hsearch_r
- standards: GNUExtensions
+ standards: gnu
return_type: int
arguments:
- type: ENTRY
@@ -49,20 +49,20 @@ functions:
- type: struct hsearch_data *
- name: insque
standards:
- - POSIX
+ - posix
return_type: void
arguments:
- type: void *
- type: void *
- name: remque
standards:
- - POSIX
+ - posix
return_type: void
arguments:
- type: void *
- name: lfind
standards:
- - POSIX
+ - posix
return_type: void *
arguments:
- type: const void *
@@ -72,7 +72,7 @@ functions:
- type: __search_compare_t
- name: lsearch
standards:
- - POSIX
+ - posix
return_type: void *
arguments:
- type: const void *
diff --git a/libc/include/setjmp.h.def b/libc/include/setjmp.h.def
deleted file mode 100644
index 670bc1ac0fe24..0000000000000
--- a/libc/include/setjmp.h.def
+++ /dev/null
@@ -1,16 +0,0 @@
-//===-- C standard library header setjmp.h --------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SETJMP_H
-#define LLVM_LIBC_SETJMP_H
-
-#include "__llvm-libc-common.h"
-
-%%public_api()
-
-#endif // LLVM_LIBC_SETJMP_H
diff --git a/libc/include/setjmp.yaml b/libc/include/setjmp.yaml
index 00049e58c86c8..55e03470e33ca 100644
--- a/libc/include/setjmp.yaml
+++ b/libc/include/setjmp.yaml
@@ -1,10 +1,8 @@
header: setjmp.h
-header_template: setjmp.h.def
-macros: []
+standards:
+ - stdc
types:
- type_name: jmp_buf
-enums: []
-objects: []
functions:
- name: longjmp
standards:
@@ -23,7 +21,7 @@ functions:
- type: jmp_buf
- name: sigsetjmp
standards:
- - POSIX
+ - posix
return_type: int
attributes:
- _Returns_twice
@@ -32,7 +30,7 @@ functions:
- type: int
- name: siglongjmp
standards:
- - POSIX
+ - posix
return_type: _Noreturn void
arguments:
- type: sigjmp_buf
diff --git a/libc/include/spawn.h.def b/libc/include/spawn.h.def
deleted file mode 100644
index a8d7015852868..0000000000000
--- a/libc/include/spawn.h.def
+++ /dev/null
@@ -1,16 +0,0 @@
-//===-- POSIX header spawn.h ----------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SPAWN_H
-#define LLVM_LIBC_SPAWN_H
-
-#include "__llvm-libc-common.h"
-
-%%public_api()
-
-#endif // LLVM_LIBC_SPAWN_H
diff --git a/libc/include/spawn.yaml b/libc/include/spawn.yaml
index c763cc76fd094..ef39f66d080f6 100644
--- a/libc/include/spawn.yaml
+++ b/libc/include/spawn.yaml
@@ -1,17 +1,15 @@
header: spawn.h
-header_template: spawn.h.def
-macros: []
+standards:
+ - posix
types:
- type_name: posix_spawn_file_actions_t
- type_name: posix_spawnattr_t
- type_name: pid_t
- type_name: mode_t
-enums: []
-objects: []
functions:
- name: posix_spawn
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: pid_t *__restrict
@@ -22,14 +20,14 @@ functions:
- type: char * const * __restrict
- name: posix_spawn_file_actions_addclose
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: posix_spawn_file_actions_t *
- type: int
- name: posix_spawn_file_actions_adddup2
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: posix_spawn_file_actions_t *
@@ -37,7 +35,7 @@ functions:
- type: int
- name: posix_spawn_file_actions_addopen
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: posix_spawn_file_actions_t *__restrict
@@ -47,13 +45,13 @@ functions:
- type: mode_t
- name: posix_spawn_file_actions_destroy
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: posix_spawn_file_actions_t *
- name: posix_spawn_file_actions_init
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: posix_spawn_file_actions_t *
diff --git a/libc/include/string.h.def b/libc/include/string.h.def
deleted file mode 100644
index 339d005e43a4f..0000000000000
--- a/libc/include/string.h.def
+++ /dev/null
@@ -1,16 +0,0 @@
-//===-- C standard library header string.h --------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_STRING_H
-#define LLVM_LIBC_STRING_H
-
-#include "__llvm-libc-common.h"
-
-%%public_api()
-
-#endif // LLVM_LIBC_STRING_H
diff --git a/libc/include/string.yaml b/libc/include/string.yaml
index 736deceb453de..0bf297ee747a4 100644
--- a/libc/include/string.yaml
+++ b/libc/include/string.yaml
@@ -1,5 +1,6 @@
header: string.h
-header_template: string.h.def
+standards:
+ - stdc
macros:
- macro_name: NULL
macro_header: null-macro.h
@@ -11,7 +12,7 @@ objects: []
functions:
- name: memccpy
standards:
- - POSIX
+ - posix
return_type: void *
arguments:
- type: void *__restrict
@@ -61,7 +62,7 @@ functions:
- type: size_t
- name: mempcpy
standards:
- - POSIX
+ - posix
return_type: void *
arguments:
- type: void *__restrict
@@ -93,14 +94,14 @@ functions:
- type: size_t
- name: stpcpy
standards:
- - POSIX
+ - posix
return_type: char *
arguments:
- type: char *__restrict
- type: const char *__restrict
- name: stpncpy
standards:
- - POSIX
+ - posix
return_type: char *
arguments:
- type: char *__restrict
@@ -243,7 +244,7 @@ functions:
- type: size_t
- name: strnlen
standards:
- - POSIX
+ - posix
return_type: size_t
arguments:
- type: const char *
@@ -271,7 +272,7 @@ functions:
- type: const char *__restrict
- name: strsignal
standards:
- - POSIX
+ - posix
return_type: char *
arguments:
- type: int
@@ -298,7 +299,7 @@ functions:
- type: const char *__restrict
- name: strtok_r
standards:
- - POSIX
+ - posix
return_type: char *
arguments:
- type: char *__restrict
diff --git a/libc/include/strings.h.def b/libc/include/strings.h.def
deleted file mode 100644
index 9b016bf0bc50b..0000000000000
--- a/libc/include/strings.h.def
+++ /dev/null
@@ -1,16 +0,0 @@
-//===-- C standard library header strings.h -------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_STRINGS_H
-#define LLVM_LIBC_STRINGS_H
-
-#include "__llvm-libc-common.h"
-
-%%public_api()
-
-#endif // LLVM_LIBC_STRINGS_H
diff --git a/libc/include/strings.yaml b/libc/include/strings.yaml
index 855800d9dbc3d..1e78f0e48aa59 100644
--- a/libc/include/strings.yaml
+++ b/libc/include/strings.yaml
@@ -1,15 +1,14 @@
header: strings.h
-header_template: strings.h.def
-macros: []
+standards:
+ - bsd
+ - posix
types:
- type_name: size_t
- type_name: locale_t
-enums: []
-objects: []
functions:
- name: bcmp
standards:
- - llvm_libc_ext
+ - bsd
return_type: int
arguments:
- type: const void *
@@ -17,7 +16,7 @@ functions:
- type: size_t
- name: bcopy
standards:
- - llvm_libc_ext
+ - bsd
return_type: void
arguments:
- type: const void *
@@ -25,69 +24,61 @@ functions:
- type: size_t
- name: bzero
standards:
- - llvm_libc_ext
+ - bsd
return_type: void
arguments:
- type: void *
- type: size_t
- name: ffs
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: int
- name: ffsl
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: long
- name: ffsll
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: long long
- name: index
standards:
- - BSDExtensions
+ - bsd
return_type: char *
arguments:
- type: const char *
- type: int
- name: rindex
standards:
- - BSDExtensions
+ - bsd
return_type: char *
arguments:
- type: const char *
- type: int
- name: strcasecmp
- standards:
- - BSDExtensions
return_type: int
arguments:
- type: const char *
- type: const char *
- name: strcasecmp_l
- standards:
- - BSDExtensions
return_type: int
arguments:
- type: const char *
- type: const char *
- type: locale_t
- name: strncasecmp
- standards:
- - BSDExtensions
return_type: int
arguments:
- type: const char *
- type: const char *
- type: size_t
- name: strncasecmp_l
- standards:
- - BSDExtensions
return_type: int
arguments:
- type: const char *
diff --git a/libc/include/sys/sendfile.h.def b/libc/include/sys/sendfile.h.def
deleted file mode 100644
index d7f21f91f95ed..0000000000000
--- a/libc/include/sys/sendfile.h.def
+++ /dev/null
@@ -1,16 +0,0 @@
-//===-- Linux sys/sendfile.h ----------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SYS_SENDFILE_H
-#define LLVM_LIBC_SYS_SENDFILE_H
-
-#include "__llvm-libc-common.h"
-
-%%public_api()
-
-#endif // LLVM_LIBC_SYS_SENDFILE_H
diff --git a/libc/include/sys/sendfile.yaml b/libc/include/sys/sendfile.yaml
index 259ab83dff54b..a845dab580483 100644
--- a/libc/include/sys/sendfile.yaml
+++ b/libc/include/sys/sendfile.yaml
@@ -1,16 +1,8 @@
header: sys/sendfile.h
-header_template: sendfile.h.def
-macros: []
-types:
- - type_name: ssize_t
- - type_name: size_t
- - type_name: off_t
-enums: []
-objects: []
+standards:
+ - linux
functions:
- name: sendfile
- standards:
- - GNUExtensions
return_type: ssize_t
arguments:
- type: int
diff --git a/libc/include/sys/statvfs.h.def b/libc/include/sys/statvfs.h.def
deleted file mode 100644
index f23c9a3d5b1f9..0000000000000
--- a/libc/include/sys/statvfs.h.def
+++ /dev/null
@@ -1,16 +0,0 @@
-//===-- POSIX header statvfs.h --------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SYS_STATVFS_H
-#define LLVM_LIBC_SYS_STATVFS_H
-
-#include <__llvm-libc-common.h>
-
-%%public_api()
-
-#endif // LLVM_LIBC_SYS_STATVFS_H
diff --git a/libc/include/sys/statvfs.yaml b/libc/include/sys/statvfs.yaml
index 8c1d254add37f..e083677beee89 100644
--- a/libc/include/sys/statvfs.yaml
+++ b/libc/include/sys/statvfs.yaml
@@ -1,23 +1,21 @@
header: sys/statvfs.h
-header_template: statvfs.h.def
-macros: []
+standards:
+ - posix
types:
- type_name: struct_statvfs
- type_name: fsblkcnt_t
- type_name: fsfilcnt_t
-enums: []
-objects: []
functions:
- name: fstatvfs
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: int
- type: struct statvfs *
- name: statvfs
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: const char *__restrict
diff --git a/libc/include/sys/types.yaml b/libc/include/sys/types.yaml
index 6fa0b448fcd38..a00429d3817e1 100644
--- a/libc/include/sys/types.yaml
+++ b/libc/include/sys/types.yaml
@@ -1,32 +1,28 @@
header: sys/types.h
-header_template: types.h.def
-standards: POSIX
-macros: []
+standards:
+ - posix
types:
- - type_name: uid_t
- - type_name: time_t
- - type_name: pthread_t
- - type_name: pthread_rwlock_t
- - type_name: pthread_rwlockattr_t
- - type_name: pthread_mutex_t
- type_name: blkcnt_t
- type_name: blksize_t
- type_name: clockid_t
- - type_name: ssize_t
- - type_name: pthread_mutexattr_t
- - type_name: ino_t
- - type_name: pthread_once_t
- - type_name: mode_t
- type_name: dev_t
- - type_name: pthread_attr_t
- type_name: gid_t
- - type_name: pid_t
+ - type_name: ino_t
+ - type_name: mode_t
- type_name: nlink_t
- - type_name: suseconds_t
- type_name: off_t
- - type_name: size_t
- - type_name: pthread_key_t
+ - type_name: pid_t
+ - type_name: pthread_attr_t
- type_name: pthread_condattr_t
-enums: []
-objects: []
-functions: []
+ - type_name: pthread_key_t
+ - type_name: pthread_mutex_t
+ - type_name: pthread_mutexattr_t
+ - type_name: pthread_once_t
+ - type_name: pthread_rwlock_t
+ - type_name: pthread_rwlockattr_t
+ - type_name: pthread_t
+ - type_name: size_t
+ - type_name: ssize_t
+ - type_name: suseconds_t
+ - type_name: time_t
+ - type_name: uid_t
diff --git a/libc/include/sys/uio.h.def b/libc/include/sys/uio.h.def
deleted file mode 100644
index 76496cb2310f7..0000000000000
--- a/libc/include/sys/uio.h.def
+++ /dev/null
@@ -1,16 +0,0 @@
-//===-- POSIX header uio.h ------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SYS_UIO_H
-#define LLVM_LIBC_SYS_UIO_H
-
-#include "__llvm-libc-common.h"
-
-%%public_api()
-
-#endif // LLVM_LIBC_SYS_UIO_H
diff --git a/libc/include/sys/uio.yaml b/libc/include/sys/uio.yaml
index 6d3f336b2b520..929911e669386 100644
--- a/libc/include/sys/uio.yaml
+++ b/libc/include/sys/uio.yaml
@@ -1,15 +1,13 @@
header: sys/uio.h
-header_template: uio.h.def
-macros: []
+standards:
+ - posix
types:
- type_name: struct_iovec
- type_name: ssize_t
-enums: []
-objects: []
functions:
- name: writev
standards:
- - POSIX
+ - posix
return_type: ssize_t
arguments:
- type: int
@@ -17,7 +15,7 @@ functions:
- type: int
- name: readv
standards:
- - POSIX
+ - posix
return_type: ssize_t
arguments:
- type: int
diff --git a/libc/include/sys/utsname.h.def b/libc/include/sys/utsname.h.def
deleted file mode 100644
index 08dbbfc062453..0000000000000
--- a/libc/include/sys/utsname.h.def
+++ /dev/null
@@ -1,16 +0,0 @@
-//===-- Linux sys/utsname.h -----------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SYS_UTSNAME_H
-#define LLVM_LIBC_SYS_UTSNAME_H
-
-#include "__llvm-libc-common.h"
-
-%%public_api()
-
-#endif // LLVM_LIBC_SYS_UTSNAME_H
diff --git a/libc/include/sys/utsname.yaml b/libc/include/sys/utsname.yaml
index 6c7cb71f9a34f..0f0e4cdb38952 100644
--- a/libc/include/sys/utsname.yaml
+++ b/libc/include/sys/utsname.yaml
@@ -1,14 +1,12 @@
header: sys/utsname.h
-header_template: utsname.h.def
-macros: []
+standards:
+ - posix
types:
- type_name: struct_utsname
-enums: []
-objects: []
functions:
- name: uname
standards:
- - POSIX
+ - posix
return_type: int
arguments:
- type: struct utsname *
diff --git a/libc/include/threads.h.def b/libc/include/threads.h.def
deleted file mode 100644
index b114bea0ace34..0000000000000
--- a/libc/include/threads.h.def
+++ /dev/null
@@ -1,16 +0,0 @@
-//===-- C standard library header threads.h -------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_THREADS_H
-#define LLVM_LIBC_THREADS_H
-
-#include "__llvm-libc-common.h"
-
-%%public_api()
-
-#endif // LLVM_LIBC_THREADS_H
diff --git a/libc/include/threads.yaml b/libc/include/threads.yaml
index 7014822f9251d..99b29f1815549 100644
--- a/libc/include/threads.yaml
+++ b/libc/include/threads.yaml
@@ -1,5 +1,6 @@
header: threads.h
-header_template: threads.h.def
+standards:
+ - stdc
macros:
- macro_name: ONCE_FLAG_INIT
macro_value: '{0}'
diff --git a/libc/include/uchar.h.def b/libc/include/uchar.h.def
deleted file mode 100644
index 31b7fcb73ded6..0000000000000
--- a/libc/include/uchar.h.def
+++ /dev/null
@@ -1,16 +0,0 @@
-//===-- C standard library header uchar.h ---------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_UCHAR_H
-#define LLVM_LIBC_UCHAR_H
-
-#include "__llvm-libc-common.h"
-
-%%public_api()
-
-#endif // LLVM_LIBC_UCHAR_H
diff --git a/libc/include/uchar.yaml b/libc/include/uchar.yaml
index 713919796762d..d0799e28ac9cb 100644
--- a/libc/include/uchar.yaml
+++ b/libc/include/uchar.yaml
@@ -1,14 +1,9 @@
header: uchar.h
-header_template: uchar.h.def
standards:
- stdc
-macros: []
types:
- type_name: char32_t
- type_name: char16_t
- type_name: char8_t
- type_name: mbstate_t
- type_name: size_t
-enums: []
-objects: []
-functions: []
>From 8b1b159b781222728db6c7f5ccd9584cf0ca4b10 Mon Sep 17 00:00:00 2001
From: Stanislav Mekhanoshin <Stanislav.Mekhanoshin at amd.com>
Date: Fri, 18 Jul 2025 12:31:29 -0700
Subject: [PATCH 11/12] [AMDGPU] Select flat GVS atomics on gfx1250 (#149554)
---
llvm/lib/Target/AMDGPU/AMDGPUGISel.td | 3 +
llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp | 23 +
llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h | 6 +
.../AMDGPU/AMDGPUInstructionSelector.cpp | 20 +-
.../Target/AMDGPU/AMDGPUInstructionSelector.h | 4 +
llvm/lib/Target/AMDGPU/FLATInstructions.td | 60 +-
.../test/CodeGen/AMDGPU/flat-saddr-atomics.ll | 6030 +++++++++++++++++
7 files changed, 6120 insertions(+), 26 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
index 7b5d4077e85f3..2bfd56f9f3554 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -137,6 +137,9 @@ def gi_global_offset :
def gi_global_saddr :
GIComplexOperandMatcher<s64, "selectGlobalSAddr">,
GIComplexPatternEquiv<GlobalSAddr>;
+def gi_global_saddr_glc :
+ GIComplexOperandMatcher<s64, "selectGlobalSAddrGLC">,
+ GIComplexPatternEquiv<GlobalSAddrGLC>;
def gi_mubuf_scratch_offset :
GIComplexOperandMatcher<s32, "selectMUBUFScratchOffset">,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 25672a52345cb..00c7f0eb6e9f1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -1968,6 +1968,29 @@ bool AMDGPUDAGToDAGISel::SelectGlobalSAddr(SDNode *N,
return true;
}
+bool AMDGPUDAGToDAGISel::SelectGlobalSAddr(SDNode *N, SDValue Addr,
+ SDValue &SAddr, SDValue &VOffset,
+ SDValue &Offset,
+ SDValue &CPol) const {
+ if (!SelectGlobalSAddr(N, Addr, SAddr, VOffset, Offset))
+ return false;
+
+ CPol = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
+ return true;
+}
+
+bool AMDGPUDAGToDAGISel::SelectGlobalSAddrGLC(SDNode *N, SDValue Addr,
+ SDValue &SAddr, SDValue &VOffset,
+ SDValue &Offset,
+ SDValue &CPol) const {
+ if (!SelectGlobalSAddr(N, Addr, SAddr, VOffset, Offset))
+ return false;
+
+ unsigned CPolVal = AMDGPU::CPol::GLC;
+ CPol = CurDAG->getTargetConstant(CPolVal, SDLoc(), MVT::i32);
+ return true;
+}
+
static SDValue SelectSAddrFI(SelectionDAG *CurDAG, SDValue SAddr) {
if (auto *FI = dyn_cast<FrameIndexSDNode>(SAddr)) {
SAddr = CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0));
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
index 9967f46e085e4..acbab3d9e2d81 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
@@ -163,6 +163,12 @@ class AMDGPUDAGToDAGISel : public SelectionDAGISel {
SDValue &Offset) const;
bool SelectGlobalSAddr(SDNode *N, SDValue Addr, SDValue &SAddr,
SDValue &VOffset, SDValue &Offset) const;
+ bool SelectGlobalSAddr(SDNode *N, SDValue Addr, SDValue &SAddr,
+ SDValue &VOffset, SDValue &Offset,
+ SDValue &CPol) const;
+ bool SelectGlobalSAddrGLC(SDNode *N, SDValue Addr, SDValue &SAddr,
+ SDValue &VOffset, SDValue &Offset,
+ SDValue &CPol) const;
bool SelectScratchSAddr(SDNode *N, SDValue Addr, SDValue &SAddr,
SDValue &Offset) const;
bool checkFlatScratchSVSSwizzleBug(SDValue VAddr, SDValue SAddr,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 1a63c48e3666c..d161c035ac295 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -5485,7 +5485,8 @@ AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
// Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
InstructionSelector::ComplexRendererFns
-AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
+AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root,
+ unsigned CPolBits) const {
Register Addr = Root.getReg();
Register PtrBase;
int64_t ConstOffset;
@@ -5529,6 +5530,7 @@ AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
MIB.addReg(HighBits);
}, // voffset
[=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
+ [=](MachineInstrBuilder &MIB) { MIB.addImm(CPolBits); },
}};
}
}
@@ -5568,6 +5570,9 @@ AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
},
[=](MachineInstrBuilder &MIB) { // offset
MIB.addImm(ImmOffset);
+ },
+ [=](MachineInstrBuilder &MIB) { // cpol
+ MIB.addImm(CPolBits);
}}};
}
}
@@ -5591,10 +5596,21 @@ AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
return {{
[=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
[=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); }, // voffset
- [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
+ [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }, // offset
+ [=](MachineInstrBuilder &MIB) { MIB.addImm(CPolBits); } // cpol
}};
}
+InstructionSelector::ComplexRendererFns
+AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
+ return selectGlobalSAddr(Root, 0);
+}
+
+InstructionSelector::ComplexRendererFns
+AMDGPUInstructionSelector::selectGlobalSAddrGLC(MachineOperand &Root) const {
+ return selectGlobalSAddr(Root, AMDGPU::CPol::GLC);
+}
+
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
Register Addr = Root.getReg();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
index 2cb7904d27ccc..34bdf0a6d4ab2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -253,8 +253,12 @@ class AMDGPUInstructionSelector final : public InstructionSelector {
InstructionSelector::ComplexRendererFns
selectScratchOffset(MachineOperand &Root) const;
+ InstructionSelector::ComplexRendererFns
+ selectGlobalSAddr(MachineOperand &Root, unsigned CPolBits) const;
InstructionSelector::ComplexRendererFns
selectGlobalSAddr(MachineOperand &Root) const;
+ InstructionSelector::ComplexRendererFns
+ selectGlobalSAddrGLC(MachineOperand &Root) const;
InstructionSelector::ComplexRendererFns
selectScratchSAddr(MachineOperand &Root) const;
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index c8a4e22ed1dae..1432b5940f3f0 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -11,7 +11,8 @@ let WantsRoot = true in {
def GlobalOffset : ComplexPattern<iPTR, 2, "SelectGlobalOffset", [], [], -10>;
def ScratchOffset : ComplexPattern<iPTR, 2, "SelectScratchOffset", [], [], -10>;
- def GlobalSAddr : ComplexPattern<iPTR, 3, "SelectGlobalSAddr", [], [], -10>;
+ def GlobalSAddr : ComplexPattern<iPTR, 4, "SelectGlobalSAddr", [], [], -10>;
+ def GlobalSAddrGLC : ComplexPattern<iPTR, 4, "SelectGlobalSAddrGLC", [], [], -10>;
def ScratchSAddr : ComplexPattern<iPTR, 2, "SelectScratchSAddr", [], [], -10>;
def ScratchSVAddr : ComplexPattern<iPTR, 3, "SelectScratchSVAddr", [], [], -10>;
}
@@ -1252,13 +1253,13 @@ class GlobalLoadSaddrPat_D16 <FLAT_Pseudo inst, SDPatternOperator node, ValueTyp
>;
class FlatLoadSaddrPat_D16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
- (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset), vt:$in)),
- (inst $saddr, $voffset, $offset, (i32 0), $in)
+ (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset, CPol:$cpol), vt:$in)),
+ (inst $saddr, $voffset, $offset, $cpol, $in)
>;
class FlatLoadSaddrPat_D16_t16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
- (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset))),
- (inst $saddr, $voffset, $offset, (i32 0))
+ (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset, CPol:$cpol))),
+ (inst $saddr, $voffset, $offset, $cpol)
>;
class GlobalLoadSaddrPat_D16_t16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
@@ -1272,26 +1273,26 @@ class FlatLoadSignedPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt>
>;
class FlatLoadSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
- (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset))),
- (inst $saddr, $voffset, $offset, 0)
+ (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset, CPol:$cpol))),
+ (inst $saddr, $voffset, $offset, $cpol)
>;
class FlatStoreSaddrPat <FLAT_Pseudo inst, SDPatternOperator node,
ValueType vt> : GCNPat <
- (node vt:$data, (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset)),
- (inst $voffset, getVregSrcForVT<vt>.ret:$data, $saddr, $offset)
+ (node vt:$data, (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset, CPol:$cpol)),
+ (inst $voffset, getVregSrcForVT<vt>.ret:$data, $saddr, $offset, $cpol)
>;
-class GlobalAtomicSaddrPat <FLAT_Pseudo inst, SDPatternOperator node,
- ValueType vt, ValueType data_vt = vt> : GCNPat <
- (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset), data_vt:$data)),
- (inst $voffset, getVregSrcForVT<data_vt>.ret:$data, $saddr, $offset)
+class FlatAtomicSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ComplexPattern pat,
+ ValueType vt, ValueType data_vt = vt> : GCNPat <
+ (vt (node (pat (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset, CPol:$cpol), data_vt:$data)),
+ (inst $voffset, getVregSrcForVT<data_vt>.ret:$data, $saddr, $offset, $cpol)
>;
class GlobalAtomicNoRtnSaddrPat <FLAT_Pseudo inst, SDPatternOperator node,
ValueType vt> : GCNPat <
- (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset), vt:$data),
- (inst $voffset, getVregSrcForVT<vt>.ret:$data, $saddr, $offset)
+ (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset, CPol:$cpol), vt:$data),
+ (inst $voffset, getVregSrcForVT<vt>.ret:$data, $saddr, $offset, $cpol)
>;
class FlatStorePat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
@@ -1320,6 +1321,12 @@ multiclass FlatAtomicNoRtnPatBase <string inst, string node, ValueType vt,
let AddedComplexity = 1 in
def : GCNPat <(vt (noRtnNode (FlatOffset i64:$vaddr, i32:$offset), data_vt:$data)),
(!cast<FLAT_Pseudo>(inst) VReg_64:$vaddr, getVregSrcForVT<data_vt>.ret:$data, $offset)>;
+
+ def : FlatAtomicSaddrPat<!cast<FLAT_Pseudo>(inst#"_SADDR"), !cast<SDPatternOperator>(node),
+ GlobalSAddr, vt, data_vt> {
+ let AddedComplexity = 9;
+ let SubtargetPredicate = HasFlatGVSMode;
+ }
}
multiclass FlatAtomicNoRtnPatWithAddrSpace<string inst, string node, string addrSpaceSuffix,
@@ -1338,6 +1345,11 @@ multiclass FlatAtomicRtnPatBase <string inst, string node, ValueType vt,
def : GCNPat <(vt (rtnNode (FlatOffset i64:$vaddr, i32:$offset), data_vt:$data)),
(!cast<FLAT_Pseudo>(inst#"_RTN") VReg_64:$vaddr, getVregSrcForVT<data_vt>.ret:$data, $offset)>;
+
+ def : FlatAtomicSaddrPat<!cast<FLAT_Pseudo>(inst#"_SADDR_RTN"), rtnNode, GlobalSAddrGLC, vt, data_vt> {
+ let AddedComplexity = 8;
+ let SubtargetPredicate = HasFlatGVSMode;
+ }
}
multiclass FlatAtomicRtnPatWithAddrSpace<string inst, string intr, string addrSpaceSuffix,
@@ -1507,7 +1519,8 @@ multiclass GlobalFLATAtomicPatsNoRtnBase<string inst, string node, ValueType vt,
def : FlatSignedAtomicPatBase<!cast<FLAT_Pseudo>(inst), !cast<SDPatternOperator>(node), vt, data_vt>;
let AddedComplexity = 13 in
- def : GlobalAtomicSaddrPat<!cast<FLAT_Pseudo>(inst#"_SADDR"), !cast<SDPatternOperator>(node), vt, data_vt>;
+ def : FlatAtomicSaddrPat<!cast<FLAT_Pseudo>(inst#"_SADDR"), !cast<SDPatternOperator>(node),
+ GlobalSAddr, vt, data_vt>;
}
multiclass GlobalFLATAtomicPatsRtnBase<string inst, string node, ValueType vt,
@@ -1518,7 +1531,7 @@ multiclass GlobalFLATAtomicPatsRtnBase<string inst, string node, ValueType vt,
def : FlatSignedAtomicPatBase<!cast<FLAT_Pseudo>(inst#"_RTN"), rtnNode, vt, data_vt>;
let AddedComplexity = 12 in
- def : GlobalAtomicSaddrPat<!cast<FLAT_Pseudo>(inst#"_SADDR_RTN"), rtnNode, vt, data_vt>;
+ def : FlatAtomicSaddrPat<!cast<FLAT_Pseudo>(inst#"_SADDR_RTN"), rtnNode, GlobalSAddrGLC, vt, data_vt>;
}
multiclass GlobalFLATAtomicPatsNoRtn<string inst, string node, ValueType vt,
@@ -1797,12 +1810,13 @@ defm : FlatAtomicPat <"FLAT_ATOMIC_MAX_F64", "atomic_load_fmax_"#as, f64>;
defm : FlatStorePats <FLAT_STORE_BYTE, truncstorei8_flat, i16>;
defm : FlatStorePats <FLAT_STORE_SHORT, store_flat, i16>;
-let SubtargetPredicate = isGFX12Plus in {
- defm : FlatAtomicRtnPatWithAddrSpace<"FLAT_ATOMIC_COND_SUB_U32", "int_amdgcn_atomic_cond_sub_u32", "flat_addrspace", i32 >;
+} // End OtherPredicates = [HasFlatAddressSpace]
- let OtherPredicates = [HasAtomicCSubNoRtnInsts] in
- defm : FlatAtomicNoRtnPatWithAddrSpace<"FLAT_ATOMIC_COND_SUB_U32", "int_amdgcn_atomic_cond_sub_u32", "flat_addrspace", i32>;
-}
+let OtherPredicates = [isGFX12Plus] in
+defm : FlatAtomicRtnPatWithAddrSpace<"FLAT_ATOMIC_COND_SUB_U32", "int_amdgcn_atomic_cond_sub_u32", "flat_addrspace", i32>;
+
+let OtherPredicates = [isGFX12Plus, HasAtomicCSubNoRtnInsts] in
+defm : FlatAtomicNoRtnPatWithAddrSpace<"FLAT_ATOMIC_COND_SUB_U32", "int_amdgcn_atomic_cond_sub_u32", "flat_addrspace", i32>;
let OtherPredicates = [HasD16LoadStore] in {
defm : FlatStorePats <FLAT_STORE_SHORT_D16_HI, truncstorei16_hi16_flat, i32>;
@@ -1826,8 +1840,6 @@ defm : FlatLoadPats_D16 <FLAT_LOAD_SHORT_D16, load_d16_lo_flat, v2i16>;
defm : FlatLoadPats_D16 <FLAT_LOAD_SHORT_D16, load_d16_lo_flat, v2f16>;
}
-} // End OtherPredicates = [HasFlatAddressSpace]
-
let OtherPredicates = [HasFlatGlobalInsts] in {
defm : GlobalFLATLoadPats <GLOBAL_LOAD_UBYTE, atomic_load_aext_8_global, i32>;
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
new file mode 100644
index 0000000000000..f4040f3049e0d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
@@ -0,0 +1,6030 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+
+; Test using saddr addressing mode of flat_* atomic instructions.
+
+define amdgpu_ps void @flat_xchg_saddr_i32_nortn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_xchg_saddr_i32_nortn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_swap_b32 v0, v1, s[2:3] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw xchg ptr %gep0, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; Maximum positive offset on gfx10
+define amdgpu_ps void @flat_xchg_saddr_i32_nortn_offset_2047(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_xchg_saddr_i32_nortn_offset_2047:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_swap_b32 v0, v1, s[2:3] offset:2047 scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 2047
+ %unused = atomicrmw xchg ptr %gep1, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; Maximum negative offset on gfx10
+define amdgpu_ps void @flat_xchg_saddr_i32_nortn_offset_neg2048(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_xchg_saddr_i32_nortn_offset_neg2048:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_swap_b32 v0, v1, s[2:3] offset:-2048 scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -2048
+ %unused = atomicrmw xchg ptr %gep1, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+define amdgpu_ps float @flat_xchg_saddr_i32_rtn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_xchg_saddr_i32_rtn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_swap_b32 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw xchg ptr %gep0, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @flat_xchg_saddr_i32_rtn_2048(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_xchg_saddr_i32_rtn_2048:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_swap_b32 v0, v0, v1, s[2:3] offset:2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 2048
+ %rtn = atomicrmw xchg ptr %gep1, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @flat_xchg_saddr_i32_rtn_neg2048(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_xchg_saddr_i32_rtn_neg2048:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_swap_b32 v0, v0, v1, s[2:3] offset:-2048 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -2048
+ %rtn = atomicrmw xchg ptr %gep1, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+; --------------------------------------------------------------------------------
+; Uniformity edge cases
+; --------------------------------------------------------------------------------
+
+ at ptr.in.lds = internal addrspace(3) global ptr undef
+
+; Base pointer is uniform, but also in VGPRs
+define amdgpu_ps float @flat_xchg_saddr_uniform_ptr_in_vgprs_rtn(i32 %voffset, i32 %data) {
+; GFX1250-SDAG-LABEL: flat_xchg_saddr_uniform_ptr_in_vgprs_rtn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-SDAG-NEXT: ds_load_b64 v[2:3], v2
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: v_readfirstlane_b32 s0, v2
+; GFX1250-SDAG-NEXT: v_readfirstlane_b32 s1, v3
+; GFX1250-SDAG-NEXT: flat_atomic_swap_b32 v0, v0, v1, s[0:1] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: flat_xchg_saddr_uniform_ptr_in_vgprs_rtn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-GISEL-NEXT: ds_load_b64 v[2:3], v2
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: flat_atomic_swap_b32 v0, v[2:3], v1 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
+ %sbase = load ptr, ptr addrspace(3) @ptr.in.lds
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw xchg ptr %gep0, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+; Base pointer is uniform, but also in VGPRs, with imm offset
+define amdgpu_ps float @flat_xchg_saddr_uniform_ptr_in_vgprs_rtn_immoffset(i32 %voffset, i32 %data) {
+; GFX1250-SDAG-LABEL: flat_xchg_saddr_uniform_ptr_in_vgprs_rtn_immoffset:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-SDAG-NEXT: ds_load_b64 v[2:3], v2
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: v_readfirstlane_b32 s0, v2
+; GFX1250-SDAG-NEXT: v_readfirstlane_b32 s1, v3
+; GFX1250-SDAG-NEXT: flat_atomic_swap_b32 v0, v0, v1, s[0:1] offset:42 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: flat_xchg_saddr_uniform_ptr_in_vgprs_rtn_immoffset:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-GISEL-NEXT: ds_load_b64 v[2:3], v2
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: flat_atomic_swap_b32 v0, v[2:3], v1 offset:42 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
+ %sbase = load ptr, ptr addrspace(3) @ptr.in.lds
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 42
+ %rtn = atomicrmw xchg ptr %gep1, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+; Base pointer is uniform, but also in VGPRs
+define amdgpu_ps void @flat_xchg_saddr_uniform_ptr_in_vgprs_nortn(i32 %voffset, i32 %data) {
+; GFX1250-SDAG-LABEL: flat_xchg_saddr_uniform_ptr_in_vgprs_nortn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-SDAG-NEXT: ds_load_b64 v[2:3], v2
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: v_readfirstlane_b32 s0, v2
+; GFX1250-SDAG-NEXT: v_readfirstlane_b32 s1, v3
+; GFX1250-SDAG-NEXT: flat_atomic_swap_b32 v0, v1, s[0:1] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_xchg_saddr_uniform_ptr_in_vgprs_nortn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-GISEL-NEXT: ds_load_b64 v[2:3], v2
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: flat_atomic_swap_b32 v[2:3], v1 scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_endpgm
+ %sbase = load ptr, ptr addrspace(3) @ptr.in.lds
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw xchg ptr %gep0, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; Base pointer is uniform, but also in VGPRs, with imm offset
+define amdgpu_ps void @flat_xchg_saddr_uniform_ptr_in_vgprs_nortn_immoffset(i32 %voffset, i32 %data) {
+; GFX1250-SDAG-LABEL: flat_xchg_saddr_uniform_ptr_in_vgprs_nortn_immoffset:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-SDAG-NEXT: ds_load_b64 v[2:3], v2
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: v_readfirstlane_b32 s0, v2
+; GFX1250-SDAG-NEXT: v_readfirstlane_b32 s1, v3
+; GFX1250-SDAG-NEXT: flat_atomic_swap_b32 v0, v1, s[0:1] offset:42 scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_xchg_saddr_uniform_ptr_in_vgprs_nortn_immoffset:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-GISEL-NEXT: ds_load_b64 v[2:3], v2
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: flat_atomic_swap_b32 v[2:3], v1 offset:42 scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_endpgm
+ %sbase = load ptr, ptr addrspace(3) @ptr.in.lds
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 42
+ %unused = atomicrmw xchg ptr %gep1, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; All atomicrmw ops
+; --------------------------------------------------------------------------------
+
+; --------------------------------------------------------------------------------
+; atomicrmw xchg
+; --------------------------------------------------------------------------------
+
+define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_xchg_saddr_i64_rtn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB10_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB10_4
+; GFX1250-SDAG-NEXT: .LBB10_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB10_5
+; GFX1250-SDAG-NEXT: .LBB10_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_swap_b64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB10_2
+; GFX1250-SDAG-NEXT: .LBB10_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB10_5
+; GFX1250-SDAG-NEXT: .LBB10_5:
+;
+; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_rtn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB10_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB10_4
+; GFX1250-GISEL-NEXT: .LBB10_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB10_5
+; GFX1250-GISEL-NEXT: .LBB10_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB10_2
+; GFX1250-GISEL-NEXT: .LBB10_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB10_5
+; GFX1250-GISEL-NEXT: .LBB10_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw xchg ptr %gep0, i64 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_xchg_saddr_i64_rtn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB11_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB11_4
+; GFX1250-SDAG-NEXT: .LBB11_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB11_5
+; GFX1250-SDAG-NEXT: .LBB11_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_swap_b64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB11_2
+; GFX1250-SDAG-NEXT: .LBB11_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB11_5
+; GFX1250-SDAG-NEXT: .LBB11_5:
+;
+; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_rtn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB11_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB11_4
+; GFX1250-GISEL-NEXT: .LBB11_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB11_5
+; GFX1250-GISEL-NEXT: .LBB11_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB11_2
+; GFX1250-GISEL-NEXT: .LBB11_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB11_5
+; GFX1250-GISEL-NEXT: .LBB11_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw xchg ptr %gep1, i64 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_xchg_saddr_i64_nortn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB12_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB12_4
+; GFX1250-SDAG-NEXT: .LBB12_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB12_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_swap_b64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB12_2
+; GFX1250-SDAG-NEXT: .LBB12_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_nortn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB12_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB12_4
+; GFX1250-GISEL-NEXT: .LBB12_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB12_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB12_2
+; GFX1250-GISEL-NEXT: .LBB12_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw xchg ptr %gep0, i64 %data syncscope("agent") seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_xchg_saddr_i64_nortn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB13_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB13_4
+; GFX1250-SDAG-NEXT: .LBB13_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB13_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_swap_b64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB13_2
+; GFX1250-SDAG-NEXT: .LBB13_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_nortn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB13_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB13_4
+; GFX1250-GISEL-NEXT: .LBB13_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB13_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB13_2
+; GFX1250-GISEL-NEXT: .LBB13_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw xchg ptr %gep1, i64 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; atomicrmw add
+; --------------------------------------------------------------------------------
+
+define amdgpu_ps float @flat_add_saddr_i32_rtn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_add_saddr_i32_rtn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_add_u32 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw add ptr %gep0, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @flat_add_saddr_i32_rtn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_add_saddr_i32_rtn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_add_u32 v0, v0, v1, s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw add ptr %gep1, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps void @flat_add_saddr_i32_nortn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_add_saddr_i32_nortn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_add_u32 v0, v1, s[2:3] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw add ptr %gep0, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_add_saddr_i32_nortn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_add_saddr_i32_nortn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_add_u32 v0, v1, s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw add ptr %gep1, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_add_saddr_i64_rtn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB18_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB18_4
+; GFX1250-SDAG-NEXT: .LBB18_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB18_5
+; GFX1250-SDAG-NEXT: .LBB18_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_add_u64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB18_2
+; GFX1250-SDAG-NEXT: .LBB18_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB18_5
+; GFX1250-SDAG-NEXT: .LBB18_5:
+;
+; GFX1250-GISEL-LABEL: flat_add_saddr_i64_rtn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB18_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB18_4
+; GFX1250-GISEL-NEXT: .LBB18_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB18_5
+; GFX1250-GISEL-NEXT: .LBB18_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB18_2
+; GFX1250-GISEL-NEXT: .LBB18_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB18_5
+; GFX1250-GISEL-NEXT: .LBB18_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw add ptr %gep0, i64 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_add_saddr_i64_rtn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB19_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB19_4
+; GFX1250-SDAG-NEXT: .LBB19_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB19_5
+; GFX1250-SDAG-NEXT: .LBB19_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_add_u64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB19_2
+; GFX1250-SDAG-NEXT: .LBB19_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB19_5
+; GFX1250-SDAG-NEXT: .LBB19_5:
+;
+; GFX1250-GISEL-LABEL: flat_add_saddr_i64_rtn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB19_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB19_4
+; GFX1250-GISEL-NEXT: .LBB19_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB19_5
+; GFX1250-GISEL-NEXT: .LBB19_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB19_2
+; GFX1250-GISEL-NEXT: .LBB19_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB19_5
+; GFX1250-GISEL-NEXT: .LBB19_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw add ptr %gep1, i64 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_add_saddr_i64_nortn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB20_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB20_4
+; GFX1250-SDAG-NEXT: .LBB20_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB20_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_add_u64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB20_2
+; GFX1250-SDAG-NEXT: .LBB20_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_add_saddr_i64_nortn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB20_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB20_4
+; GFX1250-GISEL-NEXT: .LBB20_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB20_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB20_2
+; GFX1250-GISEL-NEXT: .LBB20_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw add ptr %gep0, i64 %data syncscope("agent") seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_add_saddr_i64_nortn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB21_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB21_4
+; GFX1250-SDAG-NEXT: .LBB21_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB21_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_add_u64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB21_2
+; GFX1250-SDAG-NEXT: .LBB21_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_add_saddr_i64_nortn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB21_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB21_4
+; GFX1250-GISEL-NEXT: .LBB21_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB21_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB21_2
+; GFX1250-GISEL-NEXT: .LBB21_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[4:5]
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw add ptr %gep1, i64 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; atomicrmw sub
+; --------------------------------------------------------------------------------
+
+; i32 sub with used result: selects a single flat_atomic_sub_u32 addressed via the
+; SGPR base s[2:3] (saddr form) with th:TH_ATOMIC_RETURN, followed by global_inv.
+define amdgpu_ps float @flat_sub_saddr_i32_rtn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_sub_saddr_i32_rtn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_sub_u32 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw sub ptr %gep0, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+; Same as flat_sub_saddr_i32_rtn but with an extra -128 byte GEP; the constant is
+; folded into the instruction's offset:-128 immediate.
+define amdgpu_ps float @flat_sub_saddr_i32_rtn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_sub_saddr_i32_rtn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_sub_u32 v0, v0, v1, s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw sub ptr %gep1, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+; i32 sub with unused result: the selected flat_atomic_sub_u32 carries no
+; th:TH_ATOMIC_RETURN modifier and the wait is on storecnt rather than loadcnt.
+define amdgpu_ps void @flat_sub_saddr_i32_nortn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_sub_saddr_i32_nortn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_sub_u32 v0, v1, s[2:3] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw sub ptr %gep0, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; No-return i32 sub with -128 byte displacement folded into offset:-128.
+define amdgpu_ps void @flat_sub_saddr_i32_nortn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_sub_saddr_i32_nortn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_sub_u32 v0, v1, s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw sub ptr %gep1, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; i64 sub with used result: the 64-bit flat atomic is expanded with a runtime
+; address-space check against src_private_base. The global path uses
+; flat_atomic_sub_u64; the private path emulates the RMW with
+; scratch_load_b64 / v_sub_co_* / scratch_store_b64. SDAG and GISEL differ only
+; in register allocation and address formation.
+define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_sub_saddr_i64_rtn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB26_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB26_4
+; GFX1250-SDAG-NEXT: .LBB26_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB26_5
+; GFX1250-SDAG-NEXT: .LBB26_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_sub_u64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB26_2
+; GFX1250-SDAG-NEXT: .LBB26_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_sub_co_u32 v2, vcc_lo, v0, v2
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_sub_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB26_5
+; GFX1250-SDAG-NEXT: .LBB26_5:
+;
+; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_rtn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB26_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB26_4
+; GFX1250-GISEL-NEXT: .LBB26_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB26_5
+; GFX1250-GISEL-NEXT: .LBB26_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB26_2
+; GFX1250-GISEL-NEXT: .LBB26_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_sub_co_u32 v2, vcc_lo, v0, v4
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_sub_co_ci_u32_e64 v3, null, v1, v5, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB26_5
+; GFX1250-GISEL-NEXT: .LBB26_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw sub ptr %gep0, i64 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+; Same as flat_sub_saddr_i64_rtn with a -128 byte displacement. SDAG materializes
+; the offset with lit64(0xffffffffffffff80) and adds it up front (the computed
+; address feeds the private-path check); GISEL instead folds offset:-128 into the
+; flat_atomic_sub_u64 on the global path and adds 0xffffff80/-1 for the check.
+define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_sub_saddr_i64_rtn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB27_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB27_4
+; GFX1250-SDAG-NEXT: .LBB27_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB27_5
+; GFX1250-SDAG-NEXT: .LBB27_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_sub_u64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB27_2
+; GFX1250-SDAG-NEXT: .LBB27_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_sub_co_u32 v2, vcc_lo, v0, v2
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_sub_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB27_5
+; GFX1250-SDAG-NEXT: .LBB27_5:
+;
+; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_rtn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB27_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB27_4
+; GFX1250-GISEL-NEXT: .LBB27_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB27_5
+; GFX1250-GISEL-NEXT: .LBB27_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB27_2
+; GFX1250-GISEL-NEXT: .LBB27_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_sub_co_u32 v2, vcc_lo, v0, v4
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_sub_co_ci_u32_e64 v3, null, v1, v5, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB27_5
+; GFX1250-GISEL-NEXT: .LBB27_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw sub ptr %gep1, i64 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+; i64 sub, result unused: same src_private_base expansion, but both paths end in
+; s_endpgm and the flat_atomic_sub_u64 carries no TH_ATOMIC_RETURN.
+define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_sub_saddr_i64_nortn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB28_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB28_4
+; GFX1250-SDAG-NEXT: .LBB28_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB28_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_sub_u64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB28_2
+; GFX1250-SDAG-NEXT: .LBB28_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_sub_co_ci_u32_e64 v1, null, v1, v3, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_nortn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB28_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB28_4
+; GFX1250-GISEL-NEXT: .LBB28_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB28_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB28_2
+; GFX1250-GISEL-NEXT: .LBB28_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v4
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_sub_co_ci_u32_e64 v1, null, v1, v5, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw sub ptr %gep0, i64 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; No-return i64 sub with -128 byte displacement: SDAG pre-adds the lit64 constant
+; to the address, GISEL folds offset:-128 into the global-path instruction.
+define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_sub_saddr_i64_nortn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB29_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB29_4
+; GFX1250-SDAG-NEXT: .LBB29_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB29_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_sub_u64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB29_2
+; GFX1250-SDAG-NEXT: .LBB29_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_sub_co_ci_u32_e64 v1, null, v1, v3, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_nortn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB29_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB29_4
+; GFX1250-GISEL-NEXT: .LBB29_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB29_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB29_2
+; GFX1250-GISEL-NEXT: .LBB29_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v4
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_sub_co_ci_u32_e64 v1, null, v1, v5, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw sub ptr %gep1, i64 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; atomicrmw and
+; --------------------------------------------------------------------------------
+
+; i32 and with used result: single saddr flat_atomic_and_b32 with TH_ATOMIC_RETURN.
+define amdgpu_ps float @flat_and_saddr_i32_rtn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_and_saddr_i32_rtn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_and_b32 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw and ptr %gep0, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+; Same as flat_and_saddr_i32_rtn with the -128 byte GEP folded into offset:-128.
+define amdgpu_ps float @flat_and_saddr_i32_rtn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_and_saddr_i32_rtn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_and_b32 v0, v0, v1, s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw and ptr %gep1, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+; i32 and, result unused: no TH_ATOMIC_RETURN; waits on storecnt then ends program.
+define amdgpu_ps void @flat_and_saddr_i32_nortn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_and_saddr_i32_nortn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_and_b32 v0, v1, s[2:3] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw and ptr %gep0, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; No-return i32 and with -128 byte displacement folded into offset:-128.
+define amdgpu_ps void @flat_and_saddr_i32_nortn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_and_saddr_i32_nortn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_and_b32 v0, v1, s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw and ptr %gep1, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; i64 and with used result: same src_private_base expansion as the i64 sub tests;
+; global path uses flat_atomic_and_b64, private path emulates the RMW with
+; scratch_load_b64 / a pair of v_and_b32 / scratch_store_b64.
+define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_and_saddr_i64_rtn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB34_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB34_4
+; GFX1250-SDAG-NEXT: .LBB34_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB34_5
+; GFX1250-SDAG-NEXT: .LBB34_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_and_b64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr3
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB34_2
+; GFX1250-SDAG-NEXT: .LBB34_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v3, v1, v3
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, v0, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB34_5
+; GFX1250-SDAG-NEXT: .LBB34_5:
+;
+; GFX1250-GISEL-LABEL: flat_and_saddr_i64_rtn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB34_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB34_4
+; GFX1250-GISEL-NEXT: .LBB34_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB34_5
+; GFX1250-GISEL-NEXT: .LBB34_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB34_2
+; GFX1250-GISEL-NEXT: .LBB34_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, v0, v4
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v3, v1, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB34_5
+; GFX1250-GISEL-NEXT: .LBB34_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw and ptr %gep0, i64 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+; i64 atomicrmw 'and' at saddr base + zext(voffset) - 128, syncscope("agent")
+; seq_cst, with the result used (bitcast to <2 x float> for the amdgpu_ps ABI).
+; NOTE(review): the GFX1250 CHECK lines look auto-generated
+; (update_llc_test_checks.py style) — regenerate rather than hand-edit them.
+define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_and_saddr_i64_rtn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB35_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB35_4
+; GFX1250-SDAG-NEXT: .LBB35_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB35_5
+; GFX1250-SDAG-NEXT: .LBB35_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_and_b64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr3
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB35_2
+; GFX1250-SDAG-NEXT: .LBB35_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v3, v1, v3
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, v0, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB35_5
+; GFX1250-SDAG-NEXT: .LBB35_5:
+;
+; GFX1250-GISEL-LABEL: flat_and_saddr_i64_rtn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB35_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB35_4
+; GFX1250-GISEL-NEXT: .LBB35_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB35_5
+; GFX1250-GISEL-NEXT: .LBB35_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB35_2
+; GFX1250-GISEL-NEXT: .LBB35_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, v0, v4
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v3, v1, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB35_5
+; GFX1250-GISEL-NEXT: .LBB35_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw and ptr %gep1, i64 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+; i64 atomicrmw 'and' at saddr base + zext(voffset), syncscope("agent")
+; seq_cst, result discarded (nortn).
+; NOTE(review): CHECK lines appear auto-generated — regenerate, don't hand-edit.
+define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_and_saddr_i64_nortn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB36_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB36_4
+; GFX1250-SDAG-NEXT: .LBB36_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB36_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_and_b64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB36_2
+; GFX1250-SDAG-NEXT: .LBB36_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_and_saddr_i64_nortn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB36_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB36_4
+; GFX1250-GISEL-NEXT: .LBB36_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB36_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB36_2
+; GFX1250-GISEL-NEXT: .LBB36_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw and ptr %gep0, i64 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; i64 atomicrmw 'and' at saddr base + zext(voffset) - 128, syncscope("agent")
+; seq_cst, result discarded (nortn).
+; NOTE(review): CHECK lines appear auto-generated — regenerate, don't hand-edit.
+define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_and_saddr_i64_nortn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB37_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB37_4
+; GFX1250-SDAG-NEXT: .LBB37_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB37_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_and_b64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB37_2
+; GFX1250-SDAG-NEXT: .LBB37_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_and_saddr_i64_nortn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB37_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB37_4
+; GFX1250-GISEL-NEXT: .LBB37_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB37_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB37_2
+; GFX1250-GISEL-NEXT: .LBB37_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw and ptr %gep1, i64 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; atomicrmw or
+; --------------------------------------------------------------------------------
+
+; i32 atomicrmw 'or' at saddr base + zext(voffset), syncscope("agent") seq_cst,
+; result used (bitcast to float). Expected to select a single saddr flat atomic.
+define amdgpu_ps float @flat_or_saddr_i32_rtn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_or_saddr_i32_rtn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_or_b32 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw or ptr %gep0, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+; Same as flat_or_saddr_i32_rtn but with an extra -128 byte GEP, expected to
+; fold into the instruction's offset:-128 operand.
+define amdgpu_ps float @flat_or_saddr_i32_rtn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_or_saddr_i32_rtn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_or_b32 v0, v0, v1, s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw or ptr %gep1, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+; i32 atomicrmw 'or' at saddr base + zext(voffset), result discarded; expects
+; the no-return form (no TH_ATOMIC_RETURN) of the flat atomic.
+define amdgpu_ps void @flat_or_saddr_i32_nortn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_or_saddr_i32_nortn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_or_b32 v0, v1, s[2:3] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw or ptr %gep0, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; Same as flat_or_saddr_i32_nortn but with a -128 byte GEP folded into the
+; instruction's immediate offset.
+define amdgpu_ps void @flat_or_saddr_i32_nortn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_or_saddr_i32_nortn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_or_b32 v0, v1, s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw or ptr %gep1, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; i64 atomicrmw 'or' at saddr base + zext(voffset), syncscope("agent") seq_cst,
+; result used (bitcast to <2 x float>); checks the expanded global/private
+; address-space dispatch for both SDAG and GlobalISel.
+; NOTE(review): CHECK lines appear auto-generated — regenerate, don't hand-edit.
+define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_or_saddr_i64_rtn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB42_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB42_4
+; GFX1250-SDAG-NEXT: .LBB42_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB42_5
+; GFX1250-SDAG-NEXT: .LBB42_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_or_b64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr3
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB42_2
+; GFX1250-SDAG-NEXT: .LBB42_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX1250-SDAG-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB42_5
+; GFX1250-SDAG-NEXT: .LBB42_5:
+;
+; GFX1250-GISEL-LABEL: flat_or_saddr_i64_rtn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB42_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB42_4
+; GFX1250-GISEL-NEXT: .LBB42_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB42_5
+; GFX1250-GISEL-NEXT: .LBB42_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB42_2
+; GFX1250-GISEL-NEXT: .LBB42_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_or_b32_e32 v2, v0, v4
+; GFX1250-GISEL-NEXT: v_or_b32_e32 v3, v1, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB42_5
+; GFX1250-GISEL-NEXT: .LBB42_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw or ptr %gep0, i64 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+; i64 atomicrmw 'or' at saddr base + zext(voffset) - 128, syncscope("agent")
+; seq_cst, result used (bitcast to <2 x float>).
+; NOTE(review): CHECK lines appear auto-generated — regenerate, don't hand-edit.
+define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_or_saddr_i64_rtn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB43_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB43_4
+; GFX1250-SDAG-NEXT: .LBB43_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB43_5
+; GFX1250-SDAG-NEXT: .LBB43_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_or_b64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr3
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB43_2
+; GFX1250-SDAG-NEXT: .LBB43_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX1250-SDAG-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB43_5
+; GFX1250-SDAG-NEXT: .LBB43_5:
+;
+; GFX1250-GISEL-LABEL: flat_or_saddr_i64_rtn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB43_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB43_4
+; GFX1250-GISEL-NEXT: .LBB43_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB43_5
+; GFX1250-GISEL-NEXT: .LBB43_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB43_2
+; GFX1250-GISEL-NEXT: .LBB43_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_or_b32_e32 v2, v0, v4
+; GFX1250-GISEL-NEXT: v_or_b32_e32 v3, v1, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB43_5
+; GFX1250-GISEL-NEXT: .LBB43_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw or ptr %gep1, i64 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+; i64 atomicrmw 'or' at saddr base + zext(voffset), syncscope("agent") seq_cst,
+; result discarded (nortn).
+; NOTE(review): CHECK lines appear auto-generated — regenerate, don't hand-edit.
+define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_or_saddr_i64_nortn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB44_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB44_4
+; GFX1250-SDAG-NEXT: .LBB44_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB44_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_or_b64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB44_2
+; GFX1250-SDAG-NEXT: .LBB44_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX1250-SDAG-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_or_saddr_i64_nortn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB44_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB44_4
+; GFX1250-GISEL-NEXT: .LBB44_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB44_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB44_2
+; GFX1250-GISEL-NEXT: .LBB44_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX1250-GISEL-NEXT: v_or_b32_e32 v1, v1, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw or ptr %gep0, i64 %data syncscope("agent") seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_or_saddr_i64_nortn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB45_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB45_4
+; GFX1250-SDAG-NEXT: .LBB45_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB45_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_or_b64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB45_2
+; GFX1250-SDAG-NEXT: .LBB45_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX1250-SDAG-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_or_saddr_i64_nortn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB45_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB45_4
+; GFX1250-GISEL-NEXT: .LBB45_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB45_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB45_2
+; GFX1250-GISEL-NEXT: .LBB45_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX1250-GISEL-NEXT: v_or_b32_e32 v1, v1, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw or ptr %gep1, i64 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; atomicrmw xor
+; --------------------------------------------------------------------------------
+
+define amdgpu_ps float @flat_xor_saddr_i32_rtn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_xor_saddr_i32_rtn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_xor_b32 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw xor ptr %gep0, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @flat_xor_saddr_i32_rtn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_xor_saddr_i32_rtn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_xor_b32 v0, v0, v1, s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw xor ptr %gep1, i32 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps void @flat_xor_saddr_i32_nortn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_xor_saddr_i32_nortn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_xor_b32 v0, v1, s[2:3] scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw xor ptr %gep0, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_xor_saddr_i32_nortn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_xor_saddr_i32_nortn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_xor_b32 v0, v1, s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw xor ptr %gep1, i32 %data syncscope("agent") seq_cst
+ ret void
+}
+
+define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_xor_saddr_i64_rtn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB50_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB50_4
+; GFX1250-SDAG-NEXT: .LBB50_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB50_5
+; GFX1250-SDAG-NEXT: .LBB50_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_xor_b64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr3
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB50_2
+; GFX1250-SDAG-NEXT: .LBB50_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v3, v1, v3
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, v0, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB50_5
+; GFX1250-SDAG-NEXT: .LBB50_5:
+;
+; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_rtn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB50_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB50_4
+; GFX1250-GISEL-NEXT: .LBB50_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB50_5
+; GFX1250-GISEL-NEXT: .LBB50_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB50_2
+; GFX1250-GISEL-NEXT: .LBB50_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v2, v0, v4
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v3, v1, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB50_5
+; GFX1250-GISEL-NEXT: .LBB50_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw xor ptr %gep0, i64 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_xor_saddr_i64_rtn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB51_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB51_4
+; GFX1250-SDAG-NEXT: .LBB51_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB51_5
+; GFX1250-SDAG-NEXT: .LBB51_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_xor_b64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr3
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB51_2
+; GFX1250-SDAG-NEXT: .LBB51_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v3, v1, v3
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, v0, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB51_5
+; GFX1250-SDAG-NEXT: .LBB51_5:
+;
+; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_rtn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB51_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB51_4
+; GFX1250-GISEL-NEXT: .LBB51_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB51_5
+; GFX1250-GISEL-NEXT: .LBB51_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB51_2
+; GFX1250-GISEL-NEXT: .LBB51_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v2, v0, v4
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v3, v1, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB51_5
+; GFX1250-GISEL-NEXT: .LBB51_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw xor ptr %gep1, i64 %data syncscope("agent") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_xor_saddr_i64_nortn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB52_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB52_4
+; GFX1250-SDAG-NEXT: .LBB52_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB52_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_xor_b64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB52_2
+; GFX1250-SDAG-NEXT: .LBB52_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_nortn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB52_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB52_4
+; GFX1250-GISEL-NEXT: .LBB52_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB52_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB52_2
+; GFX1250-GISEL-NEXT: .LBB52_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw xor ptr %gep0, i64 %data syncscope("agent") seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_xor_saddr_i64_nortn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB53_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB53_4
+; GFX1250-SDAG-NEXT: .LBB53_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB53_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_xor_b64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB53_2
+; GFX1250-SDAG-NEXT: .LBB53_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_nortn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB53_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB53_4
+; GFX1250-GISEL-NEXT: .LBB53_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB53_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB53_2
+; GFX1250-GISEL-NEXT: .LBB53_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw xor ptr %gep1, i64 %data syncscope("agent") seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; atomicrmw max
+; --------------------------------------------------------------------------------
+
+define amdgpu_ps float @flat_max_saddr_i32_rtn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_max_saddr_i32_rtn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_max_i32 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw max ptr %gep0, i32 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @flat_max_saddr_i32_rtn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_max_saddr_i32_rtn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_max_i32 v0, v0, v1, s[2:3] offset:-128 th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw max ptr %gep1, i32 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps void @flat_max_saddr_i32_nortn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_max_saddr_i32_nortn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_max_i32 v0, v1, s[2:3]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw max ptr %gep0, i32 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_max_saddr_i32_nortn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_max_saddr_i32_nortn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_max_i32 v0, v1, s[2:3] offset:-128
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw max ptr %gep1, i32 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_max_saddr_i64_rtn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB58_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB58_4
+; GFX1250-SDAG-NEXT: .LBB58_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB58_5
+; GFX1250-SDAG-NEXT: .LBB58_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_max_i64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB58_2
+; GFX1250-SDAG-NEXT: .LBB58_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB58_5
+; GFX1250-SDAG-NEXT: .LBB58_5:
+;
+; GFX1250-GISEL-LABEL: flat_max_saddr_i64_rtn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB58_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB58_4
+; GFX1250-GISEL-NEXT: .LBB58_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB58_5
+; GFX1250-GISEL-NEXT: .LBB58_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB58_2
+; GFX1250-GISEL-NEXT: .LBB58_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB58_5
+; GFX1250-GISEL-NEXT: .LBB58_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw max ptr %gep0, i64 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_max_saddr_i64_rtn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB59_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB59_4
+; GFX1250-SDAG-NEXT: .LBB59_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB59_5
+; GFX1250-SDAG-NEXT: .LBB59_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_max_i64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB59_2
+; GFX1250-SDAG-NEXT: .LBB59_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB59_5
+; GFX1250-SDAG-NEXT: .LBB59_5:
+;
+; GFX1250-GISEL-LABEL: flat_max_saddr_i64_rtn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB59_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB59_4
+; GFX1250-GISEL-NEXT: .LBB59_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB59_5
+; GFX1250-GISEL-NEXT: .LBB59_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB59_2
+; GFX1250-GISEL-NEXT: .LBB59_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB59_5
+; GFX1250-GISEL-NEXT: .LBB59_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw max ptr %gep1, i64 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_max_saddr_i64_nortn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB60_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB60_4
+; GFX1250-SDAG-NEXT: .LBB60_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB60_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_max_i64 v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB60_2
+; GFX1250-SDAG-NEXT: .LBB60_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_max_saddr_i64_nortn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB60_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB60_4
+; GFX1250-GISEL-NEXT: .LBB60_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB60_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v0, v[4:5], s[2:3]
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB60_2
+; GFX1250-GISEL-NEXT: .LBB60_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw max ptr %gep0, i64 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_max_saddr_i64_nortn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB61_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB61_4
+; GFX1250-SDAG-NEXT: .LBB61_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB61_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_max_i64 v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB61_2
+; GFX1250-SDAG-NEXT: .LBB61_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_max_saddr_i64_nortn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB61_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB61_4
+; GFX1250-GISEL-NEXT: .LBB61_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB61_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v0, v[4:5], s[2:3] offset:-128
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB61_2
+; GFX1250-GISEL-NEXT: .LBB61_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw max ptr %gep1, i64 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; atomicrmw min
+; --------------------------------------------------------------------------------
+
+define amdgpu_ps float @flat_min_saddr_i32_rtn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_min_saddr_i32_rtn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_min_i32 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw min ptr %gep0, i32 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @flat_min_saddr_i32_rtn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_min_saddr_i32_rtn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_min_i32 v0, v0, v1, s[2:3] offset:-128 th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw min ptr %gep1, i32 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps void @flat_min_saddr_i32_nortn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_min_saddr_i32_nortn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_min_i32 v0, v1, s[2:3]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw min ptr %gep0, i32 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_min_saddr_i32_nortn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_min_saddr_i32_nortn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_min_i32 v0, v1, s[2:3] offset:-128
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw min ptr %gep1, i32 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_min_saddr_i64_rtn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB66_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB66_4
+; GFX1250-SDAG-NEXT: .LBB66_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB66_5
+; GFX1250-SDAG-NEXT: .LBB66_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_min_i64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB66_2
+; GFX1250-SDAG-NEXT: .LBB66_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_le_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB66_5
+; GFX1250-SDAG-NEXT: .LBB66_5:
+;
+; GFX1250-GISEL-LABEL: flat_min_saddr_i64_rtn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB66_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB66_4
+; GFX1250-GISEL-NEXT: .LBB66_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB66_5
+; GFX1250-GISEL-NEXT: .LBB66_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB66_2
+; GFX1250-GISEL-NEXT: .LBB66_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB66_5
+; GFX1250-GISEL-NEXT: .LBB66_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw min ptr %gep0, i64 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_min_saddr_i64_rtn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB67_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB67_4
+; GFX1250-SDAG-NEXT: .LBB67_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB67_5
+; GFX1250-SDAG-NEXT: .LBB67_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_min_i64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB67_2
+; GFX1250-SDAG-NEXT: .LBB67_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_le_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB67_5
+; GFX1250-SDAG-NEXT: .LBB67_5:
+;
+; GFX1250-GISEL-LABEL: flat_min_saddr_i64_rtn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB67_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB67_4
+; GFX1250-GISEL-NEXT: .LBB67_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB67_5
+; GFX1250-GISEL-NEXT: .LBB67_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB67_2
+; GFX1250-GISEL-NEXT: .LBB67_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB67_5
+; GFX1250-GISEL-NEXT: .LBB67_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw min ptr %gep1, i64 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_min_saddr_i64_nortn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB68_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB68_4
+; GFX1250-SDAG-NEXT: .LBB68_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB68_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_min_i64 v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB68_2
+; GFX1250-SDAG-NEXT: .LBB68_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_le_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_min_saddr_i64_nortn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB68_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB68_4
+; GFX1250-GISEL-NEXT: .LBB68_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB68_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v0, v[4:5], s[2:3]
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB68_2
+; GFX1250-GISEL-NEXT: .LBB68_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw min ptr %gep0, i64 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_min_saddr_i64_nortn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB69_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB69_4
+; GFX1250-SDAG-NEXT: .LBB69_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB69_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_min_i64 v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB69_2
+; GFX1250-SDAG-NEXT: .LBB69_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_le_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_min_saddr_i64_nortn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB69_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB69_4
+; GFX1250-GISEL-NEXT: .LBB69_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB69_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v0, v[4:5], s[2:3] offset:-128
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB69_2
+; GFX1250-GISEL-NEXT: .LBB69_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw min ptr %gep1, i64 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; atomicrmw umax
+; --------------------------------------------------------------------------------
+
+define amdgpu_ps float @flat_umax_saddr_i32_rtn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_umax_saddr_i32_rtn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_max_u32 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw umax ptr %gep0, i32 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @flat_umax_saddr_i32_rtn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_umax_saddr_i32_rtn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_max_u32 v0, v0, v1, s[2:3] offset:-128 th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw umax ptr %gep1, i32 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps void @flat_umax_saddr_i32_nortn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_umax_saddr_i32_nortn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_max_u32 v0, v1, s[2:3]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw umax ptr %gep0, i32 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_umax_saddr_i32_nortn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_umax_saddr_i32_nortn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_max_u32 v0, v1, s[2:3] offset:-128
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw umax ptr %gep1, i32 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_umax_saddr_i64_rtn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB74_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB74_4
+; GFX1250-SDAG-NEXT: .LBB74_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB74_5
+; GFX1250-SDAG-NEXT: .LBB74_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_max_u64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB74_2
+; GFX1250-SDAG-NEXT: .LBB74_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB74_5
+; GFX1250-SDAG-NEXT: .LBB74_5:
+;
+; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_rtn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB74_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB74_4
+; GFX1250-GISEL-NEXT: .LBB74_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB74_5
+; GFX1250-GISEL-NEXT: .LBB74_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB74_2
+; GFX1250-GISEL-NEXT: .LBB74_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB74_5
+; GFX1250-GISEL-NEXT: .LBB74_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw umax ptr %gep0, i64 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_umax_saddr_i64_rtn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB75_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB75_4
+; GFX1250-SDAG-NEXT: .LBB75_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB75_5
+; GFX1250-SDAG-NEXT: .LBB75_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_max_u64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB75_2
+; GFX1250-SDAG-NEXT: .LBB75_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB75_5
+; GFX1250-SDAG-NEXT: .LBB75_5:
+;
+; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_rtn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB75_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB75_4
+; GFX1250-GISEL-NEXT: .LBB75_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB75_5
+; GFX1250-GISEL-NEXT: .LBB75_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB75_2
+; GFX1250-GISEL-NEXT: .LBB75_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB75_5
+; GFX1250-GISEL-NEXT: .LBB75_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw umax ptr %gep1, i64 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_umax_saddr_i64_nortn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB76_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB76_4
+; GFX1250-SDAG-NEXT: .LBB76_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB76_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_max_u64 v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB76_2
+; GFX1250-SDAG-NEXT: .LBB76_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_nortn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB76_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB76_4
+; GFX1250-GISEL-NEXT: .LBB76_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB76_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v0, v[4:5], s[2:3]
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB76_2
+; GFX1250-GISEL-NEXT: .LBB76_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw umax ptr %gep0, i64 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_umax_saddr_i64_nortn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB77_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB77_4
+; GFX1250-SDAG-NEXT: .LBB77_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB77_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_max_u64 v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB77_2
+; GFX1250-SDAG-NEXT: .LBB77_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_nortn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB77_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB77_4
+; GFX1250-GISEL-NEXT: .LBB77_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB77_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v0, v[4:5], s[2:3] offset:-128
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB77_2
+; GFX1250-GISEL-NEXT: .LBB77_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw umax ptr %gep1, i64 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; atomicrmw umin
+; --------------------------------------------------------------------------------
+
+define amdgpu_ps float @flat_umin_saddr_i32_rtn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_umin_saddr_i32_rtn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_min_u32 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw umin ptr %gep0, i32 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @flat_umin_saddr_i32_rtn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_umin_saddr_i32_rtn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_min_u32 v0, v0, v1, s[2:3] offset:-128 th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw umin ptr %gep1, i32 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps void @flat_umin_saddr_i32_nortn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_umin_saddr_i32_nortn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_min_u32 v0, v1, s[2:3]
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw umin ptr %gep0, i32 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_umin_saddr_i32_nortn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_umin_saddr_i32_nortn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_min_u32 v0, v1, s[2:3] offset:-128
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw umin ptr %gep1, i32 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_umin_saddr_i64_rtn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB82_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB82_4
+; GFX1250-SDAG-NEXT: .LBB82_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB82_5
+; GFX1250-SDAG-NEXT: .LBB82_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_min_u64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB82_2
+; GFX1250-SDAG-NEXT: .LBB82_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_le_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB82_5
+; GFX1250-SDAG-NEXT: .LBB82_5:
+;
+; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_rtn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB82_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB82_4
+; GFX1250-GISEL-NEXT: .LBB82_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB82_5
+; GFX1250-GISEL-NEXT: .LBB82_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB82_2
+; GFX1250-GISEL-NEXT: .LBB82_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB82_5
+; GFX1250-GISEL-NEXT: .LBB82_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw umin ptr %gep0, i64 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_umin_saddr_i64_rtn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB83_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB83_4
+; GFX1250-SDAG-NEXT: .LBB83_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB83_5
+; GFX1250-SDAG-NEXT: .LBB83_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_min_u64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB83_2
+; GFX1250-SDAG-NEXT: .LBB83_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_le_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v3, v1 :: v_dual_cndmask_b32 v2, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB83_5
+; GFX1250-SDAG-NEXT: .LBB83_5:
+;
+; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_rtn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB83_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB83_4
+; GFX1250-GISEL-NEXT: .LBB83_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB83_5
+; GFX1250-GISEL-NEXT: .LBB83_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB83_2
+; GFX1250-GISEL-NEXT: .LBB83_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v0 :: v_dual_cndmask_b32 v3, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB83_5
+; GFX1250-GISEL-NEXT: .LBB83_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw umin ptr %gep1, i64 %data syncscope("workgroup") seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_umin_saddr_i64_nortn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB84_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB84_4
+; GFX1250-SDAG-NEXT: .LBB84_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB84_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_min_u64 v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB84_2
+; GFX1250-SDAG-NEXT: .LBB84_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_le_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_nortn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB84_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB84_4
+; GFX1250-GISEL-NEXT: .LBB84_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB84_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v0, v[4:5], s[2:3]
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB84_2
+; GFX1250-GISEL-NEXT: .LBB84_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw umin ptr %gep0, i64 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_umin_saddr_i64_nortn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB85_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB85_4
+; GFX1250-SDAG-NEXT: .LBB85_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB85_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_min_u64 v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB85_2
+; GFX1250-SDAG-NEXT: .LBB85_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_le_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v3, v1 :: v_dual_cndmask_b32 v0, v2, v0
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_nortn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB85_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB85_4
+; GFX1250-GISEL-NEXT: .LBB85_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB85_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v0, v[4:5], s[2:3] offset:-128
+; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB85_2
+; GFX1250-GISEL-NEXT: .LBB85_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw umin ptr %gep1, i64 %data syncscope("workgroup") seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; cmpxchg
+; --------------------------------------------------------------------------------
+
+define amdgpu_ps float @flat_cmpxchg_saddr_i32_rtn(ptr inreg %sbase, i32 %voffset, i32 %cmp, i32 %data) {
+; GFX1250-LABEL: flat_cmpxchg_saddr_i32_rtn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mov_b32_e32 v3, v1
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: flat_atomic_cmpswap_b32 v0, v0, v[2:3], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %cmpxchg = cmpxchg ptr %gep0, i32 %cmp, i32 %data seq_cst seq_cst
+ %rtn = extractvalue { i32, i1 } %cmpxchg, 0
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @flat_cmpxchg_saddr_i32_rtn_neg128(ptr inreg %sbase, i32 %voffset, i32 %cmp, i32 %data) {
+; GFX1250-LABEL: flat_cmpxchg_saddr_i32_rtn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mov_b32_e32 v3, v1
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: flat_atomic_cmpswap_b32 v0, v0, v[2:3], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %cmpxchg = cmpxchg ptr %gep1, i32 %cmp, i32 %data seq_cst seq_cst
+ %rtn = extractvalue { i32, i1 } %cmpxchg, 0
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps void @flat_cmpxchg_saddr_i32_nortn(ptr inreg %sbase, i32 %voffset, i32 %cmp, i32 %data) {
+; GFX1250-LABEL: flat_cmpxchg_saddr_i32_nortn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mov_b32_e32 v3, v1
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: flat_atomic_cmpswap_b32 v0, v[2:3], s[2:3] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = cmpxchg ptr %gep0, i32 %cmp, i32 %data seq_cst seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_cmpxchg_saddr_i32_nortn_neg128(ptr inreg %sbase, i32 %voffset, i32 %cmp, i32 %data) {
+; GFX1250-LABEL: flat_cmpxchg_saddr_i32_nortn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mov_b32_e32 v3, v1
+; GFX1250-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: flat_atomic_cmpswap_b32 v0, v[2:3], s[2:3] offset:-128 scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = cmpxchg ptr %gep1, i32 %cmp, i32 %data seq_cst seq_cst
+ ret void
+}
+
+define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 %voffset, i64 %cmp, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_rtn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v5, v4
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[2:3], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB90_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB90_4
+; GFX1250-SDAG-NEXT: .LBB90_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB90_5
+; GFX1250-SDAG-NEXT: .LBB90_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX1250-SDAG-NEXT: flat_atomic_cmpswap_b64 v[0:1], v[2:3], v[4:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB90_2
+; GFX1250-SDAG-NEXT: .LBB90_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v2, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v8, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v1, v5 :: v_dual_cndmask_b32 v2, v0, v4
+; GFX1250-SDAG-NEXT: scratch_store_b64 v8, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB90_5
+; GFX1250-SDAG-NEXT: .LBB90_5:
+;
+; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_rtn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v0, v5
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB90_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB90_4
+; GFX1250-GISEL-NEXT: .LBB90_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB90_5
+; GFX1250-GISEL-NEXT: .LBB90_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v[0:1], v5, v[6:9], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB90_2
+; GFX1250-GISEL-NEXT: .LBB90_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v0, v6 :: v_dual_cndmask_b32 v3, v1, v7
+; GFX1250-GISEL-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB90_5
+; GFX1250-GISEL-NEXT: .LBB90_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %cmpxchg = cmpxchg ptr %gep0, i64 %cmp, i64 %data seq_cst seq_cst
+ %rtn = extractvalue { i64, i1 } %cmpxchg, 0
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 %voffset, i64 %cmp, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_rtn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB91_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB91_4
+; GFX1250-SDAG-NEXT: .LBB91_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB91_5
+; GFX1250-SDAG-NEXT: .LBB91_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX1250-SDAG-NEXT: flat_atomic_cmpswap_b64 v[0:1], v[2:3], v[4:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB91_2
+; GFX1250-SDAG-NEXT: .LBB91_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v2, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v8, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v1, v5 :: v_dual_cndmask_b32 v2, v0, v4
+; GFX1250-SDAG-NEXT: scratch_store_b64 v8, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB91_5
+; GFX1250-SDAG-NEXT: .LBB91_5:
+;
+; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_rtn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v5
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB91_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB91_4
+; GFX1250-GISEL-NEXT: .LBB91_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB91_5
+; GFX1250-GISEL-NEXT: .LBB91_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v[0:1], v5, v[6:9], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB91_2
+; GFX1250-GISEL-NEXT: .LBB91_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v0, v6 :: v_dual_cndmask_b32 v3, v1, v7
+; GFX1250-GISEL-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB91_5
+; GFX1250-GISEL-NEXT: .LBB91_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %cmpxchg = cmpxchg ptr %gep1, i64 %cmp, i64 %data seq_cst seq_cst
+ %rtn = extractvalue { i64, i1 } %cmpxchg, 0
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i64 %cmp, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_nortn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v5, v4
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB92_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB92_4
+; GFX1250-SDAG-NEXT: .LBB92_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB92_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX1250-SDAG-NEXT: flat_atomic_cmpswap_b64 v[0:1], v[4:7] scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB92_2
+; GFX1250-SDAG-NEXT: .LBB92_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v1, v5 :: v_dual_cndmask_b32 v0, v0, v4
+; GFX1250-SDAG-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_nortn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB92_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB92_4
+; GFX1250-GISEL-NEXT: .LBB92_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB92_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v0, v[6:9], s[2:3] scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB92_2
+; GFX1250-GISEL-NEXT: .LBB92_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v0, v6 :: v_dual_cndmask_b32 v1, v1, v7
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = cmpxchg ptr %gep0, i64 %cmp, i64 %data seq_cst seq_cst
+ ret void
+}
+
+define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %voffset, i64 %cmp, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_nortn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB93_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB93_4
+; GFX1250-SDAG-NEXT: .LBB93_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB93_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX1250-SDAG-NEXT: flat_atomic_cmpswap_b64 v[0:1], v[4:7] scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB93_2
+; GFX1250-SDAG-NEXT: .LBB93_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v1, v5 :: v_dual_cndmask_b32 v0, v0, v4
+; GFX1250-SDAG-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_nortn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB93_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB93_4
+; GFX1250-GISEL-NEXT: .LBB93_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB93_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: global_wb scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v0, v[6:9], s[2:3] offset:-128 scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB93_2
+; GFX1250-GISEL-NEXT: .LBB93_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v0, v6 :: v_dual_cndmask_b32 v1, v1, v7
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = cmpxchg ptr %gep1, i64 %cmp, i64 %data seq_cst seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; amdgcn atomic inc
+; --------------------------------------------------------------------------------
+
+define amdgpu_ps float @flat_inc_saddr_i32_rtn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_inc_saddr_i32_rtn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_inc_u32 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw uinc_wrap ptr %gep0, i32 %data syncscope("agent") monotonic
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @flat_inc_saddr_i32_rtn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_inc_saddr_i32_rtn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_inc_u32 v0, v0, v1, s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw uinc_wrap ptr %gep1, i32 %data syncscope("agent") monotonic
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps void @flat_inc_saddr_i32_nortn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_inc_saddr_i32_nortn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_inc_u32 v0, v1, s[2:3] scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw uinc_wrap ptr %gep0, i32 %data syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @flat_inc_saddr_i32_nortn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_inc_saddr_i32_nortn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_inc_u32 v0, v1, s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw uinc_wrap ptr %gep1, i32 %data syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_inc_saddr_i64_rtn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB98_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB98_4
+; GFX1250-SDAG-NEXT: .LBB98_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB98_5
+; GFX1250-SDAG-NEXT: .LBB98_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_inc_u64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB98_2
+; GFX1250-SDAG-NEXT: .LBB98_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, 1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, 0, v5 :: v_dual_cndmask_b32 v2, 0, v4
+; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB98_5
+; GFX1250-SDAG-NEXT: .LBB98_5:
+;
+; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_rtn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB98_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB98_4
+; GFX1250-GISEL-NEXT: .LBB98_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB98_5
+; GFX1250-GISEL-NEXT: .LBB98_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB98_2
+; GFX1250-GISEL-NEXT: .LBB98_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, 1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB98_5
+; GFX1250-GISEL-NEXT: .LBB98_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw uinc_wrap ptr %gep0, i64 %data syncscope("agent") monotonic
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_inc_saddr_i64_rtn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB99_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB99_4
+; GFX1250-SDAG-NEXT: .LBB99_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB99_5
+; GFX1250-SDAG-NEXT: .LBB99_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_inc_u64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB99_2
+; GFX1250-SDAG-NEXT: .LBB99_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, 1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, 0, v5 :: v_dual_cndmask_b32 v2, 0, v4
+; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_branch .LBB99_5
+; GFX1250-SDAG-NEXT: .LBB99_5:
+;
+; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_rtn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB99_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB99_4
+; GFX1250-GISEL-NEXT: .LBB99_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB99_5
+; GFX1250-GISEL-NEXT: .LBB99_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB99_2
+; GFX1250-GISEL-NEXT: .LBB99_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, 1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_branch .LBB99_5
+; GFX1250-GISEL-NEXT: .LBB99_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw uinc_wrap ptr %gep1, i64 %data syncscope("agent") monotonic
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_inc_saddr_i64_nortn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB100_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB100_4
+; GFX1250-SDAG-NEXT: .LBB100_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB100_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_inc_u64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB100_2
+; GFX1250-SDAG-NEXT: .LBB100_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, 1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, 0, v5 :: v_dual_cndmask_b32 v0, 0, v4
+; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_nortn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB100_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB100_4
+; GFX1250-GISEL-NEXT: .LBB100_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB100_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB100_2
+; GFX1250-GISEL-NEXT: .LBB100_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, 1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw uinc_wrap ptr %gep0, i64 %data syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_inc_saddr_i64_nortn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB101_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB101_4
+; GFX1250-SDAG-NEXT: .LBB101_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB101_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_inc_u64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB101_2
+; GFX1250-SDAG-NEXT: .LBB101_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, 1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffd
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, 0, v5 :: v_dual_cndmask_b32 v0, 0, v4
+; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_nortn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB101_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB101_4
+; GFX1250-GISEL-NEXT: .LBB101_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB101_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB101_2
+; GFX1250-GISEL-NEXT: .LBB101_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, 1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw uinc_wrap ptr %gep1, i64 %data syncscope("agent") monotonic
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; amdgcn atomic dec
+; --------------------------------------------------------------------------------
+
+
+define amdgpu_ps float @flat_dec_saddr_i32_rtn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_dec_saddr_i32_rtn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_dec_u32 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw udec_wrap ptr %gep0, i32 %data syncscope("agent") monotonic
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @flat_dec_saddr_i32_rtn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_dec_saddr_i32_rtn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_dec_u32 v0, v0, v1, s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw udec_wrap ptr %gep1, i32 %data syncscope("agent") monotonic
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps void @flat_dec_saddr_i32_nortn(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_dec_saddr_i32_nortn:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_dec_u32 v0, v1, s[2:3] scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw udec_wrap ptr %gep0, i32 %data syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @flat_dec_saddr_i32_nortn_neg128(ptr inreg %sbase, i32 %voffset, i32 %data) {
+; GFX1250-LABEL: flat_dec_saddr_i32_nortn_neg128:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_atomic_dec_u32 v0, v1, s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw udec_wrap ptr %gep1, i32 %data syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_dec_saddr_i64_rtn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB106_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB106_4
+; GFX1250-SDAG-NEXT: .LBB106_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB106_5
+; GFX1250-SDAG-NEXT: .LBB106_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_dec_u64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB106_2
+; GFX1250-SDAG-NEXT: .LBB106_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmp_gt_u64_e64 s0, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, -1
+; GFX1250-SDAG-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v5, v3 :: v_dual_cndmask_b32 v2, v4, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX1250-SDAG-NEXT: s_branch .LBB106_5
+; GFX1250-SDAG-NEXT: .LBB106_5:
+;
+; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_rtn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB106_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB106_4
+; GFX1250-GISEL-NEXT: .LBB106_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB106_5
+; GFX1250-GISEL-NEXT: .LBB106_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB106_2
+; GFX1250-GISEL-NEXT: .LBB106_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-GISEL-NEXT: v_cmp_gt_u64_e64 s0, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, -1
+; GFX1250-GISEL-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v2, v4 :: v_dual_cndmask_b32 v3, v3, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX1250-GISEL-NEXT: s_branch .LBB106_5
+; GFX1250-GISEL-NEXT: .LBB106_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %rtn = atomicrmw udec_wrap ptr %gep0, i64 %data syncscope("agent") monotonic
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_dec_saddr_i64_rtn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB107_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB107_4
+; GFX1250-SDAG-NEXT: .LBB107_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: s_branch .LBB107_5
+; GFX1250-SDAG-NEXT: .LBB107_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_dec_u64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB107_2
+; GFX1250-SDAG-NEXT: .LBB107_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmp_gt_u64_e64 s0, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, -1
+; GFX1250-SDAG-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, v5, v3 :: v_dual_cndmask_b32 v2, v4, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX1250-SDAG-NEXT: s_branch .LBB107_5
+; GFX1250-SDAG-NEXT: .LBB107_5:
+;
+; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_rtn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB107_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB107_4
+; GFX1250-GISEL-NEXT: .LBB107_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_branch .LBB107_5
+; GFX1250-GISEL-NEXT: .LBB107_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB107_2
+; GFX1250-GISEL-NEXT: .LBB107_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-GISEL-NEXT: v_cmp_gt_u64_e64 s0, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, -1
+; GFX1250-GISEL-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v2, v2, v4 :: v_dual_cndmask_b32 v3, v3, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX1250-GISEL-NEXT: s_branch .LBB107_5
+; GFX1250-GISEL-NEXT: .LBB107_5:
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %rtn = atomicrmw udec_wrap ptr %gep1, i64 %data syncscope("agent") monotonic
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_dec_saddr_i64_nortn:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB108_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB108_4
+; GFX1250-SDAG-NEXT: .LBB108_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB108_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_dec_u64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB108_2
+; GFX1250-SDAG-NEXT: .LBB108_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmp_gt_u64_e64 s0, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, -1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v1, v3 :: v_dual_cndmask_b32 v0, v0, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_nortn:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB108_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB108_4
+; GFX1250-GISEL-NEXT: .LBB108_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB108_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB108_2
+; GFX1250-GISEL-NEXT: .LBB108_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-GISEL-NEXT: v_cmp_gt_u64_e64 s0, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, -1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v0, v4 :: v_dual_cndmask_b32 v1, v1, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %unused = atomicrmw udec_wrap ptr %gep0, i64 %data syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %voffset, i64 %data) {
+; GFX1250-SDAG-LABEL: flat_dec_saddr_i64_nortn_neg128:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], s[2:3], 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB109_3
+; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB109_4
+; GFX1250-SDAG-NEXT: .LBB109_2: ; %atomicrmw.phi
+; GFX1250-SDAG-NEXT: s_endpgm
+; GFX1250-SDAG-NEXT: .LBB109_3: ; %atomicrmw.global
+; GFX1250-SDAG-NEXT: flat_atomic_dec_u64 v[0:1], v[2:3] scope:SCOPE_DEV
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB109_2
+; GFX1250-SDAG-NEXT: .LBB109_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-SDAG-NEXT: v_cmp_gt_u64_e64 s0, v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, -1
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
+; GFX1250-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, v1, v3 :: v_dual_cndmask_b32 v0, v0, v2
+; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[0:1], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_nortn_neg128:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB109_3
+; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB109_4
+; GFX1250-GISEL-NEXT: .LBB109_2: ; %atomicrmw.phi
+; GFX1250-GISEL-NEXT: s_endpgm
+; GFX1250-GISEL-NEXT: .LBB109_3: ; %atomicrmw.global
+; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB109_2
+; GFX1250-GISEL-NEXT: .LBB109_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffd
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-GISEL-NEXT: v_cmp_gt_u64_e64 s0, v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, -1
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
+; GFX1250-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_cndmask_b32 v0, v0, v4 :: v_dual_cndmask_b32 v1, v1, v5
+; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[0:1], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, ptr %gep0, i64 -128
+ %unused = atomicrmw udec_wrap ptr %gep1, i64 %data syncscope("agent") monotonic
+ ret void
+}
+
+attributes #0 = { argmemonly nounwind willreturn }
>From 7a9a76a9300c82b629d1f598c78088bee9b3d6d4 Mon Sep 17 00:00:00 2001
From: Joseph Huber <huberjn at outlook.com>
Date: Fri, 18 Jul 2025 14:36:09 -0500
Subject: [PATCH 12/12] [libc] Fix GPU benchmarking
---
libc/benchmarks/gpu/LibcGpuBenchmark.cpp | 2 +-
.../gpu/src/math/atan2_benchmark.cpp | 24 ++++----
libc/benchmarks/gpu/src/math/platform.h | 57 +++++++++++++++++++
.../benchmarks/gpu/src/math/sin_benchmark.cpp | 40 ++++++-------
libc/benchmarks/gpu/timing/amdgpu/timing.h | 21 +++----
libc/benchmarks/gpu/timing/nvptx/timing.h | 17 +++---
6 files changed, 106 insertions(+), 55 deletions(-)
create mode 100644 libc/benchmarks/gpu/src/math/platform.h
diff --git a/libc/benchmarks/gpu/LibcGpuBenchmark.cpp b/libc/benchmarks/gpu/LibcGpuBenchmark.cpp
index 920c5b206b0fe..57ff5b9fdb846 100644
--- a/libc/benchmarks/gpu/LibcGpuBenchmark.cpp
+++ b/libc/benchmarks/gpu/LibcGpuBenchmark.cpp
@@ -7,9 +7,9 @@
#include "src/__support/GPU/utils.h"
#include "src/__support/fixedvector.h"
#include "src/__support/macros/config.h"
+#include "src/__support/time/gpu/time_utils.h"
#include "src/stdio/printf.h"
#include "src/stdlib/srand.h"
-#include "src/time/gpu/time_utils.h"
namespace LIBC_NAMESPACE_DECL {
namespace benchmarks {
diff --git a/libc/benchmarks/gpu/src/math/atan2_benchmark.cpp b/libc/benchmarks/gpu/src/math/atan2_benchmark.cpp
index 3bb5b0cc6788c..1f91a9a35c373 100644
--- a/libc/benchmarks/gpu/src/math/atan2_benchmark.cpp
+++ b/libc/benchmarks/gpu/src/math/atan2_benchmark.cpp
@@ -3,12 +3,8 @@
#include "src/math/atan2.h"
#include "src/stdlib/rand.h"
-#ifdef NVPTX_MATH_FOUND
-#include "src/math/nvptx/declarations.h"
-#endif
-
-#ifdef AMDGPU_MATH_FOUND
-#include "src/math/amdgpu/declarations.h"
+#if defined(NVPTX_MATH_FOUND) || defined(AMDGPU_MATH_FOUND)
+#include "platform.h"
#endif
#define BM_TWO_RANDOM_INPUT(T, Func, MIN_EXP, MAX_EXP, N) \
@@ -33,15 +29,15 @@ BENCH(double, Atan2TwoPow30, LIBC_NAMESPACE::atan2, 0, 30);
BENCH(double, Atan2Large, LIBC_NAMESPACE::atan2, 30, 1000);
#ifdef NVPTX_MATH_FOUND
-BENCH(double, NvAtan2, LIBC_NAMESPACE::__nv_atan2, -1023, 1023);
-BENCH(double, NvAtan2TwoPi, LIBC_NAMESPACE::__nv_atan2, -10, 3);
-BENCH(double, NvAtan2TwoPow30, LIBC_NAMESPACE::__nv_atan2, 0, 30);
-BENCH(double, NvAtan2Large, LIBC_NAMESPACE::__nv_atan2, 30, 1000);
+BENCH(double, NvAtan2, __nv_atan2, -1023, 1023);
+BENCH(double, NvAtan2TwoPi, __nv_atan2, -10, 3);
+BENCH(double, NvAtan2TwoPow30, __nv_atan2, 0, 30);
+BENCH(double, NvAtan2Large, __nv_atan2, 30, 1000);
#endif
#ifdef AMDGPU_MATH_FOUND
-BENCH(double, AmdAtan2, LIBC_NAMESPACE::__ocml_atan2_f64, -1023, 1023);
-BENCH(double, AmdAtan2TwoPi, LIBC_NAMESPACE::__ocml_atan2_f64, -10, 3);
-BENCH(double, AmdAtan2TwoPow30, LIBC_NAMESPACE::__ocml_atan2_f64, 0, 30);
-BENCH(double, AmdAtan2Large, LIBC_NAMESPACE::__ocml_atan2_f64, 30, 1000);
+BENCH(double, AmdAtan2, __ocml_atan2_f64, -1023, 1023);
+BENCH(double, AmdAtan2TwoPi, __ocml_atan2_f64, -10, 3);
+BENCH(double, AmdAtan2TwoPow30, __ocml_atan2_f64, 0, 30);
+BENCH(double, AmdAtan2Large, __ocml_atan2_f64, 30, 1000);
#endif
diff --git a/libc/benchmarks/gpu/src/math/platform.h b/libc/benchmarks/gpu/src/math/platform.h
new file mode 100644
index 0000000000000..bb7825d38bd42
--- /dev/null
+++ b/libc/benchmarks/gpu/src/math/platform.h
@@ -0,0 +1,57 @@
+//===-- AMDGPU specific platform definitions for math support -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_SRC_MATH_AMDGPU_PLATFORM_H
+#define LLVM_LIBC_SRC_MATH_AMDGPU_PLATFORM_H
+#include "src/__support/macros/attributes.h"
+#include "src/__support/macros/config.h"
+#include <stdint.h>
+
+namespace LIBC_NAMESPACE_DECL {
+
+#ifdef LIBC_TARGET_ARCH_IS_AMDGPU
+// The ROCm device library uses control globals to alter codegen for the
+// different targets. To avoid needing to link them in manually we simply
+// define them here.
+extern "C" {
+extern const LIBC_INLINE_VAR uint8_t __oclc_unsafe_math_opt = 0;
+extern const LIBC_INLINE_VAR uint8_t __oclc_daz_opt = 0;
+extern const LIBC_INLINE_VAR uint8_t __oclc_correctly_rounded_sqrt32 = 1;
+extern const LIBC_INLINE_VAR uint8_t __oclc_finite_only_opt = 0;
+extern const LIBC_INLINE_VAR uint32_t __oclc_ISA_version = 9000;
+}
+
+// These aliases cause clang to emit the control constants with ODR linkage.
+// This allows us to link against the symbols without preventing them from being
+// optimized out or causing symbol collisions.
+[[gnu::alias("__oclc_unsafe_math_opt")]] const uint8_t __oclc_unsafe_math_opt__;
+[[gnu::alias("__oclc_daz_opt")]] const uint8_t __oclc_daz_opt__;
+[[gnu::alias("__oclc_correctly_rounded_sqrt32")]] const uint8_t
+ __oclc_correctly_rounded_sqrt32__;
+[[gnu::alias("__oclc_finite_only_opt")]] const uint8_t __oclc_finite_only_opt__;
+[[gnu::alias("__oclc_ISA_version")]] const uint32_t __oclc_ISA_version__;
+#endif
+} // namespace LIBC_NAMESPACE_DECL
+
+// Forward declarations for the vendor math libraries.
+extern "C" {
+#ifdef AMDGPU_MATH_FOUND
+double __ocml_sin_f64(double);
+float __ocml_sin_f32(float);
+double __ocml_atan2_f64(double, double);
+float __ocml_atan2_f32(float, float);
+#endif
+
+#ifdef NVPTX_MATH_FOUND
+double __nv_sin(double);
+float __nv_sinf(float);
+double __nv_atan2(double, double);
+float __nv_atan2f(float, float);
+#endif
+}
+
+#endif // LLVM_LIBC_SRC_MATH_AMDGPU_PLATFORM_H
diff --git a/libc/benchmarks/gpu/src/math/sin_benchmark.cpp b/libc/benchmarks/gpu/src/math/sin_benchmark.cpp
index bf09e6e462172..a759db2e9d33f 100644
--- a/libc/benchmarks/gpu/src/math/sin_benchmark.cpp
+++ b/libc/benchmarks/gpu/src/math/sin_benchmark.cpp
@@ -8,12 +8,8 @@
#include "src/math/sinf.h"
#include "src/stdlib/rand.h"
-#ifdef NVPTX_MATH_FOUND
-#include "src/math/nvptx/declarations.h"
-#endif
-
-#ifdef AMDGPU_MATH_FOUND
-#include "src/math/amdgpu/declarations.h"
+#if defined(NVPTX_MATH_FOUND) || defined(AMDGPU_MATH_FOUND)
+#include "platform.h"
#endif
// BENCHMARK() expects a function that with no parameters that returns a
@@ -42,17 +38,17 @@ BENCH(double, SinTwoPow30, LIBC_NAMESPACE::sin, 0, 30);
BENCH(double, SinVeryLarge, LIBC_NAMESPACE::sin, 30, 1000);
#ifdef NVPTX_MATH_FOUND
-BENCH(double, NvSin, LIBC_NAMESPACE::__nv_sin, -1023, 1023);
-BENCH(double, NvSinTwoPi, LIBC_NAMESPACE::__nv_sin, -10, 3);
-BENCH(double, NvSinTwoPow30, LIBC_NAMESPACE::__nv_sin, 0, 30);
-BENCH(double, NvSinVeryLarge, LIBC_NAMESPACE::__nv_sin, 30, 1000);
+BENCH(double, NvSin, __nv_sin, -1023, 1023);
+BENCH(double, NvSinTwoPi, __nv_sin, -10, 3);
+BENCH(double, NvSinTwoPow30, __nv_sin, 0, 30);
+BENCH(double, NvSinVeryLarge, __nv_sin, 30, 1000);
#endif
#ifdef AMDGPU_MATH_FOUND
-BENCH(double, AmdSin, LIBC_NAMESPACE::__ocml_sin_f64, -1023, 1023);
-BENCH(double, AmdSinTwoPi, LIBC_NAMESPACE::__ocml_sin_f64, -10, 3);
-BENCH(double, AmdSinTwoPow30, LIBC_NAMESPACE::__ocml_sin_f64, 0, 30);
-BENCH(double, AmdSinVeryLarge, LIBC_NAMESPACE::__ocml_sin_f64, 30, 1000);
+BENCH(double, AmdSin, __ocml_sin_f64, -1023, 1023);
+BENCH(double, AmdSinTwoPi, __ocml_sin_f64, -10, 3);
+BENCH(double, AmdSinTwoPow30, __ocml_sin_f64, 0, 30);
+BENCH(double, AmdSinVeryLarge, __ocml_sin_f64, 30, 1000);
#endif
BENCH(float, Sinf, LIBC_NAMESPACE::sinf, -127, 128);
@@ -61,15 +57,15 @@ BENCH(float, SinfTwoPow30, LIBC_NAMESPACE::sinf, 0, 30);
BENCH(float, SinfVeryLarge, LIBC_NAMESPACE::sinf, 30, 120);
#ifdef NVPTX_MATH_FOUND
-BENCH(float, NvSinf, LIBC_NAMESPACE::__nv_sinf, -127, 128);
-BENCH(float, NvSinfTwoPi, LIBC_NAMESPACE::__nv_sinf, -10, 3);
-BENCH(float, NvSinfTwoPow30, LIBC_NAMESPACE::__nv_sinf, 0, 30);
-BENCH(float, NvSinfVeryLarge, LIBC_NAMESPACE::__nv_sinf, 30, 120);
+BENCH(float, NvSinf, __nv_sinf, -127, 128);
+BENCH(float, NvSinfTwoPi, __nv_sinf, -10, 3);
+BENCH(float, NvSinfTwoPow30, __nv_sinf, 0, 30);
+BENCH(float, NvSinfVeryLarge, __nv_sinf, 30, 120);
#endif
#ifdef AMDGPU_MATH_FOUND
-BENCH(float, AmdSinf, LIBC_NAMESPACE::__ocml_sin_f32, -127, 128);
-BENCH(float, AmdSinfTwoPi, LIBC_NAMESPACE::__ocml_sin_f32, -10, 3);
-BENCH(float, AmdSinfTwoPow30, LIBC_NAMESPACE::__ocml_sin_f32, 0, 30);
-BENCH(float, AmdSinfVeryLarge, LIBC_NAMESPACE::__ocml_sin_f32, 30, 120);
+BENCH(float, AmdSinf, __ocml_sin_f32, -127, 128);
+BENCH(float, AmdSinfTwoPi, __ocml_sin_f32, -10, 3);
+BENCH(float, AmdSinfTwoPow30, __ocml_sin_f32, 0, 30);
+BENCH(float, AmdSinfVeryLarge, __ocml_sin_f32, 30, 120);
#endif
diff --git a/libc/benchmarks/gpu/timing/amdgpu/timing.h b/libc/benchmarks/gpu/timing/amdgpu/timing.h
index 4cf7e9838add3..0f2c04c07c921 100644
--- a/libc/benchmarks/gpu/timing/amdgpu/timing.h
+++ b/libc/benchmarks/gpu/timing/amdgpu/timing.h
@@ -10,6 +10,7 @@
#define LLVM_LIBC_UTILS_GPU_TIMING_AMDGPU
#include "src/__support/CPP/array.h"
+#include "src/__support/CPP/atomic.h"
#include "src/__support/CPP/type_traits.h"
#include "src/__support/GPU/utils.h"
#include "src/__support/common.h"
@@ -24,7 +25,7 @@ namespace LIBC_NAMESPACE_DECL {
// allows us to subtract the constant-time overhead from the latency to
// obtain a true result. This can vary with system load.
[[gnu::noinline]] static LIBC_INLINE uint64_t overhead() {
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
uint64_t start = gpu::processor_clock();
uint32_t result = 0.0;
asm("v_or_b32 %[v_reg], 0, %[v_reg]\n" ::[v_reg] "v"(result));
@@ -44,13 +45,13 @@ template <typename F, typename T>
T arg = storage;
// The AMDGPU architecture needs to wait on pending results.
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
// Get the current timestamp from the clock.
uint64_t start = gpu::processor_clock();
// This forces the compiler to load the input argument and run the clock
// cycle counter before the profiling region.
- asm("" ::"s"(start));
+ asm("" : "+v"(arg) : "s"(start));
// Run the function under test and return its value.
auto result = f(arg);
@@ -71,7 +72,7 @@ template <typename F, typename T>
// ordering.
uint64_t stop = gpu::processor_clock();
asm("" ::"s"(stop));
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
// Return the time elapsed.
return stop - start;
@@ -84,7 +85,7 @@ template <typename F, typename T1, typename T2>
T1 arg1 = storage1;
T2 arg2 = storage2;
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
uint64_t start = gpu::processor_clock();
asm("" ::"s"(start));
@@ -100,7 +101,7 @@ template <typename F, typename T1, typename T2>
uint64_t stop = gpu::processor_clock();
asm("" ::"s"(stop));
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
return stop - start;
}
@@ -111,7 +112,7 @@ template <typename F, typename T, size_t N>
throughput(F f, const cpp::array<T, N> &inputs) {
asm("" ::"v"(&inputs));
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
uint64_t start = gpu::processor_clock();
asm("" ::"s"(start));
@@ -124,7 +125,7 @@ throughput(F f, const cpp::array<T, N> &inputs) {
uint64_t stop = gpu::processor_clock();
asm("" ::"s"(stop));
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
// Return the time elapsed.
return stop - start;
@@ -136,7 +137,7 @@ template <typename F, typename T, size_t N>
F f, const cpp::array<T, N> &inputs1, const cpp::array<T, N> &inputs2) {
asm("" ::"v"(&inputs1), "v"(&inputs2));
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
uint64_t start = gpu::processor_clock();
asm("" ::"s"(start));
@@ -149,7 +150,7 @@ template <typename F, typename T, size_t N>
uint64_t stop = gpu::processor_clock();
asm("" ::"s"(stop));
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
// Return the time elapsed.
return stop - start;
diff --git a/libc/benchmarks/gpu/timing/nvptx/timing.h b/libc/benchmarks/gpu/timing/nvptx/timing.h
index ece7d9a6c5396..3ed97645ddc93 100644
--- a/libc/benchmarks/gpu/timing/nvptx/timing.h
+++ b/libc/benchmarks/gpu/timing/nvptx/timing.h
@@ -10,6 +10,7 @@
#define LLVM_LIBC_UTILS_GPU_TIMING_NVPTX
#include "src/__support/CPP/array.h"
+#include "src/__support/CPP/atomic.h"
#include "src/__support/CPP/type_traits.h"
#include "src/__support/GPU/utils.h"
#include "src/__support/common.h"
@@ -46,7 +47,7 @@ template <typename F, typename T>
T arg = storage;
// Get the current timestamp from the clock.
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
uint64_t start = gpu::processor_clock();
// This forces the compiler to load the input argument and run the clock cycle
@@ -63,7 +64,7 @@ template <typename F, typename T>
// Obtain the current timestamp after running the calculation and force
// ordering.
uint64_t stop = gpu::processor_clock();
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
asm("" ::"r"(stop));
volatile T output = result;
@@ -78,7 +79,7 @@ static LIBC_INLINE uint64_t latency(F f, T1 t1, T2 t2) {
T1 arg = storage;
T2 arg2 = storage2;
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
uint64_t start = gpu::processor_clock();
asm("" ::"llr"(start));
@@ -88,7 +89,7 @@ static LIBC_INLINE uint64_t latency(F f, T1 t1, T2 t2) {
asm("or.b32 %[v_reg], %[v_reg], 0;" ::[v_reg] "r"(result));
uint64_t stop = gpu::processor_clock();
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
asm("" ::"r"(stop));
volatile auto output = result;
@@ -101,7 +102,7 @@ template <typename F, typename T, size_t N>
throughput(F f, const cpp::array<T, N> &inputs) {
asm("" ::"r"(&inputs));
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
uint64_t start = gpu::processor_clock();
asm("" ::"llr"(start));
@@ -114,7 +115,7 @@ throughput(F f, const cpp::array<T, N> &inputs) {
}
uint64_t stop = gpu::processor_clock();
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
asm("" ::"r"(stop));
volatile auto output = result;
@@ -128,7 +129,7 @@ template <typename F, typename T, size_t N>
F f, const cpp::array<T, N> &inputs1, const cpp::array<T, N> &inputs2) {
asm("" ::"r"(&inputs1), "r"(&inputs2));
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
uint64_t start = gpu::processor_clock();
asm("" ::"llr"(start));
@@ -140,7 +141,7 @@ template <typename F, typename T, size_t N>
}
uint64_t stop = gpu::processor_clock();
- gpu::memory_fence();
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQ_REL);
asm("" ::"r"(stop));
volatile auto output = result;
More information about the cfe-commits
mailing list