[clang] b84721d - clang/AMDGPU: Emit atomicrmw for atomic_inc/dec builtins

Matt Arsenault via cfe-commits cfe-commits at lists.llvm.org
Fri Jun 16 17:18:55 PDT 2023


Author: Matt Arsenault
Date: 2023-06-16T20:18:50-04:00
New Revision: b84721df636747a9955408a934e713192a9cfe21

URL: https://github.com/llvm/llvm-project/commit/b84721df636747a9955408a934e713192a9cfe21
DIFF: https://github.com/llvm/llvm-project/commit/b84721df636747a9955408a934e713192a9cfe21.diff

LOG: clang/AMDGPU: Emit atomicrmw for atomic_inc/dec builtins

This makes the scope and ordering arguments actually do something.
Previously these builtins lowered to the llvm.amdgcn.atomic.inc/dec
intrinsics, which only carried the ordering, scope, and volatility as
constant operands; emitting a generic atomicrmw uinc_wrap/udec_wrap
instead attaches them as a real atomic ordering, syncscope, and
volatile flag on the instruction.
Also add some new OpenCL tests, since the existing HIP tests didn't
cover address spaces.
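
For illustration, the before/after lowering for a seq_cst/"workgroup"
call looks like this (a minimal sketch assembled from the updated test
checks below; the %ptr/%val/%res names are illustrative):

    // HIP/OpenCL source:
    res = __builtin_amdgcn_atomic_inc32(ptr, val, __ATOMIC_SEQ_CST, "workgroup");

    ; IR before this change (ordering, scope, and volatility encoded
    ; as the constant operands i32 7, i32 2, i1 false):
    %res = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr %ptr, i32 %val, i32 7, i32 2, i1 false)

    ; IR after this change:
    %res = atomicrmw uinc_wrap ptr %ptr, i32 %val syncscope("workgroup") seq_cst, align 4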

Added: 
    

Modified: 
    clang/lib/CodeGen/CGBuiltin.cpp
    clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp
    clang/test/CodeGenOpenCL/builtins-amdgcn.cl

Removed: 
    


################################################################################
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 405ffe3837446..b87ca22cb13f2 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -17520,40 +17520,33 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
   case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
   case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
   case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
-    unsigned BuiltinAtomicOp;
-    llvm::Type *ResultType = ConvertType(E->getType());
-
+    llvm::AtomicRMWInst::BinOp BinOp;
     switch (BuiltinID) {
     case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
     case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
-      BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
+      BinOp = llvm::AtomicRMWInst::UIncWrap;
       break;
     case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
     case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
-      BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
+      BinOp = llvm::AtomicRMWInst::UDecWrap;
       break;
     }
 
     Value *Ptr = EmitScalarExpr(E->getArg(0));
     Value *Val = EmitScalarExpr(E->getArg(1));
 
-    llvm::Function *F =
-        CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
-
     ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
                             EmitScalarExpr(E->getArg(3)), AO, SSID);
 
-    // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expects ordering and
-    // scope as unsigned values
-    Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
-    Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
-
     QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
     bool Volatile =
-      PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
-    Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
+        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
 
-    return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
+    llvm::AtomicRMWInst *RMW =
+        Builder.CreateAtomicRMW(BinOp, Ptr, Val, AO, SSID);
+    if (Volatile)
+      RMW->setVolatile(true);
+    return RMW;
   }
   case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtn:
   case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtnl: {

diff --git a/clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp b/clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp
index 8701b04be8355..4f3b5a4ce7760 100644
--- a/clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp
+++ b/clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: amdgpu-registered-target
 // RUN: %clang_cc1 %s -x hip -fcuda-is-device -emit-llvm -O0 -o - \
-// RUN:   -triple=amdgcn-amd-amdhsa  | opt -S | FileCheck %s
+// RUN:   -triple=amdgcn-amd-amdhsa | FileCheck %s
 
 // CHECK-LABEL: @_Z29test_non_volatile_parameter32Pj(
 // CHECK-NEXT:  entry:
@@ -13,12 +13,12 @@
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
-// CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr [[TMP0]], i32 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw uinc_wrap ptr [[TMP0]], i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP3]], ptr [[RES_ASCAST]], align 4
 // CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
-// CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr [[TMP4]], i32 [[TMP6]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP4]], i32 [[TMP6]] syncscope("workgroup") seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP7]], ptr [[RES_ASCAST]], align 4
 // CHECK-NEXT:    ret void
 //
@@ -39,12 +39,12 @@ __attribute__((device)) void test_non_volatile_parameter32(__UINT32_TYPE__ *ptr)
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr [[TMP0]], i64 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw uinc_wrap ptr [[TMP0]], i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP3]], ptr [[RES_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP6:%.*]] = load i64, ptr [[TMP5]], align 8
-// CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr [[TMP4]], i64 [[TMP6]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP4]], i64 [[TMP6]] syncscope("workgroup") seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP7]], ptr [[RES_ASCAST]], align 8
 // CHECK-NEXT:    ret void
 //
@@ -65,12 +65,12 @@ __attribute__((device)) void test_non_volatile_parameter64(__UINT64_TYPE__ *ptr)
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP2:%.*]] = load volatile i32, ptr [[TMP1]], align 4
-// CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr [[TMP0]], i32 [[TMP2]], i32 7, i32 2, i1 true)
+// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw volatile uinc_wrap ptr [[TMP0]], i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP3]], ptr [[RES_ASCAST]], align 4
 // CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP6:%.*]] = load volatile i32, ptr [[TMP5]], align 4
-// CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr [[TMP4]], i32 [[TMP6]], i32 7, i32 2, i1 true)
+// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw volatile udec_wrap ptr [[TMP4]], i32 [[TMP6]] syncscope("workgroup") seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP7]], ptr [[RES_ASCAST]], align 4
 // CHECK-NEXT:    ret void
 //
@@ -91,12 +91,12 @@ __attribute__((device)) void test_volatile_parameter32(volatile __UINT32_TYPE__
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP2:%.*]] = load volatile i64, ptr [[TMP1]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr [[TMP0]], i64 [[TMP2]], i32 7, i32 2, i1 true)
+// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw volatile uinc_wrap ptr [[TMP0]], i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP3]], ptr [[RES_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP6:%.*]] = load volatile i64, ptr [[TMP5]], align 8
-// CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr [[TMP4]], i64 [[TMP6]], i32 7, i32 2, i1 true)
+// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw volatile udec_wrap ptr [[TMP4]], i64 [[TMP6]] syncscope("workgroup") seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP7]], ptr [[RES_ASCAST]], align 8
 // CHECK-NEXT:    ret void
 //
@@ -110,10 +110,10 @@ __attribute__((device)) void test_volatile_parameter64(volatile __UINT64_TYPE__
 // CHECK-LABEL: @_Z13test_shared32v(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4
-// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), i32 [[TMP0]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4
 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4
-// CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), i32 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4
 // CHECK-NEXT:    ret void
 //
@@ -128,10 +128,10 @@ __attribute__((device)) void test_shared32() {
 // CHECK-LABEL: @_Z13test_shared64v(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), i64 [[TMP0]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8
 // CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), i64 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8
 // CHECK-NEXT:    ret void
 //
@@ -147,10 +147,10 @@ __attribute__((device)) __UINT32_TYPE__ global_val32;
 // CHECK-LABEL: @_Z13test_global32v(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4
-// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), i32 [[TMP0]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4
 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4
-// CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), i32 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4
 // CHECK-NEXT:    ret void
 //
@@ -164,10 +164,10 @@ __attribute__((device)) __UINT64_TYPE__ global_val64;
 // CHECK-LABEL: @_Z13test_global64v(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), i64 [[TMP0]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8
 // CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), i64 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8
 // CHECK-NEXT:    ret void
 //
@@ -183,10 +183,10 @@ __attribute__((constant)) __UINT32_TYPE__ cval32;
 // CHECK-NEXT:    [[LOCAL_VAL:%.*]] = alloca i32, align 4, addrspace(5)
 // CHECK-NEXT:    [[LOCAL_VAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[LOCAL_VAL]] to ptr
 // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), align 4
-// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), i32 [[TMP0]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP1]], ptr [[LOCAL_VAL_ASCAST]], align 4
 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), align 4
-// CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), i32 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP3]], ptr [[LOCAL_VAL_ASCAST]], align 4
 // CHECK-NEXT:    ret void
 //
@@ -204,10 +204,10 @@ __attribute__((constant)) __UINT64_TYPE__ cval64;
 // CHECK-NEXT:    [[LOCAL_VAL:%.*]] = alloca i64, align 8, addrspace(5)
 // CHECK-NEXT:    [[LOCAL_VAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[LOCAL_VAL]] to ptr
 // CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), i64 [[TMP0]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP1]], ptr [[LOCAL_VAL_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), i64 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP3]], ptr [[LOCAL_VAL_ASCAST]], align 8
 // CHECK-NEXT:    ret void
 //
@@ -222,22 +222,22 @@ __attribute__((device)) void test_constant64() {
 // CHECK-LABEL: @_Z12test_order32v(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
-// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP0]], i32 2, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP0]] syncscope("workgroup") monotonic, align 4
 // CHECK-NEXT:    store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
-// CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP2]], i32 4, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP2]] syncscope("workgroup") acquire, align 4
 // CHECK-NEXT:    store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
 // CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
-// CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP4]], i32 4, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP5:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP4]] syncscope("workgroup") acquire, align 4
 // CHECK-NEXT:    store i32 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
 // CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
-// CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP6]], i32 5, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP6]] syncscope("workgroup") release, align 4
 // CHECK-NEXT:    store i32 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
 // CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
-// CHECK-NEXT:    [[TMP9:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP8]], i32 6, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP9:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP8]] syncscope("workgroup") acq_rel, align 4
 // CHECK-NEXT:    store i32 [[TMP9]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
 // CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
-// CHECK-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP10]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP11:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP10]] syncscope("workgroup") seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP11]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
 // CHECK-NEXT:    ret void
 //
@@ -260,22 +260,22 @@ __attribute__((device)) void test_order32() {
 // CHECK-LABEL: @_Z12test_order64v(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP0]], i32 2, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP0]] syncscope("workgroup") monotonic, align 8
 // CHECK-NEXT:    store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
 // CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP2]], i32 4, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP2]] syncscope("workgroup") acquire, align 8
 // CHECK-NEXT:    store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
 // CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
-// CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP4]], i32 4, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP5:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP4]] syncscope("workgroup") acquire, align 8
 // CHECK-NEXT:    store i64 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
 // CHECK-NEXT:    [[TMP6:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
-// CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP6]], i32 5, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP6]] syncscope("workgroup") release, align 8
 // CHECK-NEXT:    store i64 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
 // CHECK-NEXT:    [[TMP8:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP8]], i32 6, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP9:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP8]] syncscope("workgroup") acq_rel, align 8
 // CHECK-NEXT:    store i64 [[TMP9]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
 // CHECK-NEXT:    [[TMP10:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP10]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP11:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP10]] syncscope("workgroup") seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP11]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
 // CHECK-NEXT:    ret void
 //
@@ -298,16 +298,16 @@ __attribute__((device)) void test_order64() {
 // CHECK-LABEL: @_Z12test_scope32v(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
-// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP0]], i32 7, i32 1, i1 false)
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP0]] seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
-// CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
 // CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
-// CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP4]], i32 7, i32 3, i1 false)
+// CHECK-NEXT:    [[TMP5:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP4]] syncscope("agent") seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
 // CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
-// CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP6]], i32 7, i32 4, i1 false)
+// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP6]] syncscope("wavefront") seq_cst, align 4
 // CHECK-NEXT:    store i32 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
 // CHECK-NEXT:    ret void
 //
@@ -326,16 +326,16 @@ __attribute__((device)) void test_scope32() {
 // CHECK-LABEL: @_Z12test_scope64v(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP0]], i32 7, i32 1, i1 false)
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP0]] seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
 // CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP2]], i32 7, i32 2, i1 false)
+// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
 // CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
-// CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP4]], i32 7, i32 3, i1 false)
+// CHECK-NEXT:    [[TMP5:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP4]] syncscope("agent") seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
 // CHECK-NEXT:    [[TMP6:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
-// CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP6]], i32 7, i32 4, i1 false)
+// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP6]] syncscope("wavefront") seq_cst, align 8
 // CHECK-NEXT:    store i64 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
 // CHECK-NEXT:    ret void
 //

diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
index f611bc6f16d15..18725d4193efb 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
@@ -810,6 +810,26 @@ kernel void test_s_setreg(uint val) {
   __builtin_amdgcn_s_setreg(8193, val);
 }
 
+// CHECK-LABEL: test_atomic_inc_dec(
+void test_atomic_inc_dec(local uint *lptr, global uint *gptr, uint val) {
+  uint res;
+
+  // CHECK: atomicrmw uinc_wrap ptr addrspace(3) %lptr, i32 %val syncscope("workgroup") seq_cst, align 4
+  res = __builtin_amdgcn_atomic_inc32(lptr, val, __ATOMIC_SEQ_CST, "workgroup");
+
+  // CHECK: atomicrmw udec_wrap ptr addrspace(3) %lptr, i32 %val syncscope("workgroup") seq_cst, align 4
+  res = __builtin_amdgcn_atomic_dec32(lptr, val, __ATOMIC_SEQ_CST, "workgroup");
+
+  // CHECK: atomicrmw uinc_wrap ptr addrspace(1) %gptr, i32 %val syncscope("agent") seq_cst, align 4
+  res = __builtin_amdgcn_atomic_inc32(gptr, val, __ATOMIC_SEQ_CST, "agent");
+
+  // CHECK: atomicrmw udec_wrap ptr addrspace(1) %gptr, i32 %val seq_cst, align 4
+  res = __builtin_amdgcn_atomic_dec32(gptr, val, __ATOMIC_SEQ_CST, "");
+
+  // CHECK: atomicrmw volatile udec_wrap ptr addrspace(1) %gptr, i32 %val seq_cst, align 4
+  res = __builtin_amdgcn_atomic_dec32((volatile global uint*)gptr, val, __ATOMIC_SEQ_CST, "");
+}
+
 // CHECK-DAG: [[$WI_RANGE]] = !{i32 0, i32 1024}
 // CHECK-DAG: [[$WS_RANGE]] = !{i16 1, i16 1025}
 // CHECK-DAG: attributes #[[$NOUNWIND_READONLY]] = { mustprogress nocallback nofree nosync nounwind willreturn memory(read) }
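
To reproduce the new IR locally, the HIP RUN line above expands to
roughly the following (a sketch: %clang_cc1 also adds internal include
paths, and the test path assumes an llvm-project checkout):

    clang -cc1 clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp \
        -x hip -fcuda-is-device -emit-llvm -O0 -o - -triple=amdgcn-amd-amdhsa \
      | FileCheck clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp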

