[llvm] [AMDGPU] Introduce ordering parameter to atomic intrinsics and introduce new llvm.amdgcn.image.atomic.load intrinsic. (PR #73613)

via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 22 05:38:56 PST 2023


https://github.com/sstipanovic updated https://github.com/llvm/llvm-project/pull/73613

>From 33960a42cad7f7156017fa05fab5a6b2455fb3f9 Mon Sep 17 00:00:00 2001
From: Stefan Stipanovic <Stefan.Stipanovic at amd.com>
Date: Fri, 22 Dec 2023 14:38:14 +0100
Subject: [PATCH] [AMDGPU] Introduce llvm.amdgcn.image.atomic.load intrinsic.
 This intrinsic should behave mostly identically to an llvm.amdgcn.image.load,
 except that:
   - It is not marked as IntrReadMem. This is to ensure that the implied
     memory semantics are preserved.
   - When lowering, its MachineMemOperand is to get the "acquire" memory
     semantics.

---
 llvm/include/llvm/CodeGen/SelectionDAG.h      |  9 ++++--
 llvm/include/llvm/CodeGen/TargetLowering.h    |  2 ++
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td      | 16 ++++++----
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp  |  6 ++--
 .../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 10 +++++--
 .../SelectionDAG/SelectionDAGBuilder.cpp      |  6 ++--
 llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h      |  1 +
 .../AMDGPU/AMDGPUInstructionSelector.cpp      |  2 ++
 llvm/lib/Target/AMDGPU/MIMGInstructions.td    | 30 +++++++++++++++++++
 llvm/lib/Target/AMDGPU/SIDefines.h            |  2 ++
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     | 14 +++++++++
 llvm/test/CodeGen/AMDGPU/atomic-image-load.ll | 29 ++++++++++++++++++
 12 files changed, 111 insertions(+), 16 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/atomic-image-load.ll

diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index e867448b9d5512..c7a6d4862969b1 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -36,6 +36,7 @@
 #include "llvm/IR/Metadata.h"
 #include "llvm/Support/Allocator.h"
 #include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Support/AtomicOrdering.h"
 #include "llvm/Support/CodeGen.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/RecyclingAllocator.h"
@@ -1297,7 +1298,8 @@ class SelectionDAG {
       EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
       MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
                                        MachineMemOperand::MOStore,
-      uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes());
+      uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes(),
+      AtomicOrdering Ordering = AtomicOrdering::NotAtomic);
 
   inline SDValue getMemIntrinsicNode(
       unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
@@ -1305,11 +1307,12 @@ class SelectionDAG {
       MaybeAlign Alignment = std::nullopt,
       MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
                                        MachineMemOperand::MOStore,
-      uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
+      uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes(),
+      AtomicOrdering Ordering = AtomicOrdering::NotAtomic) {
     // Ensure that codegen never sees alignment 0
     return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
                                Alignment.value_or(getEVTAlign(MemVT)), Flags,
-                               Size, AAInfo);
+                               Size, AAInfo, Ordering);
   }
 
   SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList,
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 490125164ab341..78b423177c233f 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1154,6 +1154,8 @@ class TargetLoweringBase {
     MaybeAlign align = Align(1);   // alignment
 
     MachineMemOperand::Flags flags = MachineMemOperand::MONone;
+
+    AtomicOrdering ordering = AtomicOrdering::NotAtomic;
     IntrinsicInfo() = default;
   };
 
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 06f9c0445bceac..03c04605ba8137 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -796,7 +796,8 @@ class AMDGPUImageDimIntrinsicEval<AMDGPUDimProfile P_> {
 // All dimension-aware intrinsics are derived from this class.
 class AMDGPUImageDimIntrinsic<AMDGPUDimProfile P_,
                               list<IntrinsicProperty> props,
-                              list<SDNodeProperty> sdnodeprops> : DefaultAttrsIntrinsic<
+                              list<SDNodeProperty> sdnodeprops,
+                              string name = ""> : Intrinsic<
     P_.RetTypes,        // vdata(VGPR) -- for load/atomic-with-return
     !listconcat(
       !foreach(arg, P_.DataArgs, arg.Type),      // vdata(VGPR) -- for store/atomic
@@ -806,13 +807,14 @@ class AMDGPUImageDimIntrinsic<AMDGPUDimProfile P_,
       !if(P_.IsSample, [llvm_v4i32_ty,           // samp(SGPR)
                         llvm_i1_ty], []),        // unorm(imm)
       [llvm_i32_ty,                              // texfailctrl(imm; bit 0 = tfe, bit 1 = lwe)
-       llvm_i32_ty]),                            // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc)
+       llvm_i32_ty]),                             // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc)
 
-     !listconcat(props,
+     !listconcat(props, [IntrNoCallback, IntrNoFree, IntrWillReturn],
           !if(P_.IsAtomic, [], [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.DmaskArgIndex>>]),
           !if(P_.IsSample, [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.UnormArgIndex>>], []),
           [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.TexFailCtrlArgIndex>>,
-           ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.CachePolicyArgIndex>>]),
+           ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.CachePolicyArgIndex>>],
+          !if(!or(P_.IsAtomic, !eq(name, "int_amdgcn_image_atomic_load")), [], [IntrNoSync])),
 
 
       "", sdnodeprops>,
@@ -858,7 +860,7 @@ defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimIntrinsics = {
       def !strconcat(NAME, "_", dim.Name)
         : AMDGPUImageDimIntrinsic<
             AMDGPUDimNoSampleProfile<opmod, dim, retty, dataargs, Mip>,
-            props, sdnodeprops>;
+            props, sdnodeprops, NAME>;
     }
   }
 
@@ -866,6 +868,10 @@ defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimIntrinsics = {
     : AMDGPUImageDimIntrinsicsAll<"LOAD", [llvm_any_ty], [], [IntrReadMem],
                                   [SDNPMemOperand]>,
       AMDGPUImageDMaskIntrinsic;
+  defm int_amdgcn_image_atomic_load
+    : AMDGPUImageDimIntrinsicsAll<"ATOMIC_LOAD", [llvm_any_ty], [], [],
+                                  [SDNPMemOperand]>,
+      AMDGPUImageDMaskIntrinsic;
   defm int_amdgcn_image_load_mip
     : AMDGPUImageDimIntrinsicsNoMsaa<"LOAD_MIP", [llvm_any_ty], [],
                                      [IntrReadMem, IntrWillReturn], [SDNPMemOperand], 1>,
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 14a4e72152e7c4..93127600e0d125 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2607,8 +2607,10 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
       MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
     else if (Info.fallbackAddressSpace)
       MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
-    MIB.addMemOperand(
-        MF->getMachineMemOperand(MPI, Info.flags, MemTy, Alignment, CI.getAAMetadata()));
+    MIB.addMemOperand(MF->getMachineMemOperand(
+        MPI, Info.flags, MemTy, Alignment, CI.getAAMetadata(),
+        /*Ranges*/ nullptr, /*SSID*/ SyncScope::System, Info.ordering,
+        Info.ordering));
   }
 
   return true;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 51ae8b703e50f2..9e94d6016b5cb1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -55,8 +55,10 @@
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Metadata.h"
 #include "llvm/IR/Type.h"
+#include "llvm/Support/AtomicOrdering.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/CodeGen.h"
 #include "llvm/Support/Compiler.h"
@@ -8335,15 +8337,17 @@ SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
 SDValue SelectionDAG::getMemIntrinsicNode(
     unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
     EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
-    MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
+    MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo,
+    AtomicOrdering Ordering) {
   if (!Size && MemVT.isScalableVector())
     Size = MemoryLocation::UnknownSize;
   else if (!Size)
     Size = MemVT.getStoreSize();
 
   MachineFunction &MF = getMachineFunction();
-  MachineMemOperand *MMO =
-      MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo);
+  MachineMemOperand *MMO = MF.getMachineMemOperand(
+      PtrInfo, Flags, Size, Alignment, AAInfo, /*Ranges*/ nullptr,
+      /*SSID*/ SyncScope::System, Ordering, Ordering);
 
   return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
 }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index ed1c96a873748f..f0ff6b16756f8b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4986,9 +4986,9 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
       MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
     else if (Info.fallbackAddressSpace)
       MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
-    Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops,
-                                     Info.memVT, MPI, Info.align, Info.flags,
-                                     Info.size, I.getAAMetadata());
+    Result = DAG.getMemIntrinsicNode(
+        Info.opc, getCurSDLoc(), VTs, Ops, Info.memVT, MPI, Info.align,
+        Info.flags, Info.size, I.getAAMetadata(), Info.ordering);
   } else if (!HasChain) {
     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
   } else if (!I.getType()->isVoidTy()) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
index 515decea3921ed..585d99ffc862ff 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
@@ -81,6 +81,7 @@ struct ImageDimIntrinsicInfo {
   uint8_t UnormIndex;
   uint8_t TexFailCtrlIndex;
   uint8_t CachePolicyIndex;
+  // uint8_t AtomicOrderingIndex;
 
   uint8_t BiasTyArg;
   uint8_t GradientTyArg;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index d24c7da964ce85..902f7874067c49 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -17,6 +17,7 @@
 #include "AMDGPUInstrInfo.h"
 #include "AMDGPURegisterBankInfo.h"
 #include "AMDGPUTargetMachine.h"
+#include "SIDefines.h"
 #include "SIMachineFunctionInfo.h"
 #include "Utils/AMDGPUBaseInfo.h"
 #include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
@@ -1901,6 +1902,7 @@ bool AMDGPUInstructionSelector::selectImageIntrinsic(
   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
 
   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
+  CPol &= ~AMDGPU::CPol::ATOMIC_ORDERING;
   if (BaseOpcode->Atomic)
     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
   if (CPol & ~AMDGPU::CPol::ALL)
diff --git a/llvm/lib/Target/AMDGPU/MIMGInstructions.td b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
index 240366c8e7daae..25aabcc5b0b405 100644
--- a/llvm/lib/Target/AMDGPU/MIMGInstructions.td
+++ b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
@@ -653,6 +653,31 @@ multiclass MIMG_NoSampler <mimgopc op, string asm, bit has_d16, bit mip = 0,
   }
 }
 
+multiclass MIMG_NoSampler_Pseudo <mimgopc op, string asm, bit has_d16, bit mip = 0,
+                           bit isResInfo = 0,
+                           bit msaa = 0> {
+  def "" : MIMGBaseOpcode {
+    let Coordinates = !not(isResInfo);
+    let LodOrClampOrMip = mip;
+    let HasD16 = has_d16;
+    let MSAA = msaa;
+  }
+
+  let BaseOpcode = !cast<MIMGBaseOpcode>(NAME),
+      mayLoad = !not(isResInfo), isCodeGenOnly = 1 in {
+    let VDataDwords = 1 in
+    defm _V1 : MIMG_NoSampler_Src_Helper <op, asm, VGPR_32, 1, msaa>;
+    let VDataDwords = 2 in
+    defm _V2 : MIMG_NoSampler_Src_Helper <op, asm, VReg_64, 0, msaa>;
+    let VDataDwords = 3 in
+    defm _V3 : MIMG_NoSampler_Src_Helper <op, asm, VReg_96, 0, msaa>;
+    let VDataDwords = 4 in
+    defm _V4 : MIMG_NoSampler_Src_Helper <op, asm, VReg_128, 0, msaa>;
+    let VDataDwords = 5 in
+    defm _V5 : MIMG_NoSampler_Src_Helper <op, asm, VReg_160, 0, msaa>;
+  }
+}
+
 class MIMG_Store_Helper <mimgopc op, string asm,
                          RegisterClass data_rc,
                          RegisterClass addr_rc,
@@ -1554,7 +1579,11 @@ defm IMAGE_ATOMIC_FCMPSWAP      : MIMG_Atomic <mimgopc<MIMG.NOP, MIMG.NOP, 0x1d,
 defm IMAGE_ATOMIC_FMIN          : MIMG_Atomic <mimgopc<MIMG.NOP, MIMG.NOP, 0x1e, MIMG.NOP>, "image_atomic_fmin", 0, 1>;
 defm IMAGE_ATOMIC_FMAX          : MIMG_Atomic <mimgopc<MIMG.NOP, MIMG.NOP, 0x1f, MIMG.NOP>, "image_atomic_fmax", 0, 1>;
 
+
+defm IMAGE_ATOMIC_LOAD          : MIMG_NoSampler_Pseudo <mimgopc<0x00, 0x00, 0x00>, "image_load", 1>;
+
 defm IMAGE_SAMPLE               : MIMG_Sampler_WQM <mimgopc<0x1b, 0x1b, 0x20>, AMDGPUSample>;
+
 let OtherPredicates = [HasExtendedImageInsts] in {
 defm IMAGE_SAMPLE_CL            : MIMG_Sampler_WQM <mimgopc<0x40, 0x40, 0x21>, AMDGPUSample_cl>;
 defm IMAGE_SAMPLE_D             : MIMG_Sampler <mimgopc<0x1c, 0x1c, 0x22>, AMDGPUSample_d>;
@@ -1698,6 +1727,7 @@ class ImageDimIntrinsicInfo<AMDGPUImageDimIntrinsic I> {
   bits<8> UnormIndex = DimEval.UnormArgIndex;
   bits<8> TexFailCtrlIndex = DimEval.TexFailCtrlArgIndex;
   bits<8> CachePolicyIndex = DimEval.CachePolicyArgIndex;
+  // bits<8> AtomicOrderingIndex = DimEval.AtomicOrderingIndex;
 
   bits<8> BiasTyArg = !add(I.P.NumRetAndDataAnyTypes,
     !if(!eq(NumOffsetArgs, 0), 0, I.P.ExtraAddrArgs[0].Type.isAny));
diff --git a/llvm/lib/Target/AMDGPU/SIDefines.h b/llvm/lib/Target/AMDGPU/SIDefines.h
index 659ff75e13d01f..cf6eedf5f2b72d 100644
--- a/llvm/lib/Target/AMDGPU/SIDefines.h
+++ b/llvm/lib/Target/AMDGPU/SIDefines.h
@@ -353,6 +353,7 @@ enum CPol {
   SLC = 2,
   DLC = 4,
   SCC = 16,
+  // ATOMIC_ORDERING = 32,
   SC0 = GLC,
   SC1 = SCC,
   NT = SLC,
@@ -397,6 +398,7 @@ enum CPol {
   TH_TYPE_STORE = 1 << 8,   // TH_STORE policy
   TH_TYPE_ATOMIC = 1 << 9,  // TH_ATOMIC policy
   TH_REAL_BYPASS = 1 << 10, // is TH=3 bypass policy or not
+  ATOMIC_ORDERING = 0x70, // Atomic ordering bits mask
 };
 
 } // namespace CPol
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a7f4d63229b7ef..e3a03e88a95545 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -17,8 +17,10 @@
 #include "AMDGPUTargetMachine.h"
 #include "GCNSubtarget.h"
 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
+#include "SIDefines.h"
 #include "SIMachineFunctionInfo.h"
 #include "SIRegisterInfo.h"
+#include "Utils/AMDGPUBaseInfo.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/FloatingPointMode.h"
 #include "llvm/ADT/Statistic.h"
@@ -39,6 +41,7 @@
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
 #include "llvm/IR/IntrinsicsR600.h"
+#include "llvm/Support/AtomicOrdering.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/KnownBits.h"
 #include "llvm/Support/ModRef.h"
@@ -1131,6 +1134,17 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
       // XXX - Should this be volatile without known ordering?
       Info.flags |= MachineMemOperand::MOVolatile;
 
+      if (RsrcIntr->IsImage) {
+        auto Idx = CI.arg_size() - 1;
+        unsigned OrderingArg =
+            cast<ConstantInt>(CI.getArgOperand(Idx))->getZExtValue();
+        auto Ordering = (OrderingArg & AMDGPU::CPol::ATOMIC_ORDERING) >> 4;
+        unsigned ClearedCPol = OrderingArg & ~AMDGPU::CPol::ATOMIC_ORDERING;
+        ConstantInt *CPol = ConstantInt::get(IntegerType::getInt32Ty(CI.getContext()), ClearedCPol);
+        const_cast<CallInst &>(CI).setArgOperand(Idx, CPol);
+        Info.ordering = static_cast<AtomicOrdering>(Ordering);
+      }
+
       switch (IntrID) {
       default:
         break;
diff --git a/llvm/test/CodeGen/AMDGPU/atomic-image-load.ll b/llvm/test/CodeGen/AMDGPU/atomic-image-load.ll
new file mode 100644
index 00000000000000..95e2695ee97ffc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/atomic-image-load.ll
@@ -0,0 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=gfx1010 < %s | FileCheck %s -check-prefix=GFX10
+; RUN: llc -march=amdgcn -mcpu=gfx900 < %s | FileCheck %s -check-prefix=GFX9
+; RUN: llc -march=amdgcn -mcpu=gfx1100 < %s | FileCheck %s -check-prefix=GFX11
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 < %s | FileCheck %s -check-prefix=GFX10
+
+define amdgpu_ps void @test(<8 x i32> inreg %load, <8 x i32> inreg %store) {
+; GFX10-LABEL: test:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    v_mov_b32_e32 v0, 0
+; GFX10-NEXT:    image_load v0, v0, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX10-NEXT:    s_endpgm
+;
+; GFX9-LABEL: test:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    image_load v0, v0, s[0:7] dmask:0x1 unorm
+; GFX9-NEXT:    s_endpgm
+;
+; GFX11-LABEL: test:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_mov_b32_e32 v0, 0
+; GFX11-NEXT:    image_load v0, v0, s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_1D unorm
+; GFX11-NEXT:    s_endpgm
+  %data0 = call float @llvm.amdgcn.image.atomic.load.1d.f32.i32(i32 1, i32 0, <8 x i32> %load, i32 0, i32 112)
+  ret void
+}
+
+declare float @llvm.amdgcn.image.atomic.load.1d.f32.i32(i32 immarg, i32, <8 x i32>, i32 immarg, i32 immarg)



More information about the llvm-commits mailing list