[llvm] r263140 - AMDGPU/SI: add llvm.amdgcn.buffer.load/store.format intrinsics

Nicolai Haehnle via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 10 10:43:51 PST 2016


Author: nha
Date: Thu Mar 10 12:43:50 2016
New Revision: 263140

URL: http://llvm.org/viewvc/llvm-project?rev=263140&view=rev
Log:
AMDGPU/SI: add llvm.amdgcn.buffer.load/store.format intrinsics

Summary:
They correspond to BUFFER_LOAD/STORE_FORMAT_XYZW and will be used by Mesa
to implement the GL_ARB_shader_image_load_store extension.

The intention is that for llvm.amdgcn.buffer.load.format, LLVM will eventually
decide whether one of the smaller _X/_XY/_XYZ opcodes can be used instead
(similar to what is done for image sampling and loads). However, this is not
implemented yet.

For llvm.amdgcn.buffer.store.format, LLVM cannot narrow the store to one of the
"smaller" opcodes on its own, so the intrinsic is overloaded on the data type.
Currently, only the v4f32 variant is actually implemented, since GLSL likewise
only has a vec4 variant of the store instructions, although it's conceivable
that Mesa will want to be smarter about this in the future.

BUFFER_LOAD_FORMAT_XYZW is already exposed via llvm.SI.vs.load.input, which
has a legacy name, pretends not to access memory, and does not capture the
full flexibility of the instruction.

Reviewers: arsenm, tstellarAMD, mareko

Subscribers: arsenm, llvm-commits

Differential Revision: http://reviews.llvm.org/D17277

Added:
    llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.format.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.store.format.ll
Modified:
    llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
    llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
    llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp

Modified: llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td?rev=263140&r1=263139&r2=263140&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td (original)
+++ llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td Thu Mar 10 12:43:50 2016
@@ -205,6 +205,28 @@ def int_amdgcn_image_atomic_cmpswap : In
    llvm_i1_ty],       // slc(imm)
   []>;
 
+def int_amdgcn_buffer_load_format : Intrinsic <
+  [llvm_v4f32_ty],
+  [llvm_v4i32_ty,     // rsrc(SGPR)
+   llvm_i32_ty,       // soffset(SGPR)
+   llvm_i32_ty,       // offset(imm)
+   llvm_i32_ty,       // vindex(VGPR)
+   llvm_i32_ty,       // voffset(VGPR)
+   llvm_i1_ty,        // glc(imm)
+   llvm_i1_ty],       // slc(imm)
+  [IntrReadMem]>;
+
+def int_amdgcn_buffer_store_format : Intrinsic <
+  [],
+  [llvm_anyfloat_ty,  // vdata(VGPR) -- can currently only select v4f32
+   llvm_v4i32_ty,     // rsrc(SGPR)
+   llvm_i32_ty,       // soffset(SGPR)
+   llvm_i32_ty,       // offset(imm)
+   llvm_i32_ty,       // vindex(VGPR)
+   llvm_i32_ty,       // voffset(VGPR)
+   llvm_i1_ty,        // glc(imm)
+   llvm_i1_ty],       // slc(imm)
+  []>;
 
 def int_amdgcn_read_workdim : AMDGPUReadPreloadRegisterIntrinsic <
   "__builtin_amdgcn_read_workdim">;

Modified: llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstructions.td?rev=263140&r1=263139&r2=263140&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstructions.td Thu Mar 10 12:43:50 2016
@@ -949,18 +949,23 @@ defm BUFFER_LOAD_FORMAT_XYZ : MUBUF_Load
 defm BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <
   mubuf<0x03>, "buffer_load_format_xyzw", VReg_128
 >;
-defm BUFFER_STORE_FORMAT_X : MUBUF_Store_Helper <
-  mubuf<0x04>, "buffer_store_format_x", VGPR_32
->;
-defm BUFFER_STORE_FORMAT_XY : MUBUF_Store_Helper <
-  mubuf<0x05>, "buffer_store_format_xy", VReg_64
->;
-defm BUFFER_STORE_FORMAT_XYZ : MUBUF_Store_Helper <
-  mubuf<0x06>, "buffer_store_format_xyz", VReg_96
->;
-defm BUFFER_STORE_FORMAT_XYZW : MUBUF_Store_Helper <
-  mubuf<0x07>, "buffer_store_format_xyzw", VReg_128
->;
+// Without mayLoad and hasSideEffects, TableGen complains about the pattern
+// matching llvm.amdgcn.buffer.store.format. Eventually, we'll need a way
+// to express the effects of the intrinsic more precisely.
+let mayLoad = 1, hasSideEffects = 1 in {
+  defm BUFFER_STORE_FORMAT_X : MUBUF_Store_Helper <
+    mubuf<0x04>, "buffer_store_format_x", VGPR_32
+  >;
+  defm BUFFER_STORE_FORMAT_XY : MUBUF_Store_Helper <
+    mubuf<0x05>, "buffer_store_format_xy", VReg_64
+  >;
+  defm BUFFER_STORE_FORMAT_XYZ : MUBUF_Store_Helper <
+    mubuf<0x06>, "buffer_store_format_xyz", VReg_96
+  >;
+  defm BUFFER_STORE_FORMAT_XYZW : MUBUF_Store_Helper <
+    mubuf<0x07>, "buffer_store_format_xyzw", VReg_128
+  >;
+}
 defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper <
   mubuf<0x08, 0x10>, "buffer_load_ubyte", VGPR_32, i32, az_extloadi8_global
 >;
@@ -2094,6 +2099,74 @@ def : Pat <
 >;
 
 //===----------------------------------------------------------------------===//
+// buffer_load/store_format patterns
+//===----------------------------------------------------------------------===//
+def : Pat<
+  (int_amdgcn_buffer_load_format v4i32:$rsrc, i32:$soffset, imm:$offset, 0, 0,
+                                 imm:$glc, imm:$slc),
+  (BUFFER_LOAD_FORMAT_XYZW_OFFSET $rsrc, $soffset, (as_i16imm $offset),
+    (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+def : Pat<
+  (int_amdgcn_buffer_load_format v4i32:$rsrc, i32:$soffset, imm:$offset, i32:$vindex, 0,
+                                 imm:$glc, imm:$slc),
+  (BUFFER_LOAD_FORMAT_XYZW_IDXEN $vindex, $rsrc, $soffset, (as_i16imm $offset),
+    (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+def : Pat<
+  (int_amdgcn_buffer_load_format v4i32:$rsrc, i32:$soffset, imm:$offset, 0, i32:$voffset,
+                                 imm:$glc, imm:$slc),
+  (BUFFER_LOAD_FORMAT_XYZW_OFFEN $voffset, $rsrc, $soffset, (as_i16imm $offset),
+    (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+def : Pat<
+  (int_amdgcn_buffer_load_format v4i32:$rsrc, i32:$soffset, imm:$offset, i32:$vindex, i32:$voffset,
+                                 imm:$glc, imm:$slc),
+  (BUFFER_LOAD_FORMAT_XYZW_BOTHEN
+    (REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1),
+    $rsrc, $soffset, (as_i16imm $offset),
+    (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+def : Pat<
+  (int_amdgcn_buffer_store_format v4f32:$vdata, v4i32:$rsrc,
+                                  i32:$soffset, imm:$offset, 0, 0,
+                                  imm:$glc, imm:$slc),
+  (BUFFER_STORE_FORMAT_XYZW_OFFSET $vdata, $rsrc, $soffset, (as_i16imm $offset),
+    (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+def : Pat<
+  (int_amdgcn_buffer_store_format v4f32:$vdata, v4i32:$rsrc,
+                                 i32:$soffset, imm:$offset, i32:$vindex, 0,
+                                 imm:$glc, imm:$slc),
+  (BUFFER_STORE_FORMAT_XYZW_IDXEN $vdata, $vindex, $rsrc, $soffset,
+    (as_i16imm $offset), (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+def : Pat<
+  (int_amdgcn_buffer_store_format v4f32:$vdata, v4i32:$rsrc,
+                                  i32:$soffset, imm:$offset, 0, i32:$voffset,
+                                  imm:$glc, imm:$slc),
+  (BUFFER_STORE_FORMAT_XYZW_OFFEN $vdata, $voffset, $rsrc, $soffset,
+    (as_i16imm $offset), (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+def : Pat<
+  (int_amdgcn_buffer_store_format v4f32:$vdata, v4i32:$rsrc, i32:$soffset,
+                                  imm:$offset, i32:$vindex, i32:$voffset,
+                                  imm:$glc, imm:$slc),
+  (BUFFER_STORE_FORMAT_XYZW_BOTHEN
+    $vdata,
+    (REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1),
+    $rsrc, $soffset, (as_i16imm $offset),
+    (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+//===----------------------------------------------------------------------===//
 // S_GETREG_B32 Intrinsic Pattern.
 //===----------------------------------------------------------------------===//
 def : Pat <

Modified: llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp?rev=263140&r1=263139&r2=263140&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp Thu Mar 10 12:43:50 2016
@@ -236,7 +236,7 @@ void SIRegisterInfo::buildScratchLoadSto
       static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
   LLVMContext &Ctx = MF->getFunction()->getContext();
   DebugLoc DL = MI->getDebugLoc();
-  bool IsLoad = TII->get(LoadStoreOp).mayLoad();
+  bool IsStore = TII->get(LoadStoreOp).mayStore();
 
   bool RanOutOfSGPRs = false;
   bool Scavenged = false;
@@ -272,14 +272,14 @@ void SIRegisterInfo::buildScratchLoadSto
       SOffsetRegState |= RegState::Kill;
 
     BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
-      .addReg(SubReg, getDefRegState(IsLoad))
+      .addReg(SubReg, getDefRegState(!IsStore))
       .addReg(ScratchRsrcReg)
       .addReg(SOffset, SOffsetRegState)
       .addImm(Offset)
       .addImm(0) // glc
       .addImm(0) // slc
       .addImm(0) // tfe
-      .addReg(Value, RegState::Implicit | getDefRegState(IsLoad))
+      .addReg(Value, RegState::Implicit | getDefRegState(!IsStore))
       .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
   }
 }

Added: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.format.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.format.ll?rev=263140&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.format.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.format.ll Thu Mar 10 12:43:50 2016
@@ -0,0 +1,69 @@
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
+
+;CHECK-LABEL: {{^}}buffer_load:
+;CHECK: buffer_load_format_xyzw v[0:3], s[0:3], s4
+;CHECK: buffer_load_format_xyzw v[4:7], s[0:3], s4 glc
+;CHECK: buffer_load_format_xyzw v[8:11], s[0:3], s4 slc
+;CHECK: s_waitcnt
+define {<4 x float>, <4 x float>, <4 x float>} @buffer_load(<4 x i32> inreg, i32 inreg) #0 {
+main_body:
+  %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 0, i1 0)
+  %data_glc = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 1, i1 0)
+  %data_slc = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 0, i1 1)
+  %r0 = insertvalue {<4 x float>, <4 x float>, <4 x float>} undef, <4 x float> %data, 0
+  %r1 = insertvalue {<4 x float>, <4 x float>, <4 x float>} %r0, <4 x float> %data_glc, 1
+  %r2 = insertvalue {<4 x float>, <4 x float>, <4 x float>} %r1, <4 x float> %data_slc, 2
+  ret {<4 x float>, <4 x float>, <4 x float>} %r2
+}
+
+;CHECK-LABEL: {{^}}buffer_load_immoffs:
+;CHECK: buffer_load_format_xyzw v[0:3], s[0:3], s4 offset:42
+;CHECK: s_waitcnt
+define <4 x float> @buffer_load_immoffs(<4 x i32> inreg, i32 inreg) #0 {
+main_body:
+  %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 %1, i32 42, i32 0, i32 0, i1 0, i1 0)
+  ret <4 x float> %data
+}
+
+;CHECK-LABEL: {{^}}buffer_load_idx:
+;CHECK: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen
+;CHECK: s_waitcnt
+define <4 x float> @buffer_load_idx(<4 x i32> inreg, i32) #0 {
+main_body:
+  %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 %1, i32 0, i1 0, i1 0)
+  ret <4 x float> %data
+}
+
+;CHECK-LABEL: {{^}}buffer_load_ofs:
+;CHECK: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 offen
+;CHECK: s_waitcnt
+define <4 x float> @buffer_load_ofs(<4 x i32> inreg, i32) #0 {
+main_body:
+  %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 0, i32 %1, i1 0, i1 0)
+  ret <4 x float> %data
+}
+
+;CHECK-LABEL: {{^}}buffer_load_both:
+;CHECK: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
+;CHECK: s_waitcnt
+define <4 x float> @buffer_load_both(<4 x i32> inreg, i32, i32) #0 {
+main_body:
+  %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 %1, i32 %2, i1 0, i1 0)
+  ret <4 x float> %data
+}
+
+;CHECK-LABEL: {{^}}buffer_load_both_reversed:
+;CHECK: v_mov_b32_e32 v2, v0
+;CHECK: buffer_load_format_xyzw v[0:3], v[1:2], s[0:3], 0 idxen offen
+;CHECK: s_waitcnt
+define <4 x float> @buffer_load_both_reversed(<4 x i32> inreg, i32, i32) #0 {
+main_body:
+  %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 %2, i32 %1, i1 0, i1 0)
+  ret <4 x float> %data
+}
+
+declare <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32>, i32, i32, i32, i32, i1, i1) #1
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readonly }

Added: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.store.format.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.store.format.ll?rev=263140&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.store.format.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.store.format.ll Thu Mar 10 12:43:50 2016
@@ -0,0 +1,78 @@
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
+
+;CHECK-LABEL: {{^}}buffer_store:
+;CHECK: buffer_store_format_xyzw v[0:3], s[0:3], s4
+;CHECK: buffer_store_format_xyzw v[4:7], s[0:3], s4 glc
+;CHECK: buffer_store_format_xyzw v[8:11], s[0:3], s4 slc
+define void @buffer_store(<4 x i32> inreg, i32 inreg, <4 x float>, <4 x float>, <4 x float>) #0 {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 0, i1 0)
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %3, <4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 1, i1 0)
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %4, <4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 0, i1 1)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}buffer_store_immoffs:
+;CHECK: buffer_store_format_xyzw v[0:3], s[0:3], s4 offset:42
+define void @buffer_store_immoffs(<4 x i32> inreg, i32 inreg, <4 x float>) #0 {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 %1, i32 42, i32 0, i32 0, i1 0, i1 0)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}buffer_store_idx:
+;CHECK: buffer_store_format_xyzw v[0:3], v4, s[0:3], 0 idxen
+define void @buffer_store_idx(<4 x i32> inreg, i32 inreg, <4 x float>, i32) #0 {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 %3, i32 0, i1 0, i1 0)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}buffer_store_ofs:
+;CHECK: buffer_store_format_xyzw v[0:3], v4, s[0:3], 0 offen
+define void @buffer_store_ofs(<4 x i32> inreg, i32 inreg, <4 x float>, i32) #0 {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 0, i32 %3, i1 0, i1 0)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}buffer_store_both:
+;CHECK: buffer_store_format_xyzw v[0:3], v[4:5], s[0:3], 0 idxen offen
+define void @buffer_store_both(<4 x i32> inreg, i32 inreg, <4 x float>, i32, i32) #0 {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 %3, i32 %4, i1 0, i1 0)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}buffer_store_both_reversed:
+;CHECK: v_mov_b32_e32 v6, v4
+;CHECK: buffer_store_format_xyzw v[0:3], v[5:6], s[0:3], 0 idxen offen
+define void @buffer_store_both_reversed(<4 x i32> inreg, i32 inreg, <4 x float>, i32, i32) #0 {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 %4, i32 %3, i1 0, i1 0)
+  ret void
+}
+
+; Ideally, the register allocator would avoid the wait here
+;
+;CHECK-LABEL: {{^}}buffer_store_wait:
+;CHECK: buffer_store_format_xyzw v[0:3], v4, s[0:3], 0 idxen
+;CHECK: s_waitcnt vmcnt(0) expcnt(0)
+;CHECK: buffer_load_format_xyzw v[0:3], v5, s[0:3], 0 idxen
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: buffer_store_format_xyzw v[0:3], v6, s[0:3], 0 idxen
+define void @buffer_store_wait(<4 x i32> inreg, i32 inreg, <4 x float>, i32, i32, i32) #0 {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 %3, i32 0, i1 0, i1 0)
+  %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 %4, i32 0, i1 0, i1 0)
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %data, <4 x i32> %0, i32 0, i32 0, i32 %5, i32 0, i1 0, i1 0)
+  ret void
+}
+
+declare void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float>, <4 x i32>, i32, i32, i32, i32, i1, i1) #1
+declare <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32>, i32, i32, i32, i32, i1, i1) #2
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind }
+attributes #2 = { nounwind readonly }



