[llvm] r348625 - AMDGPU: Allow f32 types for llvm.amdgcn.s.buffer.load

Matt Arsenault via llvm-commits <llvm-commits at lists.llvm.org>
Fri Dec 7 10:41:39 PST 2018


Author: arsenm
Date: Fri Dec  7 10:41:39 2018
New Revision: 348625

URL: http://llvm.org/viewvc/llvm-project?rev=348625&view=rev
Log:
AMDGPU: Allow f32 types for llvm.amdgcn.s.buffer.load
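
A minimal IR sketch of what this enables (names like %rsrc and %off are
illustrative, not from the patch; the trailing i32 0 is the cachepolicy
immediate, i.e. no glc/slc bits set):

    define amdgpu_ps <2 x float> @load_v2f32(<4 x i32> inreg %rsrc, i32 inreg %off) {
      ; Before this change only the integer overloads were accepted, so
      ; float data had to be loaded as integers and bitcast:
      ;   %i = call <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32> %rsrc, i32 %off, i32 0)
      ;   %f = bitcast <2 x i32> %i to <2 x float>
      ; With llvm_any_ty the float overload is legal directly:
      %f = call <2 x float> @llvm.amdgcn.s.buffer.load.v2f32(<4 x i32> %rsrc, i32 %off, i32 0)
      ret <2 x float> %f
    }

    declare <2 x float> @llvm.amdgcn.s.buffer.load.v2f32(<4 x i32>, i32, i32)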

Modified:
    llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/trunk/lib/Target/AMDGPU/SMInstructions.td
    llvm/trunk/test/CodeGen/AMDGPU/smrd.ll

Modified: llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td?rev=348625&r1=348624&r2=348625&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td (original)
+++ llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td Fri Dec  7 10:41:39 2018
@@ -803,7 +803,7 @@ def int_amdgcn_buffer_load_format : AMDG
 def int_amdgcn_buffer_load : AMDGPUBufferLoad;
 
 def int_amdgcn_s_buffer_load : Intrinsic <
-  [llvm_anyint_ty],
+  [llvm_any_ty],
   [llvm_v4i32_ty,     // rsrc(SGPR)
    llvm_i32_ty,       // byte offset(SGPR/VGPR/imm)
    llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc)
@@ -835,7 +835,7 @@ class AMDGPURawBufferLoad : Intrinsic <
   [llvm_v4i32_ty,     // rsrc(SGPR)
    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
-   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc) 
+   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
   [IntrReadMem], "", [SDNPMemOperand]>,
   AMDGPURsrcIntrinsic<0>;
 def int_amdgcn_raw_buffer_load_format : AMDGPURawBufferLoad;
@@ -847,7 +847,7 @@ class AMDGPUStructBufferLoad : Intrinsic
    llvm_i32_ty,       // vindex(VGPR)
    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
-   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc) 
+   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
   [IntrReadMem], "", [SDNPMemOperand]>,
   AMDGPURsrcIntrinsic<0>;
 def int_amdgcn_struct_buffer_load_format : AMDGPUStructBufferLoad;
@@ -859,7 +859,7 @@ class AMDGPURawBufferStore : Intrinsic <
    llvm_v4i32_ty,     // rsrc(SGPR)
    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
-   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc) 
+   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
   [IntrWriteMem], "", [SDNPMemOperand]>,
   AMDGPURsrcIntrinsic<1>;
 def int_amdgcn_raw_buffer_store_format : AMDGPURawBufferStore;
@@ -872,7 +872,7 @@ class AMDGPUStructBufferStore : Intrinsi
    llvm_i32_ty,       // vindex(VGPR)
    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
-   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc) 
+   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
   [IntrWriteMem], "", [SDNPMemOperand]>,
   AMDGPURsrcIntrinsic<1>;
 def int_amdgcn_struct_buffer_store_format : AMDGPUStructBufferStore;
@@ -884,7 +884,7 @@ class AMDGPURawBufferAtomic : Intrinsic
    llvm_v4i32_ty,     // rsrc(SGPR)
    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
-   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc) 
+   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
   [], "", [SDNPMemOperand]>,
   AMDGPURsrcIntrinsic<1, 0>;
 def int_amdgcn_raw_buffer_atomic_swap : AMDGPURawBufferAtomic;
@@ -904,7 +904,7 @@ def int_amdgcn_raw_buffer_atomic_cmpswap
    llvm_v4i32_ty,     // rsrc(SGPR)
    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
-   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc) 
+   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
   [], "", [SDNPMemOperand]>,
   AMDGPURsrcIntrinsic<2, 0>;
 
@@ -915,7 +915,7 @@ class AMDGPUStructBufferAtomic : Intrins
    llvm_i32_ty,       // vindex(VGPR)
    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
-   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc) 
+   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
   [], "", [SDNPMemOperand]>,
   AMDGPURsrcIntrinsic<1, 0>;
 def int_amdgcn_struct_buffer_atomic_swap : AMDGPUStructBufferAtomic;
@@ -936,7 +936,7 @@ def int_amdgcn_struct_buffer_atomic_cmps
    llvm_i32_ty,       // vindex(VGPR)
    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
-   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc) 
+   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
   [], "", [SDNPMemOperand]>,
   AMDGPURsrcIntrinsic<2, 0>;
 
@@ -980,7 +980,7 @@ def int_amdgcn_raw_tbuffer_load : Intrin
      llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
      llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
      llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
-     llvm_i32_ty],    // cachepolicy(imm; bit 0 = glc, bit 1 = slc) 
+     llvm_i32_ty],    // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
     [IntrReadMem], "", [SDNPMemOperand]>,
   AMDGPURsrcIntrinsic<0>;
 
@@ -991,7 +991,7 @@ def int_amdgcn_raw_tbuffer_store : Intri
      llvm_i32_ty,    // offset(VGPR/imm, included in bounds checking and swizzling)
      llvm_i32_ty,    // soffset(SGPR/imm, excluded from bounds checking and swizzling)
      llvm_i32_ty,    // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
-     llvm_i32_ty],   // cachepolicy(imm; bit 0 = glc, bit 1 = slc) 
+     llvm_i32_ty],   // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
     [IntrWriteMem], "", [SDNPMemOperand]>,
   AMDGPURsrcIntrinsic<1>;
 
@@ -1002,7 +1002,7 @@ def int_amdgcn_struct_tbuffer_load : Int
      llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
      llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
      llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
-     llvm_i32_ty],    // cachepolicy(imm; bit 0 = glc, bit 1 = slc) 
+     llvm_i32_ty],    // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
     [IntrReadMem], "", [SDNPMemOperand]>,
   AMDGPURsrcIntrinsic<0>;
 
@@ -1014,7 +1014,7 @@ def int_amdgcn_struct_tbuffer_store : In
      llvm_i32_ty,    // offset(VGPR/imm, included in bounds checking and swizzling)
      llvm_i32_ty,    // soffset(SGPR/imm, excluded from bounds checking and swizzling)
      llvm_i32_ty,    // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
-     llvm_i32_ty],   // cachepolicy(imm; bit 0 = glc, bit 1 = slc) 
+     llvm_i32_ty],   // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
     [IntrWriteMem], "", [SDNPMemOperand]>,
   AMDGPURsrcIntrinsic<1>;
 

Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=348625&r1=348624&r2=348625&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Fri Dec  7 10:41:39 2018
@@ -4873,12 +4873,13 @@ SDValue SITargetLowering::lowerSBuffer(E
   SmallVector<SDValue, 4> Loads;
   unsigned NumLoads = 1;
   MVT LoadVT = VT.getSimpleVT();
+  MVT EltVT = LoadVT.isVector() ? LoadVT.getVectorElementType() : LoadVT;
+  unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
+  assert((EltVT == MVT::i32 || EltVT == MVT::f32) &&
+         isPowerOf2_32(NumElts));
 
-  assert(LoadVT == MVT::i32 || LoadVT == MVT::v2i32 || LoadVT == MVT::v4i32 ||
-         LoadVT == MVT::v8i32 || LoadVT == MVT::v16i32);
-
-  if (VT == MVT::v8i32 || VT == MVT::v16i32) {
-    NumLoads = VT == MVT::v16i32 ? 4 : 2;
+  if (NumElts == 8 || NumElts == 16) {
+    NumLoads = NumElts == 16 ? 4 : 2;
     LoadVT = MVT::v4i32;
   }
 

Modified: llvm/trunk/lib/Target/AMDGPU/SMInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SMInstructions.td?rev=348625&r1=348624&r2=348625&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SMInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/SMInstructions.td Fri Dec  7 10:41:39 2018
@@ -751,6 +751,12 @@ defm : SMLoad_Pattern <"S_BUFFER_LOAD_DW
 defm : SMLoad_Pattern <"S_BUFFER_LOAD_DWORDX4",   v4i32>;
 defm : SMLoad_Pattern <"S_BUFFER_LOAD_DWORDX8",   v8i32>;
 defm : SMLoad_Pattern <"S_BUFFER_LOAD_DWORDX16",  v16i32>;
+
+defm : SMLoad_Pattern <"S_BUFFER_LOAD_DWORD",     f32>;
+defm : SMLoad_Pattern <"S_BUFFER_LOAD_DWORDX2",   v2f32>;
+defm : SMLoad_Pattern <"S_BUFFER_LOAD_DWORDX4",   v4f32>;
+defm : SMLoad_Pattern <"S_BUFFER_LOAD_DWORDX8",   v8f32>;
+defm : SMLoad_Pattern <"S_BUFFER_LOAD_DWORDX16",  v16f32>;
 } // End let AddedComplexity = 100
 
 let OtherPredicates = [isSICI] in {
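
Taken together with the integer patterns directly above, the added defm
lines map each float width onto the existing SMEM opcodes; no new machine
instructions are involved. Summarizing the patterns (per the defm lines
and the smrd.ll tests below):

    f32    -> s_buffer_load_dword
    v2f32  -> s_buffer_load_dwordx2
    v4f32  -> s_buffer_load_dwordx4
    v8f32  -> s_buffer_load_dwordx8
    v16f32 -> s_buffer_load_dwordx16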

Modified: llvm/trunk/test/CodeGen/AMDGPU/smrd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/smrd.ll?rev=348625&r1=348624&r2=348625&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/smrd.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/smrd.ll Fri Dec  7 10:41:39 2018
@@ -625,7 +625,6 @@ exit:
   ret float %sum.next
 }
 
-
 ; This test checks that the load after some control flow with an offset based
 ; on a divergent shader input is correctly recognized as divergent. This was
 ; reduced from an actual regression. Yes, the %unused argument matters, as
@@ -649,6 +648,45 @@ endif1:
   ret float %tmp97
 }
 
+; GCN-LABEL: {{^}}s_buffer_load_f32:
+; GCN: s_buffer_load_dword s0, s[0:3], s4
+define amdgpu_ps void @s_buffer_load_f32(<4 x i32> inreg %rsrc, i32 inreg %offset) {
+  %sgpr = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %rsrc, i32 %offset, i32 0)
+  call void asm sideeffect "; use $0", "s"(float %sgpr)
+  ret void
+}
+
+; GCN-LABEL: {{^}}s_buffer_load_v2f32:
+; GCN: s_buffer_load_dwordx2 s[0:1], s[0:3], s4
+define amdgpu_ps void @s_buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 inreg %offset) {
+  %sgpr = call <2 x float> @llvm.amdgcn.s.buffer.load.v2f32(<4 x i32> %rsrc, i32 %offset, i32 0)
+  call void asm sideeffect "; use $0", "s"(<2 x float> %sgpr)
+  ret void
+}
+
+; GCN-LABEL: {{^}}s_buffer_load_v4f32:
+; GCN: s_buffer_load_dwordx4 s[0:3], s[0:3], s4
+define amdgpu_ps void @s_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 inreg %offset) {
+  %sgpr = call <4 x float> @llvm.amdgcn.s.buffer.load.v4f32(<4 x i32> %rsrc, i32 %offset, i32 0)
+  call void asm sideeffect "; use $0", "s"(<4 x float> %sgpr)
+  ret void
+}
+
+; GCN-LABEL: {{^}}s_buffer_load_v8f32:
+; GCN: s_buffer_load_dwordx8 s[0:7], s[0:3], s4
+define amdgpu_ps void @s_buffer_load_v8f32(<4 x i32> inreg %rsrc, i32 inreg %offset) {
+  %sgpr = call <8 x float> @llvm.amdgcn.s.buffer.load.v8f32(<4 x i32> %rsrc, i32 %offset, i32 0)
+  call void asm sideeffect "; use $0", "s"(<8 x float> %sgpr)
+  ret void
+}
+
+; GCN-LABEL: {{^}}s_buffer_load_v16f32:
+; GCN: s_buffer_load_dwordx16 s[0:15], s[0:3], s4
+define amdgpu_ps void @s_buffer_load_v16f32(<4 x i32> inreg %rsrc, i32 inreg %offset) {
+  %sgpr = call <16 x float> @llvm.amdgcn.s.buffer.load.v16f32(<4 x i32> %rsrc, i32 %offset, i32 0)
+  call void asm sideeffect "; use $0", "s"(<16 x float> %sgpr)
+  ret void
+}
 
 declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
 declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
@@ -660,6 +698,12 @@ declare <4 x i32> @llvm.amdgcn.s.buffer.
 declare <8 x i32> @llvm.amdgcn.s.buffer.load.v8i32(<4 x i32>, i32, i32)
 declare <16 x i32> @llvm.amdgcn.s.buffer.load.v16i32(<4 x i32>, i32, i32)
 
+declare float @llvm.amdgcn.s.buffer.load.f32(<4 x i32>, i32, i32)
+declare <2 x float> @llvm.amdgcn.s.buffer.load.v2f32(<4 x i32>, i32, i32)
+declare <4 x float> @llvm.amdgcn.s.buffer.load.v4f32(<4 x i32>, i32, i32)
+declare <8 x float> @llvm.amdgcn.s.buffer.load.v8f32(<4 x i32>, i32, i32)
+declare <16 x float> @llvm.amdgcn.s.buffer.load.v16f32(<4 x i32>, i32, i32)
+
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readnone }
 attributes #2 = { nounwind readnone speculatable }
