[llvm] r287018 - AMDGPU: Analyze mubuf with immediate soffset

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 15 12:14:27 PST 2016


Author: arsenm
Date: Tue Nov 15 14:14:27 2016
New Revision: 287018

URL: http://llvm.org/viewvc/llvm-project?rev=287018&view=rev
Log:
AMDGPU: Analyze mubuf with immediate soffset

Fixes the memory-operand analysis giving up on common addr64 accesses
that have a constant 0 soffset, which prevented clustering them.
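
In code terms, getMemOpBaseRegImmOfs previously returned false for any
MUBUF/MTBUF instruction that carried an soffset operand, so the scheduler
never got a (base, offset) pair for addr64 accesses whose soffset is the
constant 0 and could not cluster them. With this change only a register
soffset blocks the analysis, and an immediate soffset is folded into the
reported offset. A condensed sketch of the resulting logic (the unchanged
lines between the two hunks below are elided):

    if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
      const MachineOperand *SOffset =
          getNamedOperand(LdSt, AMDGPU::OpName::soffset);
      if (SOffset && SOffset->isReg())
        return false;   // a register soffset still cannot be folded into the offset

      const MachineOperand *AddrReg = /* base address operand, unchanged */;
      const MachineOperand *OffsetImm =
          getNamedOperand(LdSt, AMDGPU::OpName::offset);
      BaseReg = AddrReg->getReg();
      Offset = OffsetImm->getImm();

      if (SOffset)      // soffset can be an inline immediate, e.g. the constant 0.
        Offset += SOffset->getImm();

      return true;
    }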

Modified:
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
    llvm/trunk/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll

Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp?rev=287018&r1=287017&r2=287018&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp Tue Nov 15 14:14:27 2016
@@ -265,7 +265,8 @@ bool SIInstrInfo::getMemOpBaseRegImmOfs(
   }
 
   if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
-    if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::soffset) != -1)
+    const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
+    if (SOffset && SOffset->isReg())
       return false;
 
     const MachineOperand *AddrReg =
@@ -277,6 +278,10 @@ bool SIInstrInfo::getMemOpBaseRegImmOfs(
         getNamedOperand(LdSt, AMDGPU::OpName::offset);
     BaseReg = AddrReg->getReg();
     Offset = OffsetImm->getImm();
+
+    if (SOffset) // soffset can be an inline immediate.
+      Offset += SOffset->getImm();
+
     return true;
   }
 

Modified: llvm/trunk/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll?rev=287018&r1=287017&r2=287018&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll Tue Nov 15 14:14:27 2016
@@ -3,6 +3,7 @@
 declare void @llvm.SI.tbuffer.store.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
 declare void @llvm.SI.tbuffer.store.v4i32(<16 x i8>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
 declare void @llvm.amdgcn.s.barrier() #1
+declare i32 @llvm.amdgcn.workitem.id.x() #2
 
 
 @stored_lds_ptr = addrspace(3) global i32 addrspace(3)* undef, align 4
@@ -205,6 +206,38 @@ define void @reorder_global_offsets(i32
   ret void
 }
 
+; FUNC-LABEL: {{^}}reorder_global_offsets_addr64_soffset0:
+; GCN: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64{{$}}
+; GCN: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64 offset:20{{$}}
+; GCN: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64 offset:12{{$}}
+; GCN: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64 offset:28{{$}}
+; GCN: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64 offset:44{{$}}
+; GCN: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64 offset:36{{$}}
+; GCN: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64 offset:52{{$}}
+define void @reorder_global_offsets_addr64_soffset0(i32 addrspace(1)* noalias nocapture %ptr.base) #0 {
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %id.ext = sext i32 %id to i64
+
+  %ptr0 = getelementptr inbounds i32, i32 addrspace(1)* %ptr.base, i64 %id.ext
+  %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 3
+  %ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 5
+  %ptr3 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 7
+  %ptr4 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 9
+  %ptr5 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 11
+  %ptr6 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 13
+
+  store i32 789, i32 addrspace(1)* %ptr0, align 4
+  %tmp1 = load i32, i32 addrspace(1)* %ptr1, align 4
+  store i32 123, i32 addrspace(1)* %ptr2, align 4
+  %tmp2 = load i32, i32 addrspace(1)* %ptr3, align 4
+  %add.0 = add nsw i32 %tmp1, %tmp2
+  store i32 %add.0, i32 addrspace(1)* %ptr4, align 4
+  %tmp3 = load i32, i32 addrspace(1)* %ptr5, align 4
+  %add.1 = add nsw i32 %add.0, %tmp3
+  store i32 %add.1, i32 addrspace(1)* %ptr6, align 4
+  ret void
+}
+
 ; XFUNC-LABEL: @reorder_local_load_tbuffer_store_local_load
 ; XCI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}}, 0x4
 ; XCI: TBUFFER_STORE_FORMAT
@@ -232,3 +265,4 @@ define void @reorder_global_offsets(i32
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind convergent }
+attributes #2 = { nounwind readnone }



