[llvm] r246356 - AMDGPU: Fix dropping mem operands when moving to VALU

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 28 23:48:47 PDT 2015


Author: arsenm
Date: Sat Aug 29 01:48:46 2015
New Revision: 246356

URL: http://llvm.org/viewvc/llvm-project?rev=246356&view=rev
Log:
AMDGPU: Fix dropping mem operands when moving to VALU

Without a memory operand, mayLoad or mayStore instructions
are conservatively treated as having an ordered memory reference
(MachineInstr::hasOrderedMemoryRef() returns true), which results
in much worse scheduling.
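
For reference, the conservatism comes from MachineInstr::hasOrderedMemoryRef(),
which returns true for any mayLoad/mayStore instruction whose memoperand list
is empty. A minimal sketch of the pattern this patch applies when rebuilding an
instruction (the helper, opcode, and operand handling below are hypothetical,
not the exact SIInstrInfo code):

  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/CodeGen/MachineInstrBuilder.h"
  #include "llvm/Target/TargetInstrInfo.h"

  using namespace llvm;

  // Hypothetical helper: rebuild MI with a new opcode while carrying its
  // MachineMemOperands over, so the scheduler keeps precise aliasing info.
  static MachineInstr *rebuildPreservingMemOperands(MachineBasicBlock &MBB,
                                                    MachineInstr *MI,
                                                    const TargetInstrInfo &TII,
                                                    unsigned NewOpcode) {
    MachineInstrBuilder MIB =
        BuildMI(MBB, MI, MI->getDebugLoc(), TII.get(NewOpcode));
    for (const MachineOperand &MO : MI->operands())
      MIB.addOperand(MO); // copy the original operands
    // The key line: without it, memoperands_empty() is true and
    // hasOrderedMemoryRef() conservatively reports an ordered reference.
    MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    MI->removeFromParent();
    return MIB;
  }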

We really should have a verifier check that any mayLoad or
mayStore instruction without unmodeled side effects has a memory
operand. There are a few instructions (interp and image ops) for
which I'm not sure what memory operand to add, or where to add it.
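
A sketch of what such a verifier check could look like (a hypothetical
check, not something added by this commit):

  #include "llvm/CodeGen/MachineInstr.h"

  using namespace llvm;

  // Hypothetical check: a mayLoad/mayStore instruction with no unmodeled
  // side effects should carry at least one memory operand.
  static bool hasExpectedMemOperands(const MachineInstr &MI) {
    if (!MI.mayLoad() && !MI.mayStore())
      return true;                  // not a memory instruction
    if (MI.hasUnmodeledSideEffects())
      return true;                  // side effects already order it
    return !MI.memoperands_empty(); // otherwise expect a memoperand
  }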

Added:
    llvm/trunk/test/CodeGen/AMDGPU/drop-mem-operand-move-smrd.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp

Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp?rev=246356&r1=246355&r2=246356&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp Sat Aug 29 01:48:46 2015
@@ -1888,17 +1888,18 @@ void SIInstrInfo::legalizeOperands(Machi
       // Create the new instruction.
       unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI->getOpcode());
       MachineInstr *Addr64 =
-          BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
-                  .addOperand(*VData)
-                  .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
-                                              // This will be replaced later
-                                              // with the new value of vaddr.
-                  .addOperand(*SRsrc)
-                  .addOperand(*SOffset)
-                  .addOperand(*Offset)
-                  .addImm(0) // glc
-                  .addImm(0) // slc
-                  .addImm(0); // tfe
+        BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
+        .addOperand(*VData)
+        .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
+                                    // This will be replaced later
+                                    // with the new value of vaddr.
+        .addOperand(*SRsrc)
+        .addOperand(*SOffset)
+        .addOperand(*Offset)
+        .addImm(0) // glc
+        .addImm(0) // slc
+        .addImm(0) // tfe
+        .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
 
       MI->removeFromParent();
       MI = Addr64;

Added: llvm/trunk/test/CodeGen/AMDGPU/drop-mem-operand-move-smrd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/drop-mem-operand-move-smrd.ll?rev=246356&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/drop-mem-operand-move-smrd.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/drop-mem-operand-move-smrd.ll Sat Aug 29 01:48:46 2015
@@ -0,0 +1,52 @@
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
+
+; The memory operand was dropped from the buffer_load_dword_offset
+; when replaced with the addr64 during operand legalization, resulting
+; in the global loads not being scheduled together.
+
+; GCN-LABEL: {{^}}reschedule_global_load_lds_store:
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: ds_write_b32
+; GCN: ds_write_b32
+; GCN: s_endpgm
+define void @reschedule_global_load_lds_store(i32 addrspace(1)* noalias %gptr0, i32 addrspace(1)* noalias %gptr1, i32 addrspace(3)* noalias %lptr, i32 %c) #0 {
+entry:
+  %tid = tail call i32 @llvm.r600.read.tidig.x() #1
+  %idx = shl i32 %tid, 2
+  %gep0 = getelementptr i32, i32 addrspace(1)* %gptr0, i32 %idx
+  %gep1 = getelementptr i32, i32 addrspace(1)* %gptr1, i32 %idx
+  %gep2 = getelementptr i32, i32 addrspace(3)* %lptr, i32 %tid
+  %cmp0 = icmp eq i32 %c, 0
+  br i1 %cmp0, label %for.body, label %exit
+
+for.body:                                         ; preds = %for.body, %entry
+  %i = phi i32 [ 0, %entry ], [ %i.inc, %for.body ]
+  %gptr0.phi = phi i32 addrspace(1)* [ %gep0, %entry ], [ %gep0.inc, %for.body ]
+  %gptr1.phi = phi i32 addrspace(1)* [ %gep1, %entry ], [ %gep1.inc, %for.body ]
+  %lptr0.phi = phi i32 addrspace(3)* [ %gep2, %entry ], [ %gep2.inc, %for.body ]
+  %lptr1 = getelementptr i32, i32 addrspace(3)* %lptr0.phi, i32 1
+  %val0 = load i32, i32 addrspace(1)* %gep0
+  store i32 %val0, i32 addrspace(3)* %lptr0.phi
+  %val1 = load i32, i32 addrspace(1)* %gep1
+  store i32 %val1, i32 addrspace(3)* %lptr1
+  %gep0.inc = getelementptr i32, i32 addrspace(1)* %gptr0.phi, i32 4
+  %gep1.inc = getelementptr i32, i32 addrspace(1)* %gptr1.phi, i32 4
+  %gep2.inc = getelementptr i32, i32 addrspace(3)* %lptr0.phi, i32 4
+  %i.inc = add nsw i32 %i, 1
+  %cmp1 = icmp ne i32 %i, 256
+  br i1 %cmp1, label %for.body, label %exit
+
+exit:                                             ; preds = %for.body, %entry
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.r600.read.tidig.x() #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.r600.read.tgid.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { noduplicate nounwind }

More information about the llvm-commits mailing list