[llvm] r328652 - AMDGPU: Fix crash when MachinePointerInfo invalid

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 27 11:39:45 PDT 2018


Author: arsenm
Date: Tue Mar 27 11:39:45 2018
New Revision: 328652

URL: http://llvm.org/viewvc/llvm-project?rev=328652&view=rev
Log:
AMDGPU: Fix crash when MachinePointerInfo invalid

The combine on a select of a load only triggers for
addrspace 0, and discards the MachinePointerInfo. The
conservative default needs to be used for this.

Added:
    llvm/trunk/test/CodeGen/AMDGPU/load-select-ptr.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp

Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=328652&r1=328651&r2=328652&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Tue Mar 27 11:39:45 2018
@@ -1087,7 +1087,7 @@ bool SITargetLowering::isNoopAddrSpaceCa
 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
   const MemSDNode *MemNode = cast<MemSDNode>(N);
   const Value *Ptr = MemNode->getMemOperand()->getValue();
-  const Instruction *I = dyn_cast<Instruction>(Ptr);
+  const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
   return I && I->getMetadata("amdgpu.noclobber");
 }
 

Added: llvm/trunk/test/CodeGen/AMDGPU/load-select-ptr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load-select-ptr.ll?rev=328652&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/load-select-ptr.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/load-select-ptr.ll Tue Mar 27 11:39:45 2018
@@ -0,0 +1,82 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; The combine select c, (load x), (load y) -> load (select c, x, y)
+; drops the MachinePointerInfo, so it can't be relied on for correctness.
+
+; GCN-LABEL: {{^}}select_ptr_crash_i64_flat:
+; GCN: s_load_dwordx2
+; GCN: s_load_dwordx2
+; GCN: s_load_dwordx2
+
+; GCN: v_cmp_eq_u32
+; GCN: v_cndmask_b32
+; GCN: v_cndmask_b32
+
+; GCN-NOT: load_dword
+; GCN: flat_load_dwordx2
+; GCN-NOT: load_dword
+
+; GCN: flat_store_dwordx2
+define amdgpu_kernel void @select_ptr_crash_i64_flat(i32 %tmp, i64* %ptr0, i64* %ptr1, i64 addrspace(1)* %ptr2) {
+  %tmp2 = icmp eq i32 %tmp, 0
+  %tmp3 = load i64, i64* %ptr0, align 8
+  %tmp4 = load i64, i64* %ptr1, align 8
+  %tmp5 = select i1 %tmp2, i64 %tmp3, i64 %tmp4
+  store i64 %tmp5, i64 addrspace(1)* %ptr2, align 8
+  ret void
+}
+
+; The transform currently doesn't happen for non-addrspace 0, but it
+; should.
+
+; GCN-LABEL: {{^}}select_ptr_crash_i64_global:
+; GCN: s_load_dwordx2
+; GCN: s_load_dwordx2
+; GCN: s_load_dwordx2
+; GCN: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0{{$}}
+; GCN: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0{{$}}
+; GCN: v_cndmask_b32
+; GCN: v_cndmask_b32
+; GCN: flat_store_dwordx2
+define amdgpu_kernel void @select_ptr_crash_i64_global(i32 %tmp, i64 addrspace(1)* %ptr0, i64 addrspace(1)* %ptr1, i64 addrspace(1)* %ptr2) {
+  %tmp2 = icmp eq i32 %tmp, 0
+  %tmp3 = load i64, i64 addrspace(1)* %ptr0, align 8
+  %tmp4 = load i64, i64 addrspace(1)* %ptr1, align 8
+  %tmp5 = select i1 %tmp2, i64 %tmp3, i64 %tmp4
+  store i64 %tmp5, i64 addrspace(1)* %ptr2, align 8
+  ret void
+}
+
+; GCN-LABEL: {{^}}select_ptr_crash_i64_local:
+; GCN: ds_read_b64
+; GCN: ds_read_b64
+; GCN: v_cndmask_b32
+; GCN: v_cndmask_b32
+; GCN: flat_store_dwordx2
+define amdgpu_kernel void @select_ptr_crash_i64_local(i32 %tmp, i64 addrspace(3)* %ptr0, i64 addrspace(3)* %ptr1, i64 addrspace(1)* %ptr2) {
+  %tmp2 = icmp eq i32 %tmp, 0
+  %tmp3 = load i64, i64 addrspace(3)* %ptr0, align 8
+  %tmp4 = load i64, i64 addrspace(3)* %ptr1, align 8
+  %tmp5 = select i1 %tmp2, i64 %tmp3, i64 %tmp4
+  store i64 %tmp5, i64 addrspace(1)* %ptr2, align 8
+  ret void
+}
+
+; The transform will break addressing mode matching, so it is unclear
+; whether it would be good to do.
+
+; GCN-LABEL: {{^}}select_ptr_crash_i64_local_offsets:
+; GCN: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset:128
+; GCN: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset:512
+; GCN: v_cndmask_b32
+; GCN: v_cndmask_b32
+define amdgpu_kernel void @select_ptr_crash_i64_local_offsets(i32 %tmp, i64 addrspace(3)* %ptr0, i64 addrspace(3)* %ptr1, i64 addrspace(1)* %ptr2) {
+  %tmp2 = icmp eq i32 %tmp, 0
+  %gep0 = getelementptr inbounds i64, i64 addrspace(3)* %ptr0, i64 16
+  %gep1 = getelementptr inbounds i64, i64 addrspace(3)* %ptr1, i64 64
+  %tmp3 = load i64, i64 addrspace(3)* %gep0, align 8
+  %tmp4 = load i64, i64 addrspace(3)* %gep1, align 8
+  %tmp5 = select i1 %tmp2, i64 %tmp3, i64 %tmp4
+  store i64 %tmp5, i64 addrspace(1)* %ptr2, align 8
+  ret void
+}




More information about the llvm-commits mailing list