[llvm] r255508 - AMDGPU: Fix splitting vector loads with existing offsets

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 14 08:59:41 PST 2015


Author: arsenm
Date: Mon Dec 14 10:59:40 2015
New Revision: 255508

URL: http://llvm.org/viewvc/llvm-project?rev=255508&view=rev
Log:
AMDGPU: Fix splitting vector loads with existing offsets

If the original MMO (machine memory operand) had an offset, it was dropped when the load or store was split.
Also use the correct alignment for the high half after adding the new offset, instead of reusing the base alignment.
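
In short, the split now reuses the original memory operand's MachinePointerInfo
(so an existing offset is preserved and the high half's offset is added on top
via getWithOffset), and the high half's alignment is computed as
MinAlign(BaseAlign, Size) instead of reusing the base alignment. As a
standalone illustration of that alignment rule, here is a minimal sketch; the
helper mirrors the MinAlign() used in the diff (llvm/Support/MathExtras.h) and
the example values are made up:

  #include <cstdint>
  #include <cstdio>

  // Largest power of two that divides both A and B; same formula as
  // llvm::MinAlign() in llvm/Support/MathExtras.h.
  static uint64_t minAlign(uint64_t A, uint64_t B) {
    return (A | B) & (1 + ~(A | B));
  }

  int main() {
    // A 32-byte vector load with base alignment 32 split into two 16-byte
    // halves: the low half keeps align 32, but the high half (at offset 16)
    // may only assume align 16.
    printf("%llu\n", (unsigned long long)minAlign(32, 16)); // 16
    // With base alignment 8 and the same 16-byte offset, both halves keep 8.
    printf("%llu\n", (unsigned long long)minAlign(8, 16));  // 8
    return 0;
  }

Before this change the high half claimed the full base alignment and used a
pointer info rebuilt from getValue(), which starts at offset 0, so any offset
already present on the original memory operand was lost.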

Added:
    llvm/trunk/test/CodeGen/AMDGPU/split-vector-memoperand-offsets.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp?rev=255508&r1=255507&r2=255508&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp Mon Dec 14 10:59:40 2015
@@ -1215,7 +1215,8 @@ SDValue AMDGPUTargetLowering::SplitVecto
   EVT PtrVT = BasePtr.getValueType();
   EVT MemVT = Load->getMemoryVT();
   SDLoc SL(Op);
-  MachinePointerInfo SrcValue(Load->getMemOperand()->getValue());
+
+  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
 
   EVT LoVT, HiVT;
   EVT LoMemVT, HiMemVT;
@@ -1224,23 +1225,27 @@ SDValue AMDGPUTargetLowering::SplitVecto
   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
   std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
   std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);
+
+  unsigned Size = LoMemVT.getStoreSize();
+  unsigned BaseAlign = Load->getAlignment();
+  unsigned HiAlign = MinAlign(BaseAlign, Size);
+
   SDValue LoLoad
     = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                      Load->getChain(), BasePtr,
                      SrcValue,
                      LoMemVT, Load->isVolatile(), Load->isNonTemporal(),
-                     Load->isInvariant(), Load->getAlignment());
+                     Load->isInvariant(), BaseAlign);
 
   SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
-                              DAG.getConstant(LoMemVT.getStoreSize(), SL,
-                                              PtrVT));
+                              DAG.getConstant(Size, SL, PtrVT));
 
   SDValue HiLoad
     = DAG.getExtLoad(Load->getExtensionType(), SL, HiVT,
                      Load->getChain(), HiPtr,
                      SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                      HiMemVT, Load->isVolatile(), Load->isNonTemporal(),
-                     Load->isInvariant(), Load->getAlignment());
+                     Load->isInvariant(), HiAlign);
 
   SDValue Ops[] = {
     DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
@@ -1370,7 +1375,11 @@ SDValue AMDGPUTargetLowering::SplitVecto
                               DAG.getConstant(LoMemVT.getStoreSize(), SL,
                                               PtrVT));
 
-  MachinePointerInfo SrcValue(Store->getMemOperand()->getValue());
+  const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
+  unsigned BaseAlign = Store->getAlignment();
+  unsigned Size = LoMemVT.getStoreSize();
+  unsigned HiAlign = MinAlign(BaseAlign, Size);
+
   SDValue LoStore
     = DAG.getTruncStore(Chain, SL, Lo,
                         BasePtr,
@@ -1378,15 +1387,15 @@ SDValue AMDGPUTargetLowering::SplitVecto
                         LoMemVT,
                         Store->isNonTemporal(),
                         Store->isVolatile(),
-                        Store->getAlignment());
+                        BaseAlign);
   SDValue HiStore
     = DAG.getTruncStore(Chain, SL, Hi,
                         HiPtr,
-                        SrcValue.getWithOffset(LoMemVT.getStoreSize()),
+                        SrcValue.getWithOffset(Size),
                         HiMemVT,
                         Store->isNonTemporal(),
                         Store->isVolatile(),
-                        Store->getAlignment());
+                        HiAlign);
 
   return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
 }

Added: llvm/trunk/test/CodeGen/AMDGPU/split-vector-memoperand-offsets.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/split-vector-memoperand-offsets.ll?rev=255508&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/split-vector-memoperand-offsets.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/split-vector-memoperand-offsets.ll Mon Dec 14 10:59:40 2015
@@ -0,0 +1,104 @@
+; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs -mattr=-promote-alloca < %s | FileCheck -check-prefix=GCN %s
+
+@sPrivateStorage = external addrspace(3) global [256 x [8 x <4 x i64>]]
+
+; GCN-LABEL: {{^}}ds_reorder_vector_split:
+
+; Write zeroinitializer
+; GCN-DAG: ds_write_b64 [[PTR:v[0-9]+]], [[VAL:v\[[0-9]+:[0-9]+\]]] offset:24
+; GCN-DAG: ds_write_b64 [[PTR]], [[VAL]] offset:16
+; GCN-DAG: ds_write_b64 [[PTR]], [[VAL]] offset:8
+; GCN-DAG: ds_write_b64 [[PTR]], [[VAL]]{{$}}
+
+; GCN: s_waitcnt vmcnt
+
+; GCN-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:24
+; GCN-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:16
+; GCN-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:8
+
+; GCN: s_waitcnt lgkmcnt
+
+; GCN-DAG: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset:8
+; GCN-DAG: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset:16
+; GCN-DAG: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset:24
+
+; Appears to be a dead store of a vector component.
+; GCN: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]$}}
+
+; GCN: buffer_store_dwordx2
+; GCN: buffer_store_dwordx2
+; GCN: buffer_store_dwordx2
+; GCN: buffer_store_dwordx2
+; GCN: s_endpgm
+define void @ds_reorder_vector_split(<4 x i64> addrspace(1)* nocapture readonly %srcValues, i32 addrspace(1)* nocapture readonly %offsets, <4 x i64> addrspace(1)* nocapture %destBuffer, i32 %alignmentOffset) #0 {
+entry:
+  %tmp = tail call i32 @llvm.r600.read.local.size.y()
+  %tmp1 = tail call i32 @llvm.r600.read.local.size.z()
+  %tmp2 = tail call i32 @llvm.r600.read.tidig.x()
+  %tmp3 = tail call i32 @llvm.r600.read.tidig.y()
+  %tmp4 = tail call i32 @llvm.r600.read.tidig.z()
+  %tmp6 = mul i32 %tmp2, %tmp
+  %tmp10 = add i32 %tmp3, %tmp6
+  %tmp11 = mul i32 %tmp10, %tmp1
+  %tmp9 = add i32 %tmp11, %tmp4
+  %x.i.i = tail call i32 @llvm.r600.read.tgid.x() #1
+  %x.i.12.i = tail call i32 @llvm.r600.read.local.size.x() #1
+  %mul.26.i = mul i32 %x.i.12.i, %x.i.i
+  %add.i = add i32 %tmp2, %mul.26.i
+  %arrayidx = getelementptr [256 x [8 x <4 x i64>]], [256 x [8 x <4 x i64>]] addrspace(3)* @sPrivateStorage, i32 0, i32 %tmp9, i32 %add.i
+  store <4 x i64> zeroinitializer, <4 x i64> addrspace(3)* %arrayidx
+  %tmp12 = sext i32 %add.i to i64
+  %arrayidx1 = getelementptr inbounds <4 x i64>, <4 x i64> addrspace(1)* %srcValues, i64 %tmp12
+  %tmp13 = load <4 x i64>, <4 x i64> addrspace(1)* %arrayidx1
+  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %offsets, i64 %tmp12
+  %tmp14 = load i32, i32 addrspace(1)* %arrayidx2
+  %add.ptr = getelementptr [256 x [8 x <4 x i64>]], [256 x [8 x <4 x i64>]] addrspace(3)* @sPrivateStorage, i32 0, i32 %tmp9, i32 0, i32 %alignmentOffset
+  %mul.i = shl i32 %tmp14, 2
+  %arrayidx.i = getelementptr inbounds i64, i64 addrspace(3)* %add.ptr, i32 %mul.i
+  %tmp15 = bitcast i64 addrspace(3)* %arrayidx.i to <4 x i64> addrspace(3)*
+  store <4 x i64> %tmp13, <4 x i64> addrspace(3)* %tmp15
+  %add.ptr6 = getelementptr [256 x [8 x <4 x i64>]], [256 x [8 x <4 x i64>]] addrspace(3)* @sPrivateStorage, i32 0, i32 %tmp9, i32 %tmp14, i32 %alignmentOffset
+  %tmp16 = sext i32 %tmp14 to i64
+  %tmp17 = sext i32 %alignmentOffset to i64
+  %add.ptr9 = getelementptr inbounds <4 x i64>, <4 x i64> addrspace(1)* %destBuffer, i64 %tmp16, i64 %tmp17
+  %tmp18 = bitcast <4 x i64> %tmp13 to i256
+  %trunc = trunc i256 %tmp18 to i64
+  store i64 %trunc, i64 addrspace(1)* %add.ptr9
+  %arrayidx10.1 = getelementptr inbounds i64, i64 addrspace(3)* %add.ptr6, i32 1
+  %tmp19 = load i64, i64 addrspace(3)* %arrayidx10.1
+  %arrayidx11.1 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr9, i64 1
+  store i64 %tmp19, i64 addrspace(1)* %arrayidx11.1
+  %arrayidx10.2 = getelementptr inbounds i64, i64 addrspace(3)* %add.ptr6, i32 2
+  %tmp20 = load i64, i64 addrspace(3)* %arrayidx10.2
+  %arrayidx11.2 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr9, i64 2
+  store i64 %tmp20, i64 addrspace(1)* %arrayidx11.2
+  %arrayidx10.3 = getelementptr inbounds i64, i64 addrspace(3)* %add.ptr6, i32 3
+  %tmp21 = load i64, i64 addrspace(3)* %arrayidx10.3
+  %arrayidx11.3 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr9, i64 3
+  store i64 %tmp21, i64 addrspace(1)* %arrayidx11.3
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.r600.read.tgid.x() #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.r600.read.local.size.x() #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.r600.read.tidig.x() #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.r600.read.local.size.y() #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.r600.read.local.size.z() #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.r600.read.tidig.y() #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.r600.read.tidig.z() #1
+
+attributes #0 = { norecurse nounwind }
+attributes #1 = { nounwind readnone }

More information about the llvm-commits mailing list