[llvm] r213031 - R600: Add dag combine for copy of an illegal type.

Matt Arsenault Matthew.Arsenault at amd.com
Mon Jul 14 19:06:32 PDT 2014


Author: arsenm
Date: Mon Jul 14 21:06:31 2014
New Revision: 213031

URL: http://llvm.org/viewvc/llvm-project?rev=213031&view=rev
Log:
R600: Add dag combine for copy of an illegal type.

This helps avoid the redundant instructions emitted to unpack and repack
the vectors. Ideally we could recognize that pattern and eliminate it.
Currently v4i8 and other vectors with small element types are scalarized,
so this has the added bonus of avoiding that.
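
For a concrete picture of the effect, the simplest case exercised by the
new test is a plain <4 x i8> copy. Without the combine such a copy is
scalarized (the volatile cases in the test still show that lowering, as
four BUFFER_LOAD_UBYTE / BUFFER_STORE_BYTE pairs); with it, the copy goes
through a legal 32-bit type and selects to a single dword load and store.
A minimal sketch mirroring @test_copy_v4i8 from the added test:

; With this combine the copy selects to one BUFFER_LOAD_DWORD followed by
; one BUFFER_STORE_DWORD instead of per-byte loads and stores.
define void @copy_v4i8_example(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
  %val = load <4 x i8> addrspace(1)* %in, align 4
  store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
  ret void
}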

Added:
    llvm/trunk/test/CodeGen/R600/copy-illegal-type.ll
Modified:
    llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp
    llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h
    llvm/trunk/test/CodeGen/R600/indirect-private-64.ll
    llvm/trunk/test/CodeGen/R600/load.ll

Modified: llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp?rev=213031&r1=213030&r2=213031&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp Mon Jul 14 21:06:31 2014
@@ -360,6 +360,7 @@ AMDGPUTargetLowering::AMDGPUTargetLoweri
 
   setTargetDAGCombine(ISD::MUL);
   setTargetDAGCombine(ISD::SELECT_CC);
+  setTargetDAGCombine(ISD::STORE);
 
   setSchedulingPreference(Sched::RegPressure);
   setJumpIsExpensive(true);
@@ -1896,6 +1897,56 @@ static SDValue constantFoldBFE(Selection
   return DAG.getConstant(Src0 >> Offset, MVT::i32);
 }
 
+static bool usesAllNormalStores(SDNode *LoadVal) {
+  for (SDNode::use_iterator I = LoadVal->use_begin(); !I.atEnd(); ++I) {
+    if (!ISD::isNormalStore(*I))
+      return false;
+  }
+
+  return true;
+}
+
+// If we have a copy of an illegal type, replace it with a load / store of an
+// equivalently sized legal type. This avoids intermediate bit pack / unpack
+// instructions emitted when handling extloads and truncstores. Ideally we could
+// recognize the pack / unpack pattern to eliminate it.
+SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
+                                                  DAGCombinerInfo &DCI) const {
+  if (!DCI.isBeforeLegalize())
+    return SDValue();
+
+  StoreSDNode *SN = cast<StoreSDNode>(N);
+  SDValue Value = SN->getValue();
+  EVT VT = Value.getValueType();
+
+  if (isTypeLegal(VT) || SN->isVolatile() || !ISD::isNormalLoad(Value.getNode()))
+    return SDValue();
+
+  LoadSDNode *LoadVal = cast<LoadSDNode>(Value);
+  if (LoadVal->isVolatile() || !usesAllNormalStores(LoadVal))
+    return SDValue();
+
+  EVT MemVT = LoadVal->getMemoryVT();
+
+  SDLoc SL(N);
+  SelectionDAG &DAG = DCI.DAG;
+  EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT);
+
+  SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
+                                LoadVT, SL,
+                                LoadVal->getChain(),
+                                LoadVal->getBasePtr(),
+                                LoadVal->getOffset(),
+                                LoadVT,
+                                LoadVal->getMemOperand());
+
+  SDValue CastLoad = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad.getValue(0));
+  DCI.CombineTo(LoadVal, CastLoad, NewLoad.getValue(1), false);
+
+  return DAG.getStore(SN->getChain(), SL, NewLoad,
+                      SN->getBasePtr(), SN->getMemOperand());
+}
+
 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
   EVT VT = N->getValueType(0);
@@ -1928,7 +1979,7 @@ SDValue AMDGPUTargetLowering::performMul
 }
 
 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
-                                            DAGCombinerInfo &DCI) const {
+                                                DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
   SDLoc DL(N);
 
@@ -2026,6 +2077,9 @@ SDValue AMDGPUTargetLowering::PerformDAG
 
     break;
   }
+
+  case ISD::STORE:
+    return performStoreCombine(N, DCI);
   }
   return SDValue();
 }
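
To summarize what performStoreCombine does: before legalization, a normal,
non-volatile store whose value comes from a normal, non-volatile load of an
illegal type, and whose load is used only by normal stores, is rewritten as
a load of the equivalently sized legal type from getEquivalentMemType, plus
a BITCAST of that value back to the original type (installed with
DCI.CombineTo for the load's other users), plus a store of the legal-typed
value. At the IR level the result is roughly the sketch below; the function
name and the assumption that i32 is the legal equivalent of <4 x i8> are
mine, and the actual rewrite is of course done on SelectionDAG nodes, not
on IR.

define void @copy_v4i8_as_i32(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
  ; Reinterpret both pointers so the copy is done through the legal i32 type.
  %in.i32 = bitcast <4 x i8> addrspace(1)* %in to i32 addrspace(1)*
  %out.i32 = bitcast <4 x i8> addrspace(1)* %out to i32 addrspace(1)*
  ; Corresponds to NewLoad: same memory location, legal value type. The
  ; bitcast of this value back to <4 x i8> is dead here because every use
  ; of the original load was a store.
  %raw = load i32 addrspace(1)* %in.i32, align 4
  store i32 %raw, i32 addrspace(1)* %out.i32, align 4
  ret void
}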

Modified: llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h?rev=213031&r1=213030&r2=213031&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h Mon Jul 14 21:06:31 2014
@@ -64,6 +64,7 @@ private:
                                   SelectionDAG &DAG) const;
   SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
 
+  SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
 
 protected:

Added: llvm/trunk/test/CodeGen/R600/copy-illegal-type.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/copy-illegal-type.ll?rev=213031&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/R600/copy-illegal-type.ll (added)
+++ llvm/trunk/test/CodeGen/R600/copy-illegal-type.ll Mon Jul 14 21:06:31 2014
@@ -0,0 +1,166 @@
+; RUN: llc -march=r600 -mcpu=tahiti < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @test_copy_v4i8
+; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: S_ENDPGM
+define void @test_copy_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+  %val = load <4 x i8> addrspace(1)* %in, align 4
+  store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_x2
+; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: S_ENDPGM
+define void @test_copy_v4i8_x2(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
+  %val = load <4 x i8> addrspace(1)* %in, align 4
+  store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+  store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
+  ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_x3
+; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: S_ENDPGM
+define void @test_copy_v4i8_x3(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
+  %val = load <4 x i8> addrspace(1)* %in, align 4
+  store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+  store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
+  store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
+  ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_x4
+; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: S_ENDPGM
+define void @test_copy_v4i8_x4(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %out3, <4 x i8> addrspace(1)* %in) nounwind {
+  %val = load <4 x i8> addrspace(1)* %in, align 4
+  store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+  store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
+  store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
+  store <4 x i8> %val, <4 x i8> addrspace(1)* %out3, align 4
+  ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_extra_use
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+
+; These should be the checks once scalarization of v4i8 loads is fixed:
+; XSI: BUFFER_LOAD_DWORD
+; XSI: V_BFE
+; XSI: V_ADD
+; XSI: V_ADD
+; XSI: V_ADD
+; XSI: BUFFER_STORE_DWORD
+; XSI: BUFFER_STORE_DWORD
+
+; SI: S_ENDPGM
+define void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
+  %val = load <4 x i8> addrspace(1)* %in, align 4
+  %add = add <4 x i8> %val, <i8 9, i8 9, i8 9, i8 9>
+  store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+  store <4 x i8> %add, <4 x i8> addrspace(1)* %out1, align 4
+  ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_x2_extra_use
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+
+; XSI: BUFFER_LOAD_DWORD
+; XSI: BFE
+; XSI: BUFFER_STORE_DWORD
+; XSI: V_ADD
+; XSI: BUFFER_STORE_DWORD
+; XSI-NEXT: BUFFER_STORE_DWORD
+
+; SI: S_ENDPGM
+define void @test_copy_v4i8_x2_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
+  %val = load <4 x i8> addrspace(1)* %in, align 4
+  %add = add <4 x i8> %val, <i8 9, i8 9, i8 9, i8 9>
+  store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+  store <4 x i8> %add, <4 x i8> addrspace(1)* %out1, align 4
+  store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
+  ret void
+}
+
+; FUNC-LABEL: @test_copy_v3i8
+; SI-NOT: BFE
+; SI-NOT: BFI
+; SI: S_ENDPGM
+define void @test_copy_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
+  %val = load <3 x i8> addrspace(1)* %in, align 4
+  store <3 x i8> %val, <3 x i8> addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_volatile_load
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: S_ENDPGM
+define void @test_copy_v4i8_volatile_load(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+  %val = load volatile <4 x i8> addrspace(1)* %in, align 4
+  store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_volatile_store
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_STORE_BYTE
+; SI: BUFFER_STORE_BYTE
+; SI: BUFFER_STORE_BYTE
+; SI: BUFFER_STORE_BYTE
+; SI: S_ENDPGM
+define void @test_copy_v4i8_volatile_store(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+  %val = load <4 x i8> addrspace(1)* %in, align 4
+  store volatile <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
+  ret void
+}

Modified: llvm/trunk/test/CodeGen/R600/indirect-private-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/indirect-private-64.ll?rev=213031&r1=213030&r2=213031&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/indirect-private-64.ll (original)
+++ llvm/trunk/test/CodeGen/R600/indirect-private-64.ll Mon Jul 14 21:06:31 2014
@@ -31,10 +31,14 @@ define void @private_access_f64_alloca(d
 ; SI-ALLOCA: V_MOVRELS_B32_e32
 ; SI-ALLOCA: V_MOVRELS_B32_e32
 
-; SI-PROMOTE: DS_WRITE_B64
-; SI-PROMOTE: DS_WRITE_B64
-; SI-PROMOTE: DS_READ_B64
-; SI-PROMOTE: DS_READ_B64
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
 define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) nounwind {
   %val = load <2 x double> addrspace(1)* %in, align 16
   %array = alloca <2 x double>, i32 16, align 16
@@ -77,10 +81,14 @@ define void @private_access_i64_alloca(i
 ; SI-ALLOCA: V_MOVRELS_B32_e32
 ; SI-ALLOCA: V_MOVRELS_B32_e32
 
-; SI-PROMOTE: DS_WRITE_B64
-; SI-PROMOTE: DS_WRITE_B64
-; SI-PROMOTE: DS_READ_B64
-; SI-PROMOTE: DS_READ_B64
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
 define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) nounwind {
   %val = load <2 x i64> addrspace(1)* %in, align 16
   %array = alloca <2 x i64>, i32 16, align 16

Modified: llvm/trunk/test/CodeGen/R600/load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/load.ll?rev=213031&r1=213030&r2=213031&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/load.ll (original)
+++ llvm/trunk/test/CodeGen/R600/load.ll Mon Jul 14 21:06:31 2014
@@ -254,8 +254,8 @@ entry:
 
 ; load a v2f32 value from the global address space
 ; FUNC-LABEL: @load_v2f32
+; R600-CHECK: MEM_RAT
 ; R600-CHECK: VTX_READ_64
-
 ; SI-CHECK: BUFFER_LOAD_DWORDX2
 define void @load_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) {
 entry:
@@ -265,9 +265,7 @@ entry:
 }
 
 ; FUNC-LABEL: @load_i64
-; R600-CHECK: MEM_RAT
-; R600-CHECK: MEM_RAT
-
+; R600-CHECK: VTX_READ_64
 ; SI-CHECK: BUFFER_LOAD_DWORDX2
 define void @load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
 entry:




