[llvm] r354066 - [AMDGPU] Reassociate 'add (add x, y), z' to use SALU

Stanislav Mekhanoshin via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 14 14:11:25 PST 2019


Author: rampitec
Date: Thu Feb 14 14:11:25 2019
New Revision: 354066

URL: http://llvm.org/viewvc/llvm-project?rev=354066&view=rev
Log:
[AMDGPU] Reassociate 'add (add x, y), z' to use SALU

Reassociate adds to collect scalar operands into a single
instruction when possible. This results in a scalar add
followed by a vector add instead of two vector adds, making
better use of the SALU.
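
For example (from the add3.ll update below), where %a and %c are
uniform (inreg) and %b is divergent:

  %x = add i32 %a, %b
  %result = add i32 %x, %c

this previously compiled to two VALU adds on GFX9:

  v_add_u32_e32 v0, s2, v0
  v_add_u32_e32 v0, s3, v0

and now compiles to a scalar add feeding a single vector add:

  s_add_i32 s3, s3, s2
  v_add_u32_e32 v0, s3, v0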

Differential Revision: https://reviews.llvm.org/D58220

Added:
    llvm/trunk/test/CodeGen/AMDGPU/reassoc-scalar.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h
    llvm/trunk/test/CodeGen/AMDGPU/add3.ll

Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=354066&r1=354065&r2=354066&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Thu Feb 14 14:11:25 2019
@@ -8478,6 +8478,45 @@ unsigned SITargetLowering::getFusedOpcod
   return 0;
 }
 
+// For a reassociatable opcode perform:
+// op x, (op y, z) -> op (op x, z), y, if x and z are uniform
+SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
+                                               SelectionDAG &DAG) const {
+  EVT VT = N->getValueType(0);
+  if (VT != MVT::i32 && VT != MVT::i64)
+    return SDValue();
+
+  unsigned Opc = N->getOpcode();
+  SDValue Op0 = N->getOperand(0);
+  SDValue Op1 = N->getOperand(1);
+
+  if (!(Op0->isDivergent() ^ Op1->isDivergent()))
+    return SDValue();
+
+  if (Op0->isDivergent())
+    std::swap(Op0, Op1);
+
+  if (Op1.getOpcode() != Opc || !Op1.hasOneUse())
+    return SDValue();
+
+  SDValue Op2 = Op1.getOperand(1);
+  Op1 = Op1.getOperand(0);
+  if (!(Op1->isDivergent() ^ Op2->isDivergent()))
+    return SDValue();
+
+  if (Op1->isDivergent())
+    std::swap(Op1, Op2);
+
+  // If either operand is constant this will conflict with
+  // DAGCombiner::ReassociateOps().
+  if (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))
+    return SDValue();
+
+  SDLoc SL(N);
+  SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1);
+  return DAG.getNode(Opc, SL, VT, Add1, Op2);
+}
+
 static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
                            EVT VT,
                            SDValue N0, SDValue N1, SDValue N2,
@@ -8526,6 +8565,10 @@ SDValue SITargetLowering::performAddComb
     return SDValue();
   }
 
+  if (SDValue V = reassociateScalarOps(N, DAG)) {
+    return V;
+  }
+
   if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
     return SDValue();
 
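
The guard for the combine is the pair of XOR tests on the divergence
bits in reassociateScalarOps() above: the rewrite only fires when
exactly one operand at each level is divergent, and the operands are
then swapped so the uniform one comes first. A minimal standalone C++
sketch of that canonicalization step, with hypothetical names (the
real code queries SDNode::isDivergent() on DAG values):

  #include <utility>

  // Stand-in for a DAG value; only the divergence bit matters here.
  struct Val { bool Divergent; };

  // Return true iff exactly one of A/B is divergent, swapping so that
  // A ends up as the uniform operand; mirrors the checks in
  // reassociateScalarOps().
  static bool canonicalizeUniformFirst(Val *&A, Val *&B) {
    if (!(A->Divergent ^ B->Divergent))
      return false; // both uniform or both divergent: nothing to gain
    if (A->Divergent)
      std::swap(A, B);
    return true;
  }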

Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h?rev=354066&r1=354065&r2=354066&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h Thu Feb 14 14:11:25 2019
@@ -155,6 +155,7 @@ private:
   SDValue performExtractVectorEltCombine(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue performInsertVectorEltCombine(SDNode *N, DAGCombinerInfo &DCI) const;
 
+  SDValue reassociateScalarOps(SDNode *N, SelectionDAG &DAG) const;
   unsigned getFusedOpcode(const SelectionDAG &DAG,
                           const SDNode *N0, const SDNode *N1) const;
   SDValue performAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;

Modified: llvm/trunk/test/CodeGen/AMDGPU/add3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/add3.ll?rev=354066&r1=354065&r2=354066&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/add3.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/add3.ll Thu Feb 14 14:11:25 2019
@@ -25,6 +25,12 @@ define amdgpu_ps float @add3(i32 %a, i32
 
 ; V_MAD_U32_U24 is given higher priority.
 define amdgpu_ps float @mad_no_add3(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
+; VI-LABEL: mad_no_add3:
+; VI:       ; %bb.0:
+; VI-NEXT:    v_mad_u32_u24 v0, v0, v1, v4
+; VI-NEXT:    v_mad_u32_u24 v0, v2, v3, v0
+; VI-NEXT:    ; return to shader part epilog
+;
 ; GFX9-LABEL: mad_no_add3:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    v_mad_u32_u24 v0, v0, v1, v4
@@ -54,13 +60,13 @@ define amdgpu_ps float @mad_no_add3(i32
 define amdgpu_ps float @add3_vgpr_b(i32 inreg %a, i32 %b, i32 inreg %c) {
 ; VI-LABEL: add3_vgpr_b:
 ; VI:       ; %bb.0:
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; VI-NEXT:    s_add_i32 s3, s3, s2
 ; VI-NEXT:    v_add_u32_e32 v0, vcc, s3, v0
 ; VI-NEXT:    ; return to shader part epilog
 ;
 ; GFX9-LABEL: add3_vgpr_b:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    v_add_u32_e32 v0, s2, v0
+; GFX9-NEXT:    s_add_i32 s3, s3, s2
 ; GFX9-NEXT:    v_add_u32_e32 v0, s3, v0
 ; GFX9-NEXT:    ; return to shader part epilog
   %x = add i32 %a, %b

Added: llvm/trunk/test/CodeGen/AMDGPU/reassoc-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/reassoc-scalar.ll?rev=354066&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/reassoc-scalar.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/reassoc-scalar.ll Thu Feb 14 14:11:25 2019
@@ -0,0 +1,113 @@
+; RUN: llc -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX8 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+
+; GCN-LABEL: reassoc_i32:
+; GCN: s_add_i32 [[ADD1:s[0-9]+]], s{{[0-9]+}}, s{{[0-9]+}}
+; GFX8: v_add_u32_e32 v{{[0-9]+}}, vcc, [[ADD1]], v{{[0-9]+}}
+; GFX9: v_add_u32_e32 v{{[0-9]+}}, [[ADD1]], v{{[0-9]+}}
+define amdgpu_kernel void @reassoc_i32(i32 addrspace(1)* %arg, i32 %x, i32 %y) {
+bb:
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %add1 = add i32 %x, %tid
+  %add2 = add i32 %add1, %y
+  store i32 %add2, i32 addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: reassoc_i32_swap_arg_order:
+; GCN:  s_add_i32 [[ADD1:s[0-9]+]], s{{[0-9]+}}, s{{[0-9]+}}
+; GFX8: v_add_u32_e32 v{{[0-9]+}}, vcc, [[ADD1]], v{{[0-9]+}}
+; GFX9: v_add_u32_e32 v{{[0-9]+}}, [[ADD1]], v{{[0-9]+}}
+define amdgpu_kernel void @reassoc_i32_swap_arg_order(i32 addrspace(1)* %arg, i32 %x, i32 %y) {
+bb:
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %add1 = add i32 %tid, %x
+  %add2 = add i32 %y, %add1
+  store i32 %add2, i32 addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: reassoc_i64:
+; GCN:      s_add_u32 [[ADD1L:s[0-9]+]], s{{[0-9]+}}, s{{[0-9]+}}
+; GCN:      s_addc_u32 [[ADD1H:s[0-9]+]], s{{[0-9]+}}, s{{[0-9]+}}
+; GFX8-DAG: v_add_u32_e32 v{{[0-9]+}}, vcc, [[ADD1L]], v{{[0-9]+}}
+; GFX9-DAG: v_add_co_u32_e32 v{{[0-9]+}}, vcc, [[ADD1L]], v{{[0-9]+}}
+; GCN-DAG:  v_mov_b32_e32 [[VADD1H:v[0-9]+]], [[ADD1H]]
+; GFX8:     v_addc_u32_e32 v{{[0-9]+}}, vcc, 0, [[VADD1H]], vcc
+; GFX9:     v_addc_co_u32_e32 v{{[0-9]+}}, vcc, 0, [[VADD1H]], vcc
+define amdgpu_kernel void @reassoc_i64(i64 addrspace(1)* %arg, i64 %x, i64 %y) {
+bb:
+  %tid32 = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tid = zext i32 %tid32 to i64
+  %add1 = add i64 %x, %tid
+  %add2 = add i64 %add1, %y
+  store i64 %add2, i64 addrspace(1)* %arg, align 8
+  ret void
+}
+
+; GCN-LABEL: reassoc_v2i32:
+; GCN: s_add_i32 [[ADD1:s[0-9]+]], s{{[0-9]+}}, s{{[0-9]+}}
+; GCN: s_add_i32 [[ADD2:s[0-9]+]], s{{[0-9]+}}, s{{[0-9]+}}
+; GFX8: v_add_u32_e32 v{{[0-9]+}}, vcc, [[ADD1]], v{{[0-9]+}}
+; GFX8: v_add_u32_e32 v{{[0-9]+}}, vcc, [[ADD2]], v{{[0-9]+}}
+; GFX9: v_add_u32_e32 v{{[0-9]+}}, [[ADD1]], v{{[0-9]+}}
+; GFX9: v_add_u32_e32 v{{[0-9]+}}, [[ADD2]], v{{[0-9]+}}
+define amdgpu_kernel void @reassoc_v2i32(<2 x i32> addrspace(1)* %arg, <2 x i32> %x, <2 x i32> %y) {
+bb:
+  %t1 = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %t2 = tail call i32 @llvm.amdgcn.workitem.id.y()
+  %v1 = insertelement <2 x i32> undef, i32 %t1, i32 0
+  %v2 = insertelement <2 x i32> %v1, i32 %t2, i32 1
+  %add1 = add <2 x i32> %x, %v2
+  %add2 = add <2 x i32> %add1, %y
+  store <2 x i32> %add2, <2 x i32> addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: reassoc_i32_nuw:
+; GCN:  s_add_i32 [[ADD1:s[0-9]+]], s{{[0-9]+}}, s{{[0-9]+}}
+; GFX8: v_add_u32_e32 v{{[0-9]+}}, vcc, [[ADD1]], v{{[0-9]+}}
+; GFX9: v_add_u32_e32 v{{[0-9]+}}, [[ADD1]], v{{[0-9]+}}
+define amdgpu_kernel void @reassoc_i32_nuw(i32 addrspace(1)* %arg, i32 %x, i32 %y) {
+bb:
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %add1 = add i32 %x, %tid
+  %add2 = add nuw i32 %add1, %y
+  store i32 %add2, i32 addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: reassoc_i32_multiuse:
+; GFX8: v_add_u32_e32 [[ADD1:v[0-9]+]], vcc, s{{[0-9]+}}, v{{[0-9]+}}
+; GFX9: v_add_u32_e32 [[ADD1:v[0-9]+]], s{{[0-9]+}}, v{{[0-9]+}}
+; GFX8: v_add_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, [[ADD1]]
+; GFX9: v_add_u32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[ADD1]]
+define amdgpu_kernel void @reassoc_i32_multiuse(i32 addrspace(1)* %arg, i32 %x, i32 %y) {
+bb:
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %add1 = add i32 %x, %tid
+  %add2 = add i32 %add1, %y
+  store volatile i32 %add1, i32 addrspace(1)* %arg, align 4
+  store volatile i32 %add2, i32 addrspace(1)* %arg, align 4
+  ret void
+}
+
+; TODO: This should be reassociated as well; however, it is disabled to avoid an
+;       endless loop, since DAGCombiner::ReassociateOps() reverts the reassociation.
+; GCN-LABEL: reassoc_i32_const:
+; GFX8: v_add_u32_e32 [[ADD1:v[0-9]+]], vcc, 42, v{{[0-9]+}}
+; GFX9: v_add_u32_e32 [[ADD1:v[0-9]+]],  42, v{{[0-9]+}}
+; GFX8: v_add_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, [[ADD1]]
+; GFX9: v_add_u32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[ADD1]]
+define amdgpu_kernel void @reassoc_i32_const(i32 addrspace(1)* %arg, i32 %x) {
+bb:
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %add1 = add i32 %tid, 42
+  %add2 = add i32 %add1, %x
+  store volatile i32 %add1, i32 addrspace(1)* %arg, align 4
+  store volatile i32 %add2, i32 addrspace(1)* %arg, align 4
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
+declare i32 @llvm.amdgcn.workitem.id.y()



