[llvm] r305964 - [AMDGPU] Combine add and adde, sub and sube
Stanislav Mekhanoshin via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 21 15:30:01 PDT 2017
Author: rampitec
Date: Wed Jun 21 17:30:01 2017
New Revision: 305964
URL: http://llvm.org/viewvc/llvm-project?rev=305964&view=rev
Log:
[AMDGPU] Combine add and adde, sub and sube
If one of the arguments of an adde/sube is zero, we can fold another
add/sub into it.
Differential Revision: https://reviews.llvm.org/D34374
Modified:
llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h
llvm/trunk/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=305964&r1=305963&r2=305964&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Wed Jun 21 17:30:01 2017
@@ -475,6 +475,9 @@ SITargetLowering::SITargetLowering(const
}
setTargetDAGCombine(ISD::ADD);
+ setTargetDAGCombine(ISD::ADDCARRY);
+ setTargetDAGCombine(ISD::SUB);
+ setTargetDAGCombine(ISD::SUBCARRY);
setTargetDAGCombine(ISD::FADD);
setTargetDAGCombine(ISD::FSUB);
setTargetDAGCombine(ISD::FMINNUM);
@@ -4859,19 +4862,81 @@ SDValue SITargetLowering::performAddComb
// add x, sext (setcc) => subcarry x, 0, setcc
unsigned Opc = LHS.getOpcode();
if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
- Opc == ISD::ANY_EXTEND)
+ Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
std::swap(RHS, LHS);
Opc = RHS.getOpcode();
- if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
- Opc == ISD::ANY_EXTEND) {
+ switch (Opc) {
+ default: break;
+ case ISD::ZERO_EXTEND:
+ case ISD::SIGN_EXTEND:
+ case ISD::ANY_EXTEND: {
auto Cond = RHS.getOperand(0);
- if (Cond.getOpcode() == ISD::SETCC) {
- SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
- SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
- Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
- return DAG.getNode(Opc, SL, VTList, Args);
- }
+ if (Cond.getOpcode() != ISD::SETCC) break;
+ SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
+ SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
+ Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
+ return DAG.getNode(Opc, SL, VTList, Args);
+ }
+ case ISD::ADDCARRY: {
+ // add x, (addcarry y, 0, cc) => addcarry x, y, cc
+ auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
+ if (!C || C->getZExtValue() != 0) break;
+ SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
+ return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
+ }
+ }
+ return SDValue();
+}
+
+// Fold a plain sub into an existing borrow-consuming SUBCARRY:
+//   sub (subcarry x, 0, cc), y => subcarry x, y, cc
+// Only the LHS form is a valid fold: sub is not commutative, so
+// sub y, (subcarry x, 0, cc) must NOT be rewritten — the rewrite
+// would compute x - y - cc while the original computes y - x + cc.
+// (The previous version swapped the operands to also catch the RHS
+// form, which was incorrect for exactly that reason.)
+// Restricted to i32, the type SUBCARRY produces on this target.
+SDValue SITargetLowering::performSubCombine(SDNode *N,
+                                            DAGCombinerInfo &DCI) const {
+  SelectionDAG &DAG = DCI.DAG;
+  EVT VT = N->getValueType(0);
+
+  if (VT != MVT::i32)
+    return SDValue();
+
+  SDLoc SL(N);
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+
+  if (LHS.getOpcode() == ISD::SUBCARRY) {
+    // sub (subcarry x, 0, cc), y => subcarry x, y, cc
+    // The inner SUBCARRY must subtract a literal zero so its value
+    // result is x - cc; folding y in preserves the final x - y - cc.
+    auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
+    if (!C || C->getZExtValue() != 0)
+      return SDValue();
+    SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
+    return DAG.getNode(ISD::SUBCARRY, SL, LHS->getVTList(), Args);
+  }
+  return SDValue();
+}
+
+// Fold a zero-addend ADDCARRY/SUBCARRY over a plain i32 add/sub:
+//   addcarry (add x, y), 0, cc => addcarry x, y, cc
+//   subcarry (sub x, y), 0, cc => subcarry x, y, cc
+// The i32 value result is identical in both forms.
+// NOTE(review): the carry-out (result 1) can differ when the inner
+// add/sub wraps around 2^32 — presumably the carry output is unused
+// in the patterns this fires on; verify against callers/tests.
+SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+
+ if (N->getValueType(0) != MVT::i32)
+ return SDValue();
+
+ // Operand 1 is the second value addend; it must be the constant
+ // zero for the fold to preserve the value result.
+ auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (!C || C->getZExtValue() != 0)
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ SDValue LHS = N->getOperand(0);
+
+ // addcarry (add x, y), 0, cc => addcarry x, y, cc
+ // subcarry (sub x, y), 0, cc => subcarry x, y, cc
+ unsigned LHSOpc = LHS.getOpcode();
+ unsigned Opc = N->getOpcode();
+ if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
+ (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
+ // Reuse N's VTList so both the value and carry results carry over.
+ SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
+ return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
+ }
}
return SDValue();
}
@@ -5048,6 +5113,11 @@ SDValue SITargetLowering::PerformDAGComb
return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
case ISD::ADD:
return performAddCombine(N, DCI);
+ case ISD::SUB:
+ return performSubCombine(N, DCI);
+ case ISD::ADDCARRY:
+ case ISD::SUBCARRY:
+ return performAddCarrySubCarryCombine(N, DCI);
case ISD::FADD:
return performFAddCombine(N, DCI);
case ISD::FSUB:
Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h?rev=305964&r1=305963&r2=305964&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h Wed Jun 21 17:30:01 2017
@@ -109,6 +109,8 @@ class SITargetLowering final : public AM
unsigned getFusedOpcode(const SelectionDAG &DAG,
const SDNode *N0, const SDNode *N1) const;
SDValue performAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue performAddCarrySubCarryCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue performSubCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performFAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performFSubCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performSetCCCombine(SDNode *N, DAGCombinerInfo &DCI) const;
Modified: llvm/trunk/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/combine-cond-add-sub.ll?rev=305964&r1=305963&r2=305964&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/combine-cond-add-sub.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/combine-cond-add-sub.ll Wed Jun 21 17:30:01 2017
@@ -36,6 +36,86 @@ bb:
ret void
}
+; GCN-LABEL: {{^}}add_adde:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
+; GCN-NOT: v_cndmask
+; GCN-NOT: v_add
+
+; Exercises add x, (addcarry y, 0, cc) => addcarry x, y, cc:
+; the zext'ed compare plus %v becomes an addcarry, and the outer add of
+; %a must fold into the single v_addc (no separate v_add, no v_cndmask).
+define amdgpu_kernel void @add_adde(i32 addrspace(1)* nocapture %arg, i32 %a) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
+ %v = load i32, i32 addrspace(1)* %gep, align 4
+ %cmp = icmp ugt i32 %x, %y
+ %ext = zext i1 %cmp to i32
+ %adde = add i32 %v, %ext
+ %add2 = add i32 %adde, %a
+ store i32 %add2, i32 addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}adde_add:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
+; GCN-NOT: v_cndmask
+; GCN-NOT: v_add
+
+; Commuted form of add_adde: the plain add of %a happens first, then the
+; zext'ed carry is added. Still expects a single v_addc after the
+; addcarry (add x, y), 0, cc => addcarry x, y, cc combine.
+define amdgpu_kernel void @adde_add(i32 addrspace(1)* nocapture %arg, i32 %a) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
+ %v = load i32, i32 addrspace(1)* %gep, align 4
+ %cmp = icmp ugt i32 %x, %y
+ %ext = zext i1 %cmp to i32
+ %add = add i32 %v, %a
+ %adde = add i32 %add, %ext
+ store i32 %adde, i32 addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}sub_sube:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_subb_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
+; GCN-NOT: v_cndmask
+; GCN-NOT: v_sub
+
+; Exercises sub (subcarry x, 0, cc), y => subcarry x, y, cc:
+; add of a sext'ed compare lowers to a subcarry, and the following sub of
+; %a must fold into the single v_subb (no separate v_sub, no v_cndmask).
+define amdgpu_kernel void @sub_sube(i32 addrspace(1)* nocapture %arg, i32 %a) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
+ %v = load i32, i32 addrspace(1)* %gep, align 4
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %adde = add i32 %v, %ext
+ %sub = sub i32 %adde, %a
+ store i32 %sub, i32 addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}sube_sub:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_subb_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
+; GCN-NOT: v_cndmask
+; GCN-NOT: v_sub
+
+; Commuted form of sub_sube: the plain sub of %a happens first, then the
+; sext'ed carry is added. Expects a single v_subb after the
+; subcarry (sub x, y), 0, cc => subcarry x, y, cc combine.
+define amdgpu_kernel void @sube_sub(i32 addrspace(1)* nocapture %arg, i32 %a) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
+ %v = load i32, i32 addrspace(1)* %gep, align 4
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %sub = sub i32 %v, %a
+ %adde = add i32 %sub, %ext
+ store i32 %adde, i32 addrspace(1)* %gep, align 4
+ ret void
+}
+
declare i32 @llvm.amdgcn.workitem.id.x() #0
declare i32 @llvm.amdgcn.workitem.id.y() #0
More information about the llvm-commits
mailing list