[llvm] r271526 - [DAG] use getBitcast() to reduce code
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 2 09:01:15 PDT 2016
Author: spatel
Date: Thu Jun 2 11:01:15 2016
New Revision: 271526
URL: http://llvm.org/viewvc/llvm-project?rev=271526&view=rev
Log:
[DAG] use getBitcast() to reduce code
Although this was intended to be NFC, the test case change shows a difference in
code scheduling/register allocation caused by a difference in SDLoc generation.
Depending on how you look at it, this is the (dis)advantage of exact checking
in regression tests.
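
For context, DAG.getBitcast() is a thin convenience wrapper around
getNode(ISD::BITCAST, ...). A rough sketch of what it does (approximate, not
the verbatim implementation) is shown below; the relevant detail is that it
takes its debug location from the operand rather than from the node being
combined, which is why the call-site replacements below are not strictly NFC:

  // Sketch of SelectionDAG::getBitcast(VT, V) -- for illustration only.
  SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
    // Skip the no-op case where the value already has the requested type.
    if (V.getValueType() == VT)
      return V;
    // The SDLoc comes from the operand V, not from the combined node N,
    // unlike several of the old call sites that passed SDLoc(N) explicitly.
    return getNode(ISD::BITCAST, SDLoc(V), VT, V);
  }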
Modified:
llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/trunk/test/CodeGen/X86/vec_fneg.ll
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=271526&r1=271525&r2=271526&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Thu Jun 2 11:01:15 2016
@@ -7381,13 +7381,12 @@ SDValue DAGCombiner::visitBITCAST(SDNode
TLI.isOperationLegal(ISD::ConstantFP, VT)) ||
(isa<ConstantFPSDNode>(N0) && VT.isInteger() && !VT.isVector() &&
TLI.isOperationLegal(ISD::Constant, VT)))
- return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, N0);
+ return DAG.getBitcast(VT, N0);
}
// (conv (conv x, t1), t2) -> (conv x, t2)
if (N0.getOpcode() == ISD::BITCAST)
- return DAG.getNode(ISD::BITCAST, SDLoc(N), VT,
- N0.getOperand(0));
+ return DAG.getBitcast(VT, N0.getOperand(0));
// fold (conv (load x)) -> (load (conv*)x)
// If the resultant load doesn't need a higher alignment than the original!
@@ -7432,8 +7431,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode
(N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(N0.getValueType()))) &&
N0.getNode()->hasOneUse() && VT.isInteger() &&
!VT.isVector() && !N0.getValueType().isVector()) {
- SDValue NewConv = DAG.getNode(ISD::BITCAST, SDLoc(N0), VT,
- N0.getOperand(0));
+ SDValue NewConv = DAG.getBitcast(VT, N0.getOperand(0));
AddToWorklist(NewConv.getNode());
SDLoc DL(N);
@@ -7486,8 +7484,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode
unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
if (isTypeLegal(IntXVT)) {
- SDValue X = DAG.getNode(ISD::BITCAST, SDLoc(N0),
- IntXVT, N0.getOperand(1));
+ SDValue X = DAG.getBitcast(IntXVT, N0.getOperand(1));
AddToWorklist(X.getNode());
// If X has a different width than the result/lhs, sext it or truncate it.
@@ -7535,8 +7532,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode
X, DAG.getConstant(SignBit, SDLoc(X), VT));
AddToWorklist(X.getNode());
- SDValue Cst = DAG.getNode(ISD::BITCAST, SDLoc(N0),
- VT, N0.getOperand(0));
+ SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0));
Cst = DAG.getNode(ISD::AND, SDLoc(Cst), VT,
Cst, DAG.getConstant(~SignBit, SDLoc(Cst), VT));
AddToWorklist(Cst.getNode());
@@ -7568,7 +7564,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode
return SDValue(Op.getOperand(0));
if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()))
- return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
+ return DAG.getBitcast(VT, Op);
return SDValue();
};
@@ -7625,8 +7621,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode
// we can end up with a scalar-to-vector node here.
if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR)
return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(BV), VT,
- DAG.getNode(ISD::BITCAST, SDLoc(BV),
- DstEltVT, BV->getOperand(0)));
+ DAG.getBitcast(DstEltVT, BV->getOperand(0)));
SmallVector<SDValue, 8> Ops;
for (SDValue Op : BV->op_values()) {
@@ -7634,8 +7629,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode
// are promoted and implicitly truncated. Make that explicit here.
if (Op.getValueType() != SrcEltVT)
Op = DAG.getNode(ISD::TRUNCATE, SDLoc(BV), SrcEltVT, Op);
- Ops.push_back(DAG.getNode(ISD::BITCAST, SDLoc(BV),
- DstEltVT, Op));
+ Ops.push_back(DAG.getBitcast(DstEltVT, Op));
AddToWorklist(Ops.back().getNode());
}
return DAG.getBuildVector(VT, SDLoc(BV), Ops);
@@ -9311,7 +9305,7 @@ SDValue DAGCombiner::visitFNEG(SDNode *N
Int = DAG.getNode(ISD::XOR, DL0, IntVT, Int,
DAG.getConstant(SignMask, DL0, IntVT));
AddToWorklist(Int.getNode());
- return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Int);
+ return DAG.getBitcast(VT, Int);
}
}
@@ -9416,7 +9410,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N
Int = DAG.getNode(ISD::AND, DL, IntVT, Int,
DAG.getConstant(SignMask, DL, IntVT));
AddToWorklist(Int.getNode());
- return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Int);
+ return DAG.getBitcast(N->getValueType(0), Int);
}
}
@@ -12304,7 +12298,7 @@ SDValue DAGCombiner::ReplaceExtractVecto
if (ResultVT.bitsLT(VecEltVT))
Load = DAG.getNode(ISD::TRUNCATE, SDLoc(EVE), ResultVT, Load);
else
- Load = DAG.getNode(ISD::BITCAST, SDLoc(EVE), ResultVT, Load);
+ Load = DAG.getBitcast(ResultVT, Load);
}
WorklistRemover DeadNodes(*this);
SDValue From[] = { SDValue(EVE, 0), SDValue(OriginalLoad, 1) };
@@ -12622,7 +12616,7 @@ SDValue DAGCombiner::reduceBuildVecExtTo
// The new BUILD_VECTOR node has the potential to be further optimized.
AddToWorklist(BV.getNode());
// Bitcast to the desired type.
- return DAG.getNode(ISD::BITCAST, dl, VT, BV);
+ return DAG.getBitcast(VT, BV);
}
SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) {
@@ -12921,15 +12915,14 @@ static SDValue combineConcatVectorOfScal
if (Op.isUndef())
Op = ScalarUndef;
else
- Op = DAG.getNode(ISD::BITCAST, DL, SVT, Op);
+ Op = DAG.getBitcast(SVT, Op);
}
}
}
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SVT,
VT.getSizeInBits() / SVT.getSizeInBits());
- return DAG.getNode(ISD::BITCAST, DL, VT,
- DAG.getBuildVector(VecVT, DL, Ops));
+ return DAG.getBitcast(VT, DAG.getBuildVector(VecVT, DL, Ops));
}
// Check to see if this is a CONCAT_VECTORS of a bunch of EXTRACT_SUBVECTOR
@@ -13057,7 +13050,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS
SDLoc dl = SDLoc(N);
SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NVT, Scalar);
- return DAG.getNode(ISD::BITCAST, dl, VT, Res);
+ return DAG.getBitcast(VT, Res);
}
}
@@ -13214,11 +13207,11 @@ SDValue DAGCombiner::visitEXTRACT_SUBVEC
// otherwise => (extract_subvec V1, ExtIdx)
if (InsIdx->getZExtValue() * SmallVT.getScalarType().getSizeInBits() ==
ExtIdx->getZExtValue() * NVT.getScalarType().getSizeInBits())
- return DAG.getNode(ISD::BITCAST, dl, NVT, V->getOperand(1));
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NVT,
- DAG.getNode(ISD::BITCAST, dl,
- N->getOperand(0).getValueType(),
- V->getOperand(0)), N->getOperand(1));
+ return DAG.getBitcast(NVT, V->getOperand(1));
+ return DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, dl, NVT,
+ DAG.getBitcast(N->getOperand(0).getValueType(), V->getOperand(0)),
+ N->getOperand(1));
}
}
@@ -13482,7 +13475,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE
// We may have jumped through bitcasts, so the type of the
// BUILD_VECTOR may not match the type of the shuffle.
if (V->getValueType(0) != VT)
- NewBV = DAG.getNode(ISD::BITCAST, SDLoc(N), VT, NewBV);
+ NewBV = DAG.getBitcast(VT, NewBV);
return NewBV;
}
}
@@ -13604,11 +13597,10 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE
}
if (LegalMask) {
- SV0 = DAG.getNode(ISD::BITCAST, SDLoc(N), ScaleVT, SV0);
- SV1 = DAG.getNode(ISD::BITCAST, SDLoc(N), ScaleVT, SV1);
- return DAG.getNode(
- ISD::BITCAST, SDLoc(N), VT,
- DAG.getVectorShuffle(ScaleVT, SDLoc(N), SV0, SV1, NewMask));
+ SV0 = DAG.getBitcast(ScaleVT, SV0);
+ SV1 = DAG.getBitcast(ScaleVT, SV1);
+ return DAG.getBitcast(
+ VT, DAG.getVectorShuffle(ScaleVT, SDLoc(N), SV0, SV1, NewMask));
}
}
}
Modified: llvm/trunk/test/CodeGen/X86/vec_fneg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_fneg.ll?rev=271526&r1=271525&r2=271526&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_fneg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_fneg.ll Thu Jun 2 11:01:15 2016
@@ -74,9 +74,9 @@ define <2 x float> @fneg_bitcast(i64 %i)
; X32-SSE2-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE2-NEXT: xorl %eax, %ecx
+; X32-SSE2-NEXT: movd %ecx, %xmm1
; X32-SSE2-NEXT: xorl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movd %eax, %xmm1
-; X32-SSE2-NEXT: movd %ecx, %xmm0
+; X32-SSE2-NEXT: movd %eax, %xmm0
; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-SSE2-NEXT: retl
;