[llvm] r281495 - getVectorElementType().getSizeInBits() -> getScalarSizeInBits() ; NFCI
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 14 09:37:16 PDT 2016
Author: spatel
Date: Wed Sep 14 11:37:15 2016
New Revision: 281495
URL: http://llvm.org/viewvc/llvm-project?rev=281495&view=rev
Log:
getVectorElementType().getSizeInBits() -> getScalarSizeInBits() ; NFCI
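
For context, getScalarSizeInBits() is a shorthand for the replaced two-step call chain: for a vector type it returns the element width (and for a scalar type, the type's own width), so the rename is behavior-preserving, hence the NFCI tag. Below is a minimal standalone sketch of that relationship; it uses a simplified stand-in for the EVT API rather than the real LLVM headers, so the member layout and helper names here are illustrative only.

// Simplified model of the EVT API relevant to this commit (not the actual
// LLVM headers); it only illustrates why the two call chains agree for
// vector types.
#include <cassert>

struct EVT {
  bool IsVector;
  unsigned EltBits;   // element size for vectors, own size for scalars
  unsigned NumElts;   // 1 for scalars

  bool isVector() const { return IsVector; }

  // For a vector type, the element type is a scalar of EltBits bits.
  EVT getVectorElementType() const {
    assert(IsVector && "not a vector");
    return {false, EltBits, 1};
  }

  unsigned getSizeInBits() const { return EltBits * NumElts; }

  // getScalarSizeInBits() folds the two-step query into one call:
  // element width for vectors, full width for scalars.
  unsigned getScalarSizeInBits() const {
    return isVector() ? getVectorElementType().getSizeInBits()
                      : getSizeInBits();
  }
};

int main() {
  EVT V4I32{true, 32, 4};  // stand-in for MVT::v4i32
  // The pattern removed throughout this commit ...
  unsigned A = V4I32.getVectorElementType().getSizeInBits();
  // ... and its one-call replacement.
  unsigned B = V4I32.getScalarSizeInBits();
  assert(A == B && A == 32);
  return 0;
}
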
Modified:
llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp
llvm/trunk/lib/IR/ValueTypes.cpp
llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/trunk/lib/Target/ARM/ARMISelDAGToDAG.cpp
llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp
llvm/trunk/lib/Target/Mips/MipsSEISelLowering.cpp
llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h
llvm/trunk/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp
llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.h
llvm/trunk/lib/Target/X86/Utils/X86ShuffleDecode.cpp
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp Wed Sep 14 11:37:15 2016
@@ -1216,8 +1216,7 @@ SDValue SelectionDAGLegalize::ExpandExtr
}
// Add the offset to the index.
- unsigned EltSize =
- Vec.getValueType().getVectorElementType().getSizeInBits()/8;
+ unsigned EltSize = Vec.getValueType().getScalarSizeInBits() / 8;
Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
DAG.getConstant(EltSize, SDLoc(Vec), Idx.getValueType()));
@@ -1268,8 +1267,7 @@ SDValue SelectionDAGLegalize::ExpandInse
// Then store the inserted part.
// Add the offset to the index.
- unsigned EltSize =
- Vec.getValueType().getVectorElementType().getSizeInBits()/8;
+ unsigned EltSize = Vec.getValueType().getScalarSizeInBits() / 8;
Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
DAG.getConstant(EltSize, SDLoc(Vec), Idx.getValueType()));
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp Wed Sep 14 11:37:15 2016
@@ -1054,7 +1054,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_B
// vector element type. Check that any extra bits introduced will be
// truncated away.
assert(N->getOperand(0).getValueSizeInBits() >=
- N->getValueType(0).getVectorElementType().getSizeInBits() &&
+ N->getValueType(0).getScalarSizeInBits() &&
"Type of inserted value narrower than vector element type!");
SmallVector<SDValue, 16> NewOps;
@@ -1084,7 +1084,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_I
// Check that any extra bits introduced will be truncated away.
assert(N->getOperand(1).getValueSizeInBits() >=
- N->getValueType(0).getVectorElementType().getSizeInBits() &&
+ N->getValueType(0).getScalarSizeInBits() &&
"Type of inserted value narrower than vector element type!");
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
GetPromotedInteger(N->getOperand(1)),
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp Wed Sep 14 11:37:15 2016
@@ -795,7 +795,7 @@ void DAGTypeLegalizer::SetScalarizedVect
// the vector element type. For example BUILD_VECTOR of type <1 x i1> with
// a constant i8 operand.
assert(Result.getValueSizeInBits() >=
- Op.getValueType().getVectorElementType().getSizeInBits() &&
+ Op.getValueType().getScalarSizeInBits() &&
"Invalid type for scalarized vector");
AnalyzeNewValue(Result);
@@ -913,7 +913,7 @@ SDValue DAGTypeLegalizer::BitConvertToIn
/// Convert to a vector of integers of the same size.
SDValue DAGTypeLegalizer::BitConvertVectorToIntegerVector(SDValue Op) {
assert(Op.getValueType().isVector() && "Only applies to vectors!");
- unsigned EltWidth = Op.getValueType().getVectorElementType().getSizeInBits();
+ unsigned EltWidth = Op.getValueType().getScalarSizeInBits();
EVT EltNVT = EVT::getIntegerVT(*DAG.getContext(), EltWidth);
unsigned NumElts = Op.getValueType().getVectorNumElements();
return DAG.getNode(ISD::BITCAST, SDLoc(Op),
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp Wed Sep 14 11:37:15 2016
@@ -817,8 +817,8 @@ SDValue VectorLegalizer::ExpandSIGN_EXTE
// Now we need sign extend. Do this by shifting the elements. Even if these
// aren't legal operations, they have a better chance of being legalized
// without full scalarization than the sign extension does.
- unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
- unsigned SrcEltWidth = SrcVT.getVectorElementType().getSizeInBits();
+ unsigned EltWidth = VT.getScalarSizeInBits();
+ unsigned SrcEltWidth = SrcVT.getScalarSizeInBits();
SDValue ShiftAmount = DAG.getConstant(EltWidth - SrcEltWidth, DL, VT);
return DAG.getNode(ISD::SRA, DL, VT,
DAG.getNode(ISD::SHL, DL, VT, Op, ShiftAmount),
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp Wed Sep 14 11:37:15 2016
@@ -1282,7 +1282,7 @@ void DAGTypeLegalizer::SplitVecRes_Exten
LLVMContext &Ctx = *DAG.getContext();
EVT NewSrcVT = EVT::getVectorVT(
Ctx, EVT::getIntegerVT(
- Ctx, SrcVT.getVectorElementType().getSizeInBits() * 2),
+ Ctx, SrcVT.getScalarSizeInBits() * 2),
NumElements);
EVT SplitSrcVT =
EVT::getVectorVT(Ctx, SrcVT.getVectorElementType(), NumElements / 2);
@@ -1940,8 +1940,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_Tru
// if we're trying to split it at all. assert() that's true, just in case.
assert(!(NumElements & 1) && "Splitting vector, but not in half!");
- unsigned InElementSize = InVT.getVectorElementType().getSizeInBits();
- unsigned OutElementSize = OutVT.getVectorElementType().getSizeInBits();
+ unsigned InElementSize = InVT.getScalarSizeInBits();
+ unsigned OutElementSize = OutVT.getScalarSizeInBits();
// If the input elements are only 1/2 the width of the result elements,
// just use the normal splitting. Our trick only work if there's room
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Wed Sep 14 11:37:15 2016
@@ -134,7 +134,7 @@ bool ISD::isBuildVectorAllOnes(const SDN
// we care if the resultant vector is all ones, not whether the individual
// constants are.
SDValue NotZero = N->getOperand(i);
- unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
+ unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
if (CN->getAPIntValue().countTrailingOnes() < EltSize)
return false;
@@ -173,7 +173,7 @@ bool ISD::isBuildVectorAllZeros(const SD
// We only want to check enough bits to cover the vector elements, because
// we care if the resultant vector is all zeros, not whether the individual
// constants are.
- unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
+ unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
if (CN->getAPIntValue().countTrailingZeros() < EltSize)
return false;
@@ -7125,7 +7125,7 @@ bool BuildVectorSDNode::isConstantSplat(
// false.
unsigned int nOps = getNumOperands();
assert(nOps > 0 && "isConstantSplat has 0-size build vector");
- unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
+ unsigned EltBitSize = VT.getScalarSizeInBits();
for (unsigned j = 0; j < nOps; ++j) {
unsigned i = isBigEndian ? nOps-1-j : j;
Modified: llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp (original)
+++ llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp Wed Sep 14 11:37:15 2016
@@ -1395,7 +1395,7 @@ void TargetLoweringBase::computeRegister
MVT SVT = (MVT::SimpleValueType) nVT;
// Promote vectors of integers to vectors with the same number
// of elements, with a wider element type.
- if (SVT.getVectorElementType().getSizeInBits() > EltVT.getSizeInBits() &&
+ if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)) {
TransformToType[i] = SVT;
RegisterTypeForVT[i] = SVT;
Modified: llvm/trunk/lib/IR/ValueTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/ValueTypes.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/IR/ValueTypes.cpp (original)
+++ llvm/trunk/lib/IR/ValueTypes.cpp Wed Sep 14 11:37:15 2016
@@ -26,7 +26,7 @@ EVT EVT::changeExtendedTypeToInteger() c
EVT EVT::changeExtendedVectorElementTypeToInteger() const {
LLVMContext &Context = LLVMTy->getContext();
- EVT IntTy = getIntegerVT(Context, getVectorElementType().getSizeInBits());
+ EVT IntTy = getIntegerVT(Context, getScalarSizeInBits());
return getVectorVT(Context, IntTy, getVectorNumElements());
}
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Wed Sep 14 11:37:15 2016
@@ -2130,7 +2130,7 @@ static bool isExtendedBUILD_VECTOR(SDNod
for (const SDValue &Elt : N->op_values()) {
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
unsigned HalfSize = EltSize / 2;
if (isSigned) {
if (!isIntN(HalfSize, C->getSExtValue()))
@@ -2157,7 +2157,7 @@ static SDValue skipExtensionForVectorMUL
assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
EVT VT = N->getValueType(0);
SDLoc dl(N);
- unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
+ unsigned EltSize = VT.getScalarSizeInBits() / 2;
unsigned NumElts = VT.getVectorNumElements();
MVT TruncVT = MVT::getIntegerVT(EltSize);
SmallVector<SDValue, 8> Ops;
@@ -5028,7 +5028,7 @@ SDValue AArch64TargetLowering::Reconstru
}
}
unsigned ResMultiplier =
- VT.getVectorElementType().getSizeInBits() / SmallestEltTy.getSizeInBits();
+ VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
@@ -5113,7 +5113,7 @@ SDValue AArch64TargetLowering::Reconstru
// The stars all align, our next step is to produce the mask for the shuffle.
SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
- int BitsPerShuffleLane = ShuffleVT.getVectorElementType().getSizeInBits();
+ int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
SDValue Entry = Op.getOperand(i);
if (Entry.isUndef())
@@ -5126,8 +5126,8 @@ SDValue AArch64TargetLowering::Reconstru
// trunc. So only std::min(SrcBits, DestBits) actually get defined in this
// segment.
EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
- int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
- VT.getVectorElementType().getSizeInBits());
+ int BitsDefined =
+ std::min(OrigEltTy.getSizeInBits(), VT.getScalarSizeInBits());
int LanesDefined = BitsDefined / BitsPerShuffleLane;
// This source is expected to fill ResMultiplier lanes of the final shuffle,
@@ -5231,7 +5231,7 @@ static bool isREVMask(ArrayRef<int> M, E
assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
"Only possible block sizes for REV are: 16, 32, 64");
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
@@ -5949,7 +5949,7 @@ static SDValue tryLowerToSLI(SDNode *N,
// Is C1 == ~C2, taking into account how much one can shift elements of a
// particular size?
uint64_t C2 = C2node->getZExtValue();
- unsigned ElemSizeInBits = VT.getVectorElementType().getSizeInBits();
+ unsigned ElemSizeInBits = VT.getScalarSizeInBits();
if (C2 > ElemSizeInBits)
return SDValue();
unsigned ElemMask = (1 << ElemSizeInBits) - 1;
@@ -6445,7 +6445,7 @@ FailedModImm:
if (!isConstant && !usesOnlyOneValue) {
SDValue Vec = DAG.getUNDEF(VT);
SDValue Op0 = Op.getOperand(0);
- unsigned ElemSize = VT.getVectorElementType().getSizeInBits();
+ unsigned ElemSize = VT.getScalarSizeInBits();
unsigned i = 0;
// For 32 and 64 bit types, use INSERT_SUBREG for lane zero to
// a) Avoid a RMW dependency on the full vector register, and
@@ -6567,7 +6567,7 @@ SDValue AArch64TargetLowering::LowerEXTR
// If this is extracting the upper 64-bits of a 128-bit vector, we match
// that directly.
- if (Size == 64 && Val * VT.getVectorElementType().getSizeInBits() == 64)
+ if (Size == 64 && Val * VT.getScalarSizeInBits() == 64)
return Op;
return SDValue();
@@ -6637,7 +6637,7 @@ static bool getVShiftImm(SDValue Op, uns
/// 0 <= Value <= ElementBits for a long left shift.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
assert(VT.isVector() && "vector shift count is not a vector type");
- int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
+ int64_t ElementBits = VT.getScalarSizeInBits();
if (!getVShiftImm(Op, ElementBits, Cnt))
return false;
return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
@@ -6648,7 +6648,7 @@ static bool isVShiftLImm(SDValue Op, EVT
/// 1 <= Value <= ElementBits for a right shift; or
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) {
assert(VT.isVector() && "vector shift count is not a vector type");
- int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
+ int64_t ElementBits = VT.getScalarSizeInBits();
if (!getVShiftImm(Op, ElementBits, Cnt))
return false;
return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
@@ -6662,7 +6662,7 @@ SDValue AArch64TargetLowering::LowerVect
if (!Op.getOperand(1).getValueType().isVector())
return Op;
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
switch (Op.getOpcode()) {
default:
@@ -7937,7 +7937,7 @@ static SDValue tryCombineToBSL(SDNode *N
// We only have to look for constant vectors here since the general, variable
// case can be handled in TableGen.
- unsigned Bits = VT.getVectorElementType().getSizeInBits();
+ unsigned Bits = VT.getScalarSizeInBits();
uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1);
for (int i = 1; i >= 0; --i)
for (int j = 1; j >= 0; --j) {
@@ -8126,7 +8126,7 @@ static SDValue performConcatVectorsCombi
// splat. The indexed instructions are going to be expecting a DUPLANE64, so
// canonicalise to that.
if (N0 == N1 && VT.getVectorNumElements() == 2) {
- assert(VT.getVectorElementType().getSizeInBits() == 64);
+ assert(VT.getScalarSizeInBits() == 64);
return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT, WidenVector(N0, DAG),
DAG.getConstant(0, dl, MVT::i64));
}
@@ -8691,7 +8691,7 @@ static SDValue performExtendCombine(SDNo
if (SrcVT.getSizeInBits() != 64)
return SDValue();
- unsigned SrcEltSize = SrcVT.getVectorElementType().getSizeInBits();
+ unsigned SrcEltSize = SrcVT.getScalarSizeInBits();
unsigned ElementCount = SrcVT.getVectorNumElements();
SrcVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize * 2), ElementCount);
SDLoc DL(N);
Modified: llvm/trunk/lib/Target/ARM/ARMISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMISelDAGToDAG.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMISelDAGToDAG.cpp Wed Sep 14 11:37:15 2016
@@ -2142,7 +2142,7 @@ void ARMDAGToDAGISel::SelectVLDSTLane(SD
unsigned Alignment = 0;
if (NumVecs != 3) {
Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
- unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
+ unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
if (Alignment > NumBytes)
Alignment = NumBytes;
if (Alignment < 8 && Alignment < NumBytes)
@@ -2257,7 +2257,7 @@ void ARMDAGToDAGISel::SelectVLDDup(SDNod
unsigned Alignment = 0;
if (NumVecs != 3) {
Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
- unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
+ unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
if (Alignment > NumBytes)
Alignment = NumBytes;
if (Alignment < 8 && Alignment < NumBytes)
Modified: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp Wed Sep 14 11:37:15 2016
@@ -5626,7 +5626,7 @@ static bool isVREVMask(ArrayRef<int> M,
assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
"Only possible block sizes for VREV are: 16, 32, 64");
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
@@ -5677,7 +5677,7 @@ static bool isVTBLMask(ArrayRef<int> M,
// want to check the low half and high half of the shuffle mask as if it were
// the other case
static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
@@ -5712,7 +5712,7 @@ static bool isVTRNMask(ArrayRef<int> M,
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
@@ -5747,7 +5747,7 @@ static bool isVTRN_v_undef_Mask(ArrayRef
// Requires similar checks to that of isVTRNMask with
// respect the how results are returned.
static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
@@ -5777,7 +5777,7 @@ static bool isVUZPMask(ArrayRef<int> M,
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>,
static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
@@ -5818,7 +5818,7 @@ static bool isVUZP_v_undef_Mask(ArrayRef
// Requires similar checks to that of isVTRNMask with respect the how results
// are returned.
static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
@@ -5851,7 +5851,7 @@ static bool isVZIPMask(ArrayRef<int> M,
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
@@ -6033,7 +6033,7 @@ SDValue ARMTargetLowering::LowerBUILD_VE
if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
// Use VDUP for non-constant splats. For f32 constant splats, reduce to
// i32 and try again.
@@ -6221,7 +6221,7 @@ SDValue ARMTargetLowering::ReconstructSh
SmallestEltTy = SrcEltTy;
}
unsigned ResMultiplier =
- VT.getVectorElementType().getSizeInBits() / SmallestEltTy.getSizeInBits();
+ VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
@@ -6307,7 +6307,7 @@ SDValue ARMTargetLowering::ReconstructSh
// The stars all align, our next step is to produce the mask for the shuffle.
SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
- int BitsPerShuffleLane = ShuffleVT.getVectorElementType().getSizeInBits();
+ int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
SDValue Entry = Op.getOperand(i);
if (Entry.isUndef())
@@ -6321,7 +6321,7 @@ SDValue ARMTargetLowering::ReconstructSh
// segment.
EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
- VT.getVectorElementType().getSizeInBits());
+ VT.getScalarSizeInBits());
int LanesDefined = BitsDefined / BitsPerShuffleLane;
// This source is expected to fill ResMultiplier lanes of the final shuffle,
@@ -6381,7 +6381,7 @@ ARMTargetLowering::isShuffleMaskLegal(co
bool ReverseVEXT, isV_UNDEF;
unsigned Imm, WhichResult;
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
return (EltSize >= 32 ||
ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
isVREVMask(M, VT, 64) ||
@@ -6524,7 +6524,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDVal
// of the same time so that they get CSEd properly.
ArrayRef<int> ShuffleMask = SVN->getMask();
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
if (EltSize <= 32) {
if (SVN->isSplat()) {
int Lane = SVN->getSplatIndex();
@@ -6699,7 +6699,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT(S
SDValue Vec = Op.getOperand(0);
if (Op.getValueType() == MVT::i32 &&
- Vec.getValueType().getVectorElementType().getSizeInBits() < 32) {
+ Vec.getValueType().getScalarSizeInBits() < 32) {
SDLoc dl(Op);
return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
}
@@ -6764,7 +6764,7 @@ static bool isExtendedBUILD_VECTOR(SDNod
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
SDNode *Elt = N->getOperand(i).getNode();
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
unsigned HalfSize = EltSize / 2;
if (isSigned) {
if (!isIntN(HalfSize, C->getSExtValue()))
@@ -6891,7 +6891,7 @@ static SDValue SkipExtensionForVMULL(SDN
// Construct a new BUILD_VECTOR with elements truncated to half the size.
assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
EVT VT = N->getValueType(0);
- unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
+ unsigned EltSize = VT.getScalarSizeInBits() / 2;
unsigned NumElts = VT.getVectorNumElements();
MVT TruncVT = MVT::getIntegerVT(EltSize);
SmallVector<SDValue, 8> Ops;
@@ -10516,14 +10516,14 @@ static SDValue PerformVDUPLANECombine(SD
return SDValue();
// Make sure the VMOV element size is not bigger than the VDUPLANE elements.
- unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits();
+ unsigned EltSize = Op.getValueType().getScalarSizeInBits();
// The canonical VMOV for a zero vector uses a 32-bit element size.
unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
unsigned EltBits;
if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
EltSize = 8;
EVT VT = N->getValueType(0);
- if (EltSize > VT.getVectorElementType().getSizeInBits())
+ if (EltSize > VT.getScalarSizeInBits())
return SDValue();
return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
@@ -10560,8 +10560,8 @@ static SDValue PerformSTORECombine(SDNod
EVT StVT = St->getMemoryVT();
unsigned NumElems = VT.getVectorNumElements();
assert(StVT != VT && "Cannot truncate to the same type");
- unsigned FromEltSz = VT.getVectorElementType().getSizeInBits();
- unsigned ToEltSz = StVT.getVectorElementType().getSizeInBits();
+ unsigned FromEltSz = VT.getScalarSizeInBits();
+ unsigned ToEltSz = StVT.getScalarSizeInBits();
// From, To sizes and ElemCount must be pow of two
if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue();
@@ -10829,7 +10829,7 @@ static bool getVShiftImm(SDValue Op, uns
/// 0 <= Value <= ElementBits for a long left shift.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
assert(VT.isVector() && "vector shift count is not a vector type");
- int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
+ int64_t ElementBits = VT.getScalarSizeInBits();
if (! getVShiftImm(Op, ElementBits, Cnt))
return false;
return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
@@ -10844,7 +10844,7 @@ static bool isVShiftLImm(SDValue Op, EVT
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
int64_t &Cnt) {
assert(VT.isVector() && "vector shift count is not a vector type");
- int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
+ int64_t ElementBits = VT.getScalarSizeInBits();
if (! getVShiftImm(Op, ElementBits, Cnt))
return false;
if (!isIntrinsic)
Modified: llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp Wed Sep 14 11:37:15 2016
@@ -816,7 +816,7 @@ void HexagonDAGToDAGISel::SelectZeroExte
SDNode *Mask = CurDAG->getMachineNode(Hexagon::C2_mask, dl, MVT::i64, Op0);
unsigned NE = OpVT.getVectorNumElements();
EVT ExVT = N->getValueType(0);
- unsigned ES = ExVT.getVectorElementType().getSizeInBits();
+ unsigned ES = ExVT.getScalarSizeInBits();
uint64_t MV = 0, Bit = 1;
for (unsigned i = 0; i < NE; ++i) {
MV |= Bit;
Modified: llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp Wed Sep 14 11:37:15 2016
@@ -2389,7 +2389,7 @@ HexagonTargetLowering::LowerVECTOR_SHUFF
if (UseHVX) {
ArrayRef<int> Mask = SVN->getMask();
size_t MaskLen = Mask.size();
- int ElemSizeInBits = VT.getVectorElementType().getSizeInBits();
+ int ElemSizeInBits = VT.getScalarSizeInBits();
if ((Subtarget.useHVXSglOps() && (ElemSizeInBits * MaskLen) == 64 * 8) ||
(Subtarget.useHVXDblOps() && (ElemSizeInBits * MaskLen) == 128 * 8)) {
// Return 1 for odd and 2 of even
Modified: llvm/trunk/lib/Target/Mips/MipsSEISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsSEISelLowering.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsSEISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsSEISelLowering.cpp Wed Sep 14 11:37:15 2016
@@ -852,7 +852,7 @@ static SDValue performDSPShiftCombine(un
APInt SplatValue, SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
- unsigned EltSize = Ty.getVectorElementType().getSizeInBits();
+ unsigned EltSize = Ty.getScalarSizeInBits();
BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
if (!Subtarget.hasDSP())
@@ -1504,7 +1504,7 @@ static SDValue lowerMSABitClear(SDValue
static SDValue lowerMSABitClearImm(SDValue Op, SelectionDAG &DAG) {
SDLoc DL(Op);
EVT ResTy = Op->getValueType(0);
- APInt BitImm = APInt(ResTy.getVectorElementType().getSizeInBits(), 1)
+ APInt BitImm = APInt(ResTy.getScalarSizeInBits(), 1)
<< cast<ConstantSDNode>(Op->getOperand(2))->getAPIntValue();
SDValue BitMask = DAG.getConstant(~BitImm, DL, ResTy);
Modified: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h Wed Sep 14 11:37:15 2016
@@ -474,7 +474,7 @@ namespace llvm {
/// then the VPERM for the shuffle. All in all a very slow sequence.
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT)
const override {
- if (VT.getVectorElementType().getSizeInBits() % 8 == 0)
+ if (VT.getScalarSizeInBits() % 8 == 0)
return TypeWidenVector;
return TargetLoweringBase::getPreferredVectorAction(VT);
}
Modified: llvm/trunk/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp Wed Sep 14 11:37:15 2016
@@ -1309,7 +1309,7 @@ void SystemZDAGToDAGISel::Select(SDNode
case ISD::INSERT_VECTOR_ELT: {
EVT VT = Node->getValueType(0);
- unsigned ElemBitSize = VT.getVectorElementType().getSizeInBits();
+ unsigned ElemBitSize = VT.getScalarSizeInBits();
if (ElemBitSize == 32) {
if (tryGather(Node, SystemZ::VGEF))
return;
Modified: llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp Wed Sep 14 11:37:15 2016
@@ -3102,7 +3102,7 @@ SDValue SystemZTargetLowering::lowerCTPO
if (VT.isVector()) {
Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
- switch (VT.getVectorElementType().getSizeInBits()) {
+ switch (VT.getScalarSizeInBits()) {
case 8:
break;
case 16: {
@@ -4377,7 +4377,7 @@ SDValue SystemZTargetLowering::lowerINSE
}
// Otherwise bitcast to the equivalent integer form and insert via a GPR.
- MVT IntVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
+ MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
@@ -4417,8 +4417,8 @@ SystemZTargetLowering::lowerExtendVector
SDValue PackedOp = Op.getOperand(0);
EVT OutVT = Op.getValueType();
EVT InVT = PackedOp.getValueType();
- unsigned ToBits = OutVT.getVectorElementType().getSizeInBits();
- unsigned FromBits = InVT.getVectorElementType().getSizeInBits();
+ unsigned ToBits = OutVT.getScalarSizeInBits();
+ unsigned FromBits = InVT.getScalarSizeInBits();
do {
FromBits *= 2;
EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits),
@@ -4435,7 +4435,7 @@ SDValue SystemZTargetLowering::lowerShif
SDValue Op1 = Op.getOperand(1);
SDLoc DL(Op);
EVT VT = Op.getValueType();
- unsigned ElemBitSize = VT.getVectorElementType().getSizeInBits();
+ unsigned ElemBitSize = VT.getScalarSizeInBits();
// See whether the shift vector is a splat represented as BUILD_VECTOR.
if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
@@ -4709,7 +4709,7 @@ const char *SystemZTargetLowering::getTa
// Return true if VT is a vector whose elements are a whole number of bytes
// in width.
static bool canTreatAsByteVector(EVT VT) {
- return VT.isVector() && VT.getVectorElementType().getSizeInBits() % 8 == 0;
+ return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0;
}
// Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
Modified: llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.h?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.h (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.h Wed Sep 14 11:37:15 2016
@@ -382,7 +382,7 @@ public:
//
// (c) there are no multiplication instructions for the widest integer
// type (v2i64).
- if (VT.getVectorElementType().getSizeInBits() % 8 == 0)
+ if (VT.getScalarSizeInBits() % 8 == 0)
return TypeWidenVector;
return TargetLoweringBase::getPreferredVectorAction(VT);
}
Modified: llvm/trunk/lib/Target/X86/Utils/X86ShuffleDecode.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/Utils/X86ShuffleDecode.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/Utils/X86ShuffleDecode.cpp (original)
+++ llvm/trunk/lib/Target/X86/Utils/X86ShuffleDecode.cpp Wed Sep 14 11:37:15 2016
@@ -136,7 +136,7 @@ void DecodePSRLDQMask(MVT VT, unsigned I
void DecodePALIGNRMask(MVT VT, unsigned Imm,
SmallVectorImpl<int> &ShuffleMask) {
unsigned NumElts = VT.getVectorNumElements();
- unsigned Offset = Imm * (VT.getVectorElementType().getSizeInBits() / 8);
+ unsigned Offset = Imm * (VT.getScalarSizeInBits() / 8);
unsigned NumLanes = VT.getSizeInBits() / 128;
unsigned NumLaneElts = NumElts / NumLanes;
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=281495&r1=281494&r2=281495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Wed Sep 14 11:37:15 2016
@@ -4277,7 +4277,7 @@ static bool isVEXTRACTIndex(SDNode *N, u
cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
MVT VT = N->getSimpleValueType(0);
- unsigned ElSize = VT.getVectorElementType().getSizeInBits();
+ unsigned ElSize = VT.getScalarSizeInBits();
bool Result = (Index * ElSize) % vecWidth == 0;
return Result;
@@ -4295,7 +4295,7 @@ static bool isVINSERTIndex(SDNode *N, un
cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
MVT VT = N->getSimpleValueType(0);
- unsigned ElSize = VT.getVectorElementType().getSizeInBits();
+ unsigned ElSize = VT.getScalarSizeInBits();
bool Result = (Index * ElSize) % vecWidth == 0;
return Result;
@@ -12561,10 +12561,10 @@ X86TargetLowering::LowerEXTRACT_VECTOR_E
if (!isa<ConstantSDNode>(Idx)) {
if (VecVT.is512BitVector() ||
(VecVT.is256BitVector() && Subtarget.hasInt256() &&
- VecVT.getVectorElementType().getSizeInBits() == 32)) {
+ VecVT.getScalarSizeInBits() == 32)) {
MVT MaskEltVT =
- MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
+ MVT::getIntegerVT(VecVT.getScalarSizeInBits());
MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
MaskEltVT.getSizeInBits());
@@ -15673,7 +15673,7 @@ static SDValue LowerVSETCC(SDValue Op, c
// In this case use SSE compare
bool UseAVX512Inst =
(OpVT.is512BitVector() ||
- OpVT.getVectorElementType().getSizeInBits() >= 32 ||
+ OpVT.getScalarSizeInBits() >= 32 ||
(Subtarget.hasBWI() && Subtarget.hasVLX()));
if (UseAVX512Inst)
@@ -16389,7 +16389,7 @@ static SDValue LowerSIGN_EXTEND_VECTOR_I
SDValue SignExt = Curr;
if (CurrVT != InVT) {
unsigned SignExtShift =
- CurrVT.getVectorElementType().getSizeInBits() - InSVT.getSizeInBits();
+ CurrVT.getScalarSizeInBits() - InSVT.getSizeInBits();
SignExt = DAG.getNode(X86ISD::VSRAI, dl, CurrVT, Curr,
DAG.getConstant(SignExtShift, dl, MVT::i8));
}
@@ -28383,7 +28383,7 @@ static SDValue performShiftToAllZeros(SD
if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
const APInt &ShiftAmt = AmtSplat->getAPIntValue();
unsigned MaxAmount =
- VT.getSimpleVT().getVectorElementType().getSizeInBits();
+ VT.getSimpleVT().getScalarSizeInBits();
// SSE2/AVX2 logical shifts always return a vector of 0s
// if the shift amount is bigger than or equal to
@@ -28902,7 +28902,7 @@ static SDValue combineLogicBlendIntoPBLE
// Validate that the Mask operand is a vector sra node.
// FIXME: what to do for bytes, since there is a psignb/pblendvb, but
// there is no psrai.b
- unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
+ unsigned EltBits = MaskVT.getScalarSizeInBits();
unsigned SraAmt = ~0;
if (Mask.getOpcode() == ISD::SRA) {
if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
@@ -29552,8 +29552,8 @@ static SDValue combineMaskedLoad(SDNode
SDLoc dl(Mld);
assert(LdVT != VT && "Cannot extend to the same type");
- unsigned ToSz = VT.getVectorElementType().getSizeInBits();
- unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
+ unsigned ToSz = VT.getScalarSizeInBits();
+ unsigned FromSz = LdVT.getScalarSizeInBits();
// From/To sizes and ElemCount must be pow of two.
assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
"Unexpected size for extending masked load");
@@ -29658,8 +29658,8 @@ static SDValue combineMaskedStore(SDNode
SDLoc dl(Mst);
assert(StVT != VT && "Cannot truncate to the same type");
- unsigned FromSz = VT.getVectorElementType().getSizeInBits();
- unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
+ unsigned FromSz = VT.getScalarSizeInBits();
+ unsigned ToSz = StVT.getScalarSizeInBits();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -29787,8 +29787,8 @@ static SDValue combineStore(SDNode *N, S
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned NumElems = VT.getVectorNumElements();
assert(StVT != VT && "Cannot truncate to the same type");
- unsigned FromSz = VT.getVectorElementType().getSizeInBits();
- unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
+ unsigned FromSz = VT.getScalarSizeInBits();
+ unsigned ToSz = StVT.getScalarSizeInBits();
// The truncating store is legal in some cases. For example
// vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw