[llvm] a2f4c80 - [X86] getTargetConstantBitsFromNode - add explicit support for whole/partial undef elements to all constant folding uses.
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sun Jan 28 05:53:12 PST 2024
Author: Simon Pilgrim
Date: 2024-01-28T13:53:01Z
New Revision: a2f4c80ea8792f6d4c27e4cf9967400ac65d9ec6
URL: https://github.com/llvm/llvm-project/commit/a2f4c80ea8792f6d4c27e4cf9967400ac65d9ec6
DIFF: https://github.com/llvm/llvm-project/commit/a2f4c80ea8792f6d4c27e4cf9967400ac65d9ec6.diff
LOG: [X86] getTargetConstantBitsFromNode - add explicit support for whole/partial undef elements to all constant folding uses.
An NFC step before I start restricting the use of undef elements in getTargetConstantBitsFromNode
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e1a4a7fb5d15ef8..77bb87d1d35d1c6 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -38836,7 +38836,9 @@ static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
for (unsigned I = 0; I != NumOps; ++I)
if (!getTargetConstantBitsFromNode(Ops[I], MaskSizeInBits, UndefEltsOps[I],
- RawBitsOps[I]))
+ RawBitsOps[I],
+ /*AllowWholeUndefs*/ true,
+ /*AllowPartialUndefs*/ true))
return SDValue();
// If we're optimizing for size, only fold if at least one of the constants is
@@ -39263,7 +39265,9 @@ static SDValue combineX86ShufflesRecursively(
SmallVector<APInt> RawBits;
unsigned EltSizeInBits = RootSizeInBits / Mask.size();
return getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
- RawBits);
+ RawBits,
+ /*AllowWholeUndefs*/ true,
+ /*AllowPartialUndefs*/ true);
})) {
return SDValue();
}
@@ -42981,7 +42985,9 @@ static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
// Detect MMX constant vectors.
APInt UndefElts;
SmallVector<APInt, 1> EltBits;
- if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
+ if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits,
+ /*AllowWholeUndefs*/ true,
+ /*AllowPartialUndefs*/ true)) {
SDLoc DL(N0);
// Handle zero-extension of i32 with MOVD.
if (EltBits[0].countl_zero() >= 32)
@@ -47305,8 +47311,12 @@ static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
SmallVector<APInt, 32> EltBits0, EltBits1;
if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
(N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
- getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
- getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
+ getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0,
+ /*AllowWholeUndefs*/ true,
+ /*AllowPartialUndefs*/ true) &&
+ getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1,
+ /*AllowWholeUndefs*/ true,
+ /*AllowPartialUndefs*/ true)) {
unsigned NumLanes = VT.getSizeInBits() / 128;
unsigned NumSrcElts = NumDstElts / 2;
unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
@@ -47613,7 +47623,9 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
auto TryConstantFold = [&](SDValue V) {
APInt UndefElts;
SmallVector<APInt, 32> EltBits;
- if (!getTargetConstantBitsFromNode(V, NumBitsPerElt, UndefElts, EltBits))
+ if (!getTargetConstantBitsFromNode(V, NumBitsPerElt, UndefElts, EltBits,
+ /*AllowWholeUndefs*/ true,
+ /*AllowPartialUndefs*/ true))
return SDValue();
assert(EltBits.size() == VT.getVectorNumElements() &&
"Unexpected shift value type");
@@ -52233,8 +52245,12 @@ static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
// Constant Folding
APInt Undefs0, Undefs1;
SmallVector<APInt> EltBits0, EltBits1;
- if (getTargetConstantBitsFromNode(N0, EltSizeInBits, Undefs0, EltBits0)) {
- if (getTargetConstantBitsFromNode(N1, EltSizeInBits, Undefs1, EltBits1)) {
+ if (getTargetConstantBitsFromNode(N0, EltSizeInBits, Undefs0, EltBits0,
+ /*AllowWholeUndefs*/ true,
+ /*AllowPartialUndefs*/ true)) {
+ if (getTargetConstantBitsFromNode(N1, EltSizeInBits, Undefs1, EltBits1,
+ /*AllowWholeUndefs*/ true,
+ /*AllowPartialUndefs*/ true)) {
SmallVector<APInt> ResultBits;
for (int I = 0; I != NumElts; ++I)
ResultBits.push_back(~EltBits0[I] & EltBits1[I]);
@@ -53175,7 +53191,9 @@ static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
// Perform constant folding.
APInt UndefElts;
SmallVector<APInt, 32> EltBits;
- if (getTargetConstantBitsFromNode(Src, NumBitsPerElt, UndefElts, EltBits)) {
+ if (getTargetConstantBitsFromNode(Src, NumBitsPerElt, UndefElts, EltBits,
+ /*AllowWholeUndefs*/ true,
+ /*AllowPartialUndefs*/ true)) {
APInt Imm(32, 0);
for (unsigned Idx = 0; Idx != NumElts; ++Idx)
if (!UndefElts[Idx] && EltBits[Idx].isNegative())
More information about the llvm-commits
mailing list