[llvm] 022f5ad - Move isAllInactivePredicate and isAllActivePredicate definitions upwards, NFC
Dinar Temirbulatov via llvm-commits
llvm-commits at lists.llvm.org
Tue Jan 10 06:24:55 PST 2023
Author: Dinar Temirbulatov
Date: 2023-01-10T14:23:29Z
New Revision: 022f5ad3ec18e7a77fc5c0cd7ec5754379b8fe3e
URL: https://github.com/llvm/llvm-project/commit/022f5ad3ec18e7a77fc5c0cd7ec5754379b8fe3e
DIFF: https://github.com/llvm/llvm-project/commit/022f5ad3ec18e7a77fc5c0cd7ec5754379b8fe3e.diff
LOG: Move isAllInactivePredicate and isAllActivePredicate definitions upwards, NFC
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 74d61c2e4ea7..26f185350f35 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16272,6 +16272,53 @@ static bool isConstantSplatVectorMaskForType(SDNode *N, EVT MemVT) {
return false;
}
+static bool isAllInactivePredicate(SDValue N) {
+ // Look through cast.
+ while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST)
+ N = N.getOperand(0);
+
+ return ISD::isConstantSplatVectorAllZeros(N.getNode());
+}
+
+static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) {
+ unsigned NumElts = N.getValueType().getVectorMinNumElements();
+
+ // Look through cast.
+ while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST) {
+ N = N.getOperand(0);
+ // When reinterpreting from a type with fewer elements the "new" elements
+ // are not active, so bail if they're likely to be used.
+ if (N.getValueType().getVectorMinNumElements() < NumElts)
+ return false;
+ }
+
+ if (ISD::isConstantSplatVectorAllOnes(N.getNode()))
+ return true;
+
+ // "ptrue p.<ty>, all" can be considered all active when <ty> is the same size
+ // or smaller than the implicit element type represented by N.
+ // NOTE: A larger element count implies a smaller element type.
+ if (N.getOpcode() == AArch64ISD::PTRUE &&
+ N.getConstantOperandVal(0) == AArch64SVEPredPattern::all)
+ return N.getValueType().getVectorMinNumElements() >= NumElts;
+
+ // If we're compiling for a specific vector-length, we can check if the
+ // pattern's VL equals that of the scalable vector at runtime.
+ if (N.getOpcode() == AArch64ISD::PTRUE) {
+ const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
+ unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
+ unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
+ if (MaxSVESize && MinSVESize == MaxSVESize) {
+ unsigned VScale = MaxSVESize / AArch64::SVEBitsPerBlock;
+ unsigned PatNumElts =
+ getNumElementsFromSVEPredPattern(N.getConstantOperandVal(0));
+ return PatNumElts == (NumElts * VScale);
+ }
+ }
+
+ return false;
+}
+
static SDValue performSVEAndCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
if (DCI.isBeforeLegalizeOps())
@@ -17955,53 +18002,6 @@ static SDValue combineSVEReductionOrderedFP(SDNode *N, unsigned Opc,
Zero);
}
-static bool isAllInactivePredicate(SDValue N) {
- // Look through cast.
- while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST)
- N = N.getOperand(0);
-
- return ISD::isConstantSplatVectorAllZeros(N.getNode());
-}
-
-static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) {
- unsigned NumElts = N.getValueType().getVectorMinNumElements();
-
- // Look through cast.
- while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST) {
- N = N.getOperand(0);
- // When reinterpreting from a type with fewer elements the "new" elements
- // are not active, so bail if they're likely to be used.
- if (N.getValueType().getVectorMinNumElements() < NumElts)
- return false;
- }
-
- if (ISD::isConstantSplatVectorAllOnes(N.getNode()))
- return true;
-
- // "ptrue p.<ty>, all" can be considered all active when <ty> is the same size
- // or smaller than the implicit element type represented by N.
- // NOTE: A larger element count implies a smaller element type.
- if (N.getOpcode() == AArch64ISD::PTRUE &&
- N.getConstantOperandVal(0) == AArch64SVEPredPattern::all)
- return N.getValueType().getVectorMinNumElements() >= NumElts;
-
- // If we're compiling for a specific vector-length, we can check if the
- // pattern's VL equals that of the scalable vector at runtime.
- if (N.getOpcode() == AArch64ISD::PTRUE) {
- const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
- unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
- unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
- if (MaxSVESize && MinSVESize == MaxSVESize) {
- unsigned VScale = MaxSVESize / AArch64::SVEBitsPerBlock;
- unsigned PatNumElts =
- getNumElementsFromSVEPredPattern(N.getConstantOperandVal(0));
- return PatNumElts == (NumElts * VScale);
- }
- }
-
- return false;
-}
-
// If a merged operation has no inactive lanes we can relax it to a predicated
// or unpredicated operation, which potentially allows better isel (perhaps
// using immediate forms) or relaxing register reuse requirements.
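Editorial note (not part of the patch): the fixed-vector-length case in isAllActivePredicate above boils down to simple arithmetic: when the minimum and maximum SVE vector lengths coincide, vscale is a compile-time constant, and a "ptrue p.<ty>, VLn" predicate is all active exactly when n equals NumElts * vscale. The following minimal, self-contained C++ sketch mirrors that arithmetic with plain integers; SVEBitsPerBlock is 128 on AArch64, while patternCoversAllLanes and its parameters are illustrative names, not LLVM API.

#include <cassert>

// Mirrors the fixed-VL check in isAllActivePredicate: given the element count
// encoded by a ptrue VLn pattern (PatNumElts), the minimum element count of
// the scalable predicate type (NumElts), and the SVE vector-length bounds the
// code is compiled for, decide whether the pattern covers every lane.
static bool patternCoversAllLanes(unsigned PatNumElts, unsigned NumElts,
                                  unsigned MinSVESize, unsigned MaxSVESize) {
  const unsigned SVEBitsPerBlock = 128;         // one 128-bit SVE granule
  if (!MaxSVESize || MinSVESize != MaxSVESize)  // vector length not exact
    return false;
  unsigned VScale = MaxSVESize / SVEBitsPerBlock; // known runtime multiplier
  return PatNumElts == NumElts * VScale;
}

int main() {
  // With a fixed 512-bit vector length, vscale = 4, so a predicate for an
  // nxv4i32-sized type (NumElts = 4) has 16 lanes: VL16 is all active ...
  assert(patternCoversAllLanes(/*PatNumElts=*/16, /*NumElts=*/4, 512, 512));
  // ... but VL8 only covers half the lanes.
  assert(!patternCoversAllLanes(/*PatNumElts=*/8, /*NumElts=*/4, 512, 512));
  return 0;
}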