[llvm] [VPlan] Add opcode to create step for wide inductions. (PR #119284)
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Tue Jan 21 05:11:47 PST 2025
https://github.com/fhahn updated https://github.com/llvm/llvm-project/pull/119284
From a55fe62f83766951852ed10559eb731d13aab4e8 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Mon, 20 Jan 2025 14:51:35 +0000
Subject: [PATCH 1/3] Simplify ScalarIVSteps
---
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 4 +++-
llvm/lib/Transforms/Vectorize/VPlan.h | 4 ++++
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 11 +++++++++--
llvm/lib/Transforms/Vectorize/VPlanTransforms.h | 2 ++
.../LoopVectorize/PowerPC/vectorize-bswap.ll | 7 +++----
.../Transforms/LoopVectorize/pointer-induction.ll | 6 ++----
6 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 29f3940ed6fa7a..bff06114c9b0c1 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7652,7 +7652,9 @@ DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
VPlanTransforms::unrollByUF(BestVPlan, BestUF,
OrigLoop->getHeader()->getContext());
VPlanTransforms::optimizeForVFAndUF(BestVPlan, BestVF, BestUF, PSE);
- VPlanTransforms::convertToConcreteRecipes(BestVPlan);
+ VPlanTransforms::simplifyRecipes(BestVPlan, Legal->getWidestInductionType());
+ VPlanTransforms::removeDeadRecipes(BestVPlan);
+ VPlanTransforms::convertToConcreteRecipes(BestVPlan, Legal->getWidestInductionType());
// Perform the actual loop transformation.
VPTransformState State(&TTI, BestVF, BestUF, LI, DT, ILV.Builder, &ILV,
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index db45ad8aadbbe3..56fb84571f87aa 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -4026,6 +4026,10 @@ class VPlan {
UFs.insert(UF);
}
+ bool hasSingleUF() const {
+ return UFs.size() == 1;
+ }
+
/// Return a string with the name of the plan and the applicable VFs and UFs.
std::string getName() const;
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 9febd612c644e1..d623926663e4b7 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -888,6 +888,14 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
return;
}
+ if (auto *Steps = dyn_cast<VPScalarIVStepsRecipe>(&R)) {
+ if (Steps->getParent()->getPlan()->hasSingleUF() && Steps->getNumOperands() == 2 &&
+ vputils::onlyFirstLaneUsed(Steps)) {
+ Steps->replaceAllUsesWith(Steps->getOperand(0));
+ return;
+ }
+ }
+
VPValue *A;
if (match(&R, m_Trunc(m_ZExtOrSExt(m_VPValue(A))))) {
VPValue *Trunc = R.getVPSingleValue();
@@ -964,7 +972,7 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
/// Try to simplify the recipes in \p Plan. Use \p CanonicalIVTy as type for all
/// un-typed live-ins in VPTypeAnalysis.
-static void simplifyRecipes(VPlan &Plan, Type *CanonicalIVTy) {
+void VPlanTransforms::simplifyRecipes(VPlan &Plan, Type *CanonicalIVTy) {
ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
Plan.getEntry());
VPTypeAnalysis TypeInfo(CanonicalIVTy);
@@ -1041,7 +1049,6 @@ void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
}
Term->eraseFromParent();
- VPlanTransforms::removeDeadRecipes(Plan);
Plan.setVF(BestVF);
Plan.setUF(BestUF);
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
index a751b8b5e8dc59..af75816d595d7b 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
@@ -138,6 +138,8 @@ struct VPlanTransforms {
/// Lower abstract recipes to concrete ones, that can be codegen'd.
static void convertToConcreteRecipes(VPlan &Plan);
+static void simplifyRecipes(VPlan &Plan, Type *CanonicalIVTy);
+
/// If there's a single exit block, optimize its phi recipes that use exiting
/// IV values by feeding them precomputed end values instead, possibly taken
/// one step backwards.
diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-bswap.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-bswap.ll
index b3157b75bc26fd..a515b10bb7d62a 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-bswap.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-bswap.ll
@@ -15,13 +15,12 @@ define dso_local void @test(ptr %Arr, i32 signext %Len) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0
-; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[INDEX]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[ARR:%.*]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 0
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP5:%.*]] = sext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = sext i32 [[INDEX]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 0
; CHECK-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP7]], align 4
@@ -51,7 +50,7 @@ define dso_local void @test(ptr %Arr, i32 signext %Len) {
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nsw i32 [[I_02]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], [[LEN]]
-; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_COND_CLEANUP_CRIT_EDGE]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_COND_CLEANUP_CRIT_EDGE]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/pointer-induction.ll b/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
index 531164a2c5dd05..526a7c9ed7a1fe 100644
--- a/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
@@ -144,8 +144,7 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> <i64 0, i64 1, i64 2, i64 3>
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 0
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[TMP1]]
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, <4 x ptr> [[VECTOR_GEP]], i64 1
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr ptr, ptr [[NEXT_GEP]], i32 0
; CHECK-NEXT: store <4 x ptr> [[TMP2]], ptr [[TMP3]], align 8
@@ -240,8 +239,7 @@ define void @non_constant_vector_expansion(i32 %0, ptr %call) {
; STRIDED-NEXT: [[TMP4:%.*]] = mul <4 x i64> <i64 0, i64 1, i64 2, i64 3>, [[DOTSPLAT]]
; STRIDED-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> [[TMP4]]
; STRIDED-NEXT: [[OFFSET_IDX:%.*]] = trunc i64 [[INDEX]] to i32
-; STRIDED-NEXT: [[TMP5:%.*]] = add i32 [[OFFSET_IDX]], 0
-; STRIDED-NEXT: [[TMP6:%.*]] = getelementptr ptr, ptr [[CALL:%.*]], i32 [[TMP5]]
+; STRIDED-NEXT: [[TMP6:%.*]] = getelementptr ptr, ptr [[CALL:%.*]], i32 [[OFFSET_IDX]]
; STRIDED-NEXT: [[TMP7:%.*]] = getelementptr ptr, ptr [[TMP6]], i32 0
; STRIDED-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP7]], align 4
; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
From 8c359c794e5c14cc7c063161cd2abe487fc5a9e2 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Mon, 9 Dec 2024 22:56:28 +0000
Subject: [PATCH 2/3] [VPlan] Add opcode to create step for wide inductions.
This patch adds a WideIVStep opcode that can be used to create a vector
with the steps to increment a wide induction. The opcode has 3 operands:
* the vector step
* the scale of the vector step
* a constant indicating the target type of the VPInstruction (this works
  around VPInstructions not having explicit types; we could also
  introduce a dedicated recipe, at the cost of a lot more scaffolding)

The opcode is later converted into a sequence of recipes that convert
the scale and step to the target type, if needed, and then multiply the
vector step by the scale.

This simplifies code that needs to materialize step vectors, e.g. when
replacing wide IVs with an increment by the wide IV step, as a follow-up
to https://github.com/llvm/llvm-project/pull/108378.
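
To make the lowering concrete, here is a minimal standalone C++ sketch of
the decisions made when lowering WideIVStep (this is not VPlan code; the
enum, function names, and message strings are illustrative only):

  #include <cstdio>

  enum class IVKind { Integer, FloatingPoint };

  // Sketch of how a WideIVStep is expanded into concrete recipes.
  void lowerWideIVStep(IVKind Kind, bool StepMatchesIVTy,
                       bool ScaleMatchesIVTy, bool ScaleIsConstantOne) {
    // 1) Cast the vector step to the IV type if the types differ
    //    (uitofp for FP inductions, trunc for integer ones).
    if (!StepMatchesIVTy)
      std::puts(Kind == IVKind::FloatingPoint ? "widen uitofp vector-step"
                                              : "widen trunc vector-step");
    // 2) A constant scale of 1 needs no multiply; the (possibly cast)
    //    step is the result.
    if (ScaleIsConstantOne)
      return;
    // 3) Otherwise truncate the scale if needed and multiply, using fmul
    //    with the recorded fast-math flags for FP inductions.
    if (!ScaleMatchesIVTy)
      std::puts("widen trunc scale");
    std::puts(Kind == IVKind::FloatingPoint ? "fmul vector-step, scale"
                                            : "mul vector-step, scale");
  }

  int main() { lowerWideIVStep(IVKind::Integer, true, true, false); }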
---
llvm/lib/Transforms/Vectorize/VPlan.h | 1 +
.../lib/Transforms/Vectorize/VPlanRecipes.cpp | 3 +-
.../Transforms/Vectorize/VPlanTransforms.cpp | 63 +++++++++++++++----
llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp | 32 +++-------
4 files changed, 62 insertions(+), 37 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 56fb84571f87aa..d9e7e6ec754fb9 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -1213,6 +1213,7 @@ class VPInstruction : public VPRecipeWithIRFlags,
CalculateTripCountMinusVF,
// Increment the canonical IV separately for each unrolled part.
CanonicalIVIncrementForPart,
+ WideIVStep,
BranchOnCount,
BranchOnCond,
ComputeReductionResult,
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index aa5f92b235555e..b808442c454f14 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -719,7 +719,8 @@ bool VPInstruction::isFPMathOp() const {
return Opcode == Instruction::FAdd || Opcode == Instruction::FMul ||
Opcode == Instruction::FNeg || Opcode == Instruction::FSub ||
Opcode == Instruction::FDiv || Opcode == Instruction::FRem ||
- Opcode == Instruction::FCmp || Opcode == Instruction::Select;
+ Opcode == Instruction::FCmp || Opcode == Instruction::Select ||
+ Opcode == VPInstruction::WideIVStep;
}
#endif
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index d623926663e4b7..028c4464942439 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -2051,20 +2051,61 @@ void VPlanTransforms::createInterleaveGroups(
}
void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) {
+ Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
+ VPTypeAnalysis TypeInfo(CanonicalIVType);
+
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
vp_depth_first_deep(Plan.getEntry()))) {
- for (VPRecipeBase &R : make_early_inc_range(VPBB->phis())) {
- if (!isa<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe>(&R))
+ for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
+ if (isa<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe>(&R)) {
+ auto *PhiR = cast<VPHeaderPHIRecipe>(&R);
+ StringRef Name =
+ isa<VPCanonicalIVPHIRecipe>(PhiR) ? "index" : "evl.based.iv";
+ auto *ScalarR = new VPScalarPHIRecipe(PhiR->getStartValue(),
+ PhiR->getBackedgeValue(),
+ PhiR->getDebugLoc(), Name);
+ ScalarR->insertBefore(PhiR);
+ PhiR->replaceAllUsesWith(ScalarR);
+ PhiR->eraseFromParent();
continue;
- auto *PhiR = cast<VPHeaderPHIRecipe>(&R);
- StringRef Name =
- isa<VPCanonicalIVPHIRecipe>(PhiR) ? "index" : "evl.based.iv";
- auto *ScalarR =
- new VPScalarPHIRecipe(PhiR->getStartValue(), PhiR->getBackedgeValue(),
- PhiR->getDebugLoc(), Name);
- ScalarR->insertBefore(PhiR);
- PhiR->replaceAllUsesWith(ScalarR);
- PhiR->eraseFromParent();
+ }
+
+ auto *VPI = dyn_cast<VPInstruction>(&R);
+ if (VPI && VPI->getOpcode() == VPInstruction::WideIVStep) {
+ VPBuilder Builder(VPI->getParent(), VPI->getIterator());
+ VPValue *VectorStep = VPI->getOperand(0);
+ Type *IVTy = TypeInfo.inferScalarType(VPI->getOperand(2));
+ if (TypeInfo.inferScalarType(VectorStep) != IVTy) {
+ Instruction::CastOps CastOp = IVTy->isFloatingPointTy()
+ ? Instruction::UIToFP
+ : Instruction::Trunc;
+ VectorStep = Builder.createWidenCast(CastOp, VectorStep, IVTy);
+ }
+
+ VPValue *ScalarStep = VPI->getOperand(1);
+ auto *ConstStep =
+ ScalarStep->isLiveIn()
+ ? dyn_cast<ConstantInt>(ScalarStep->getLiveInIRValue())
+ : nullptr;
+ if (!ConstStep || ConstStep->getValue() != 1) {
+ if (TypeInfo.inferScalarType(ScalarStep) != IVTy) {
+ ScalarStep =
+ Builder.createWidenCast(Instruction::Trunc, ScalarStep, IVTy);
+ }
+
+ std::optional<FastMathFlags> FMFs;
+ if (IVTy->isFloatingPointTy())
+ FMFs = VPI->getFastMathFlags();
+
+ unsigned MulOpc =
+ IVTy->isFloatingPointTy() ? Instruction::FMul : Instruction::Mul;
+ VPInstruction *Mul = Builder.createNaryOp(
+ MulOpc, {VectorStep, ScalarStep}, FMFs, R.getDebugLoc());
+ VectorStep = Mul;
+ }
+ VPI->replaceAllUsesWith(VectorStep);
+ VPI->eraseFromParent();
+ }
}
}
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
index 89e372d6b46cfd..706048c47e91c2 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
@@ -155,33 +155,15 @@ void UnrollState::unrollWidenInductionByUF(
if (isa_and_present<FPMathOperator>(ID.getInductionBinOp()))
FMFs = ID.getInductionBinOp()->getFastMathFlags();
- VPValue *VectorStep = &Plan.getVF();
- VPBuilder Builder(PH);
- if (TypeInfo.inferScalarType(VectorStep) != IVTy) {
- Instruction::CastOps CastOp =
- IVTy->isFloatingPointTy() ? Instruction::UIToFP : Instruction::Trunc;
- VectorStep = Builder.createWidenCast(CastOp, VectorStep, IVTy);
- ToSkip.insert(VectorStep->getDefiningRecipe());
- }
-
VPValue *ScalarStep = IV->getStepValue();
- auto *ConstStep = ScalarStep->isLiveIn()
- ? dyn_cast<ConstantInt>(ScalarStep->getLiveInIRValue())
- : nullptr;
- if (!ConstStep || ConstStep->getValue() != 1) {
- if (TypeInfo.inferScalarType(ScalarStep) != IVTy) {
- ScalarStep =
- Builder.createWidenCast(Instruction::Trunc, ScalarStep, IVTy);
- ToSkip.insert(ScalarStep->getDefiningRecipe());
- }
+ VPBuilder Builder(PH);
+ VPInstruction *VectorStep =
+ Builder.createNaryOp(VPInstruction::WideIVStep,
+ {&Plan.getVF(), ScalarStep,
+ Plan.getOrAddLiveIn(Constant::getNullValue(IVTy))},
+ FMFs, IV->getDebugLoc());
- unsigned MulOpc =
- IVTy->isFloatingPointTy() ? Instruction::FMul : Instruction::Mul;
- VPInstruction *Mul = Builder.createNaryOp(MulOpc, {VectorStep, ScalarStep},
- FMFs, IV->getDebugLoc());
- VectorStep = Mul;
- ToSkip.insert(Mul);
- }
+ ToSkip.insert(VectorStep);
// Now create recipes to compute the induction steps for part 1 .. UF. Part 0
// remains the header phi. Parts > 0 are computed by adding Step to the
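
As the comment above notes, parts 1 .. UF-1 are computed by repeatedly
adding the materialized step. A small standalone C++ model of the lane-0
start value of each unrolled part of an integer wide IV (assumed semantics
for illustration, not LLVM code; the names are made up):

  #include <cstdint>
  #include <vector>

  // Part K of a wide integer IV starts at IVStart + K * (VF * Step);
  // VF * Step is the value the WideIVStep opcode materializes (the
  // multiply is folded away when Step is the constant 1).
  std::vector<int64_t> partStarts(int64_t IVStart, int64_t Step, int64_t VF,
                                  unsigned UF) {
    const int64_t WideStep = VF * Step;
    std::vector<int64_t> Starts;
    for (unsigned Part = 0; Part != UF; ++Part)
      Starts.push_back(IVStart + int64_t(Part) * WideStep);
    return Starts;
  }

For example, with IVStart = 0, Step = 2, VF = 4 and UF = 2 this yields
{0, 8}: the IV advances by 2 per scalar iteration, so the second vector
part starts at scalar iteration 4, i.e. value 8.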
From 6b671a750396006c75389817bf29087bd163c590 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Tue, 21 Jan 2025 13:09:11 +0000
Subject: [PATCH 3/3] !fixup address latest comments, thanks
---
.../Transforms/Vectorize/VPlanPatternMatch.h | 17 ++++
.../lib/Transforms/Vectorize/VPlanRecipes.cpp | 1 +
.../Transforms/Vectorize/VPlanTransforms.cpp | 87 +++++++++++--------
.../Transforms/Vectorize/VPlanTransforms.h | 4 +-
4 files changed, 70 insertions(+), 39 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h
index 4866426ad88486..ccd7f64ff00b6c 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h
@@ -251,6 +251,23 @@ m_VPInstruction(const Op0_t &Op0, const Op1_t &Op1) {
return BinaryVPInstruction_match<Op0_t, Op1_t, Opcode>(Op0, Op1);
}
+template <typename Op0_t, typename Op1_t, typename Op2_t, unsigned Opcode,
+ bool Commutative, typename... RecipeTys>
+using TernaryRecipe_match = Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t>,
+ Opcode, Commutative, RecipeTys...>;
+
+template <typename Op0_t, typename Op1_t, typename Op2_t, unsigned Opcode>
+using TernaryVPInstruction_match =
+ TernaryRecipe_match<Op0_t, Op1_t, Op2_t, Opcode, /*Commutative*/ false,
+ VPInstruction>;
+
+template <unsigned Opcode, typename Op0_t, typename Op1_t, typename Op2_t>
+inline TernaryVPInstruction_match<Op0_t, Op1_t, Op2_t, Opcode>
+m_VPInstruction(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
+ return TernaryVPInstruction_match<Op0_t, Op1_t, Op2_t, Opcode>(
+ {Op0, Op1, Op2});
+}
+
template <typename Op0_t>
inline UnaryVPInstruction_match<Op0_t, VPInstruction::Not>
m_Not(const Op0_t &Op0) {
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index b808442c454f14..8a2901b020ebe2 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -773,6 +773,7 @@ bool VPInstruction::opcodeMayReadOrWriteFromMemory() const {
case VPInstruction::LogicalAnd:
case VPInstruction::Not:
case VPInstruction::PtrAdd:
+ case VPInstruction::WideIVStep:
return false;
default:
return true;
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 028c4464942439..8c4c699906fd01 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -968,6 +968,16 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
TypeInfo.inferScalarType(R.getOperand(1)) ==
TypeInfo.inferScalarType(R.getVPSingleValue()))
return R.getVPSingleValue()->replaceAllUsesWith(R.getOperand(1));
+
+ if (match(&R, m_VPInstruction<VPInstruction::WideIVStep>(
+ m_VPValue(X), m_SpecificInt(1), m_VPValue(Y)))) {
+ if (TypeInfo.inferScalarType(X) != TypeInfo.inferScalarType(Y)) {
+ X = new VPWidenCastRecipe(Instruction::Trunc, X,
+ TypeInfo.inferScalarType(Y));
+ X->getDefiningRecipe()->insertBefore(&R);
+ }
+ R.getVPSingleValue()->replaceAllUsesWith(X);
+ }
}
/// Try to simplify the recipes in \p Plan. Use \p CanonicalIVTy as type for all
@@ -2050,9 +2060,10 @@ void VPlanTransforms::createInterleaveGroups(
}
}
-void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) {
- Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
- VPTypeAnalysis TypeInfo(CanonicalIVType);
+void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan,
+ Type *CanonicalIVTy) {
+ using namespace llvm::VPlanPatternMatch;
+ VPTypeAnalysis TypeInfo(CanonicalIVTy);
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
vp_depth_first_deep(Plan.getEntry()))) {
@@ -2070,42 +2081,44 @@ void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) {
continue;
}
- auto *VPI = dyn_cast<VPInstruction>(&R);
- if (VPI && VPI->getOpcode() == VPInstruction::WideIVStep) {
- VPBuilder Builder(VPI->getParent(), VPI->getIterator());
- VPValue *VectorStep = VPI->getOperand(0);
- Type *IVTy = TypeInfo.inferScalarType(VPI->getOperand(2));
- if (TypeInfo.inferScalarType(VectorStep) != IVTy) {
- Instruction::CastOps CastOp = IVTy->isFloatingPointTy()
- ? Instruction::UIToFP
- : Instruction::Trunc;
- VectorStep = Builder.createWidenCast(CastOp, VectorStep, IVTy);
- }
-
- VPValue *ScalarStep = VPI->getOperand(1);
- auto *ConstStep =
- ScalarStep->isLiveIn()
- ? dyn_cast<ConstantInt>(ScalarStep->getLiveInIRValue())
- : nullptr;
- if (!ConstStep || ConstStep->getValue() != 1) {
- if (TypeInfo.inferScalarType(ScalarStep) != IVTy) {
- ScalarStep =
- Builder.createWidenCast(Instruction::Trunc, ScalarStep, IVTy);
- }
-
- std::optional<FastMathFlags> FMFs;
- if (IVTy->isFloatingPointTy())
- FMFs = VPI->getFastMathFlags();
+ VPValue *VectorStep;
+ VPValue *ScalarStep;
+ VPValue *IVTyOp;
+ if (!match(&R, m_VPInstruction<VPInstruction::WideIVStep>(
+ m_VPValue(VectorStep), m_VPValue(ScalarStep),
+ m_VPValue(IVTyOp))))
+ continue;
+ auto *VPI = cast<VPInstruction>(&R);
+ VPBuilder Builder(VPI->getParent(), VPI->getIterator());
+ Type *IVTy = TypeInfo.inferScalarType(IVTyOp);
+ if (TypeInfo.inferScalarType(VectorStep) != IVTy) {
+ Instruction::CastOps CastOp = IVTy->isFloatingPointTy()
+ ? Instruction::UIToFP
+ : Instruction::Trunc;
+ VectorStep = Builder.createWidenCast(CastOp, VectorStep, IVTy);
+ }
- unsigned MulOpc =
- IVTy->isFloatingPointTy() ? Instruction::FMul : Instruction::Mul;
- VPInstruction *Mul = Builder.createNaryOp(
- MulOpc, {VectorStep, ScalarStep}, FMFs, R.getDebugLoc());
- VectorStep = Mul;
- }
- VPI->replaceAllUsesWith(VectorStep);
- VPI->eraseFromParent();
+ auto *ConstStep =
+ ScalarStep->isLiveIn()
+ ? dyn_cast<ConstantInt>(ScalarStep->getLiveInIRValue())
+ : nullptr;
+ assert(!ConstStep || ConstStep->getValue() != 1);
+ if (TypeInfo.inferScalarType(ScalarStep) != IVTy) {
+ ScalarStep =
+ Builder.createWidenCast(Instruction::Trunc, ScalarStep, IVTy);
}
+
+ std::optional<FastMathFlags> FMFs;
+ if (IVTy->isFloatingPointTy())
+ FMFs = VPI->getFastMathFlags();
+
+ unsigned MulOpc =
+ IVTy->isFloatingPointTy() ? Instruction::FMul : Instruction::Mul;
+ VPInstruction *Mul = Builder.createNaryOp(
+ MulOpc, {VectorStep, ScalarStep}, FMFs, R.getDebugLoc());
+ VectorStep = Mul;
+ VPI->replaceAllUsesWith(VectorStep);
+ VPI->eraseFromParent();
}
}
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
index af75816d595d7b..3da18dbf7a9da1 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
@@ -136,9 +136,9 @@ struct VPlanTransforms {
VPRecipeBuilder &RecipeBuilder);
/// Lower abstract recipes to concrete ones, that can be codegen'd.
- static void convertToConcreteRecipes(VPlan &Plan);
+ static void convertToConcreteRecipes(VPlan &Plan, Type *CanonicalIVTy);
-static void simplifyRecipes(VPlan &Plan, Type *CanonicalIVTy);
+ static void simplifyRecipes(VPlan &Plan, Type *CanonicalIVTy);
/// If there's a single exit block, optimize its phi recipes that use exiting
/// IV values by feeding them precomputed end values instead, possibly taken