[llvm] 2a6b09e - [LV] Use type from InsertPos for cost computation of interleave groups.
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 18 19:13:20 PDT 2024
Author: Florian Hahn
Date: 2024-10-18T19:12:40-07:00
New Revision: 2a6b09e0d3d3c1a05d3d5165202a6e68900974b1
URL: https://github.com/llvm/llvm-project/commit/2a6b09e0d3d3c1a05d3d5165202a6e68900974b1
DIFF: https://github.com/llvm/llvm-project/commit/2a6b09e0d3d3c1a05d3d5165202a6e68900974b1.diff
LOG: [LV] Use type from InsertPos for cost computation of interleave groups.
Previously the legacy cost model would pick the type for the cost
computation depending on the order of the members in the input IR.
This is incompatible with the VPlan-based cost model, which is
independent of the original IR order, and it also doesn't match
code-gen, which uses the type of the insert position.
Update the legacy cost model to use the type (and address space) from
the Group's insert position.
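For illustration, consider the pattern from the new
@interleave_store_double_i64 test added below (sketched here; the full
test is in the diff):

  %gep.1 = getelementptr { double, i64 }, ptr %dst, i64 %iv, i32 1
  store i64 %iv, ptr %gep.1, align 8             ; first member in IR order
  %gep.0 = getelementptr { double, i64 }, ptr %dst, i64 %iv
  store double 0.000000e+00, ptr %gep.0, align 8 ; insert position

The i64 store comes first in IR order, but the group's insert position
is the double store (for store groups the insert position is the last
member in program order). The cost is therefore now computed with a
double-based wide vector type, matching the <4 x double> store that
code-gen emits for this group.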
This brings the legacy cost model in line with code-gen and fixes a
divergence between the legacy and VPlan-based cost models.
Note that the X86 cost model seems to assign different costs to groups
with i64 and double types. Added a TODO to check.
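For context, one hypothetical way to look into that TODO is to call the
same TTI hook the vectorizer uses below with both element types; this
is only a sketch, with the surrounding setup (an LLVMContext Ctx and a
populated TargetTransformInfo TTI) assumed and not part of this patch:

  // Probe X86 interleaved-store costs for i64 vs. double elements:
  // factor 2, VF 2 (wide type <4 x elt>), all members present.
  auto *I64VecTy = FixedVectorType::get(Type::getInt64Ty(Ctx), 4);
  auto *F64VecTy = FixedVectorType::get(Type::getDoubleTy(Ctx), 4);
  unsigned Indices[] = {0, 1};
  InstructionCost CostI64 = TTI.getInterleavedMemoryOpCost(
      Instruction::Store, I64VecTy, /*Factor=*/2, Indices, Align(8),
      /*AddressSpace=*/0, TTI::TCK_RecipThroughput);
  InstructionCost CostF64 = TTI.getInterleavedMemoryOpCost(
      Instruction::Store, F64VecTy, /*Factor=*/2, Indices, Align(8),
      /*AddressSpace=*/0, TTI::TCK_RecipThroughput);
  // If CostI64 != CostF64, the X86 cost tables distinguish integer and
  // FP element types for otherwise identical interleaved stores.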
Fixes https://github.com/llvm/llvm-project/issues/112922.
Added:
Modified:
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 857efbdf687cb8..ce0903b838aa8a 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -5738,14 +5738,15 @@ LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
InstructionCost
LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
ElementCount VF) {
- Type *ValTy = getLoadStoreType(I);
- auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
- unsigned AS = getLoadStoreAddressSpace(I);
- enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
-
const auto *Group = getInterleavedAccessGroup(I);
assert(Group && "Fail to get an interleaved access group.");
+ Instruction *InsertPos = Group->getInsertPos();
+ Type *ValTy = getLoadStoreType(InsertPos);
+ auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+ unsigned AS = getLoadStoreAddressSpace(InsertPos);
+ enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
+
unsigned InterleaveFactor = Group->getFactor();
auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
@@ -5760,8 +5761,9 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
(Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
(isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
- I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
- AS, CostKind, Legal->isMaskRequired(I), UseMaskForGaps);
+ InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
+ Group->getAlign(), AS, CostKind, Legal->isMaskRequired(I),
+ UseMaskForGaps);
if (Group->isReverse()) {
// TODO: Add support for reversed masked interleaved access.
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index f4a1f58debbaef..41f13cc2d9a978 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -2958,11 +2958,20 @@ void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
InstructionCost VPInterleaveRecipe::computeCost(ElementCount VF,
VPCostContext &Ctx) const {
- Instruction *I = getInsertPos();
+ Instruction *InsertPos = getInsertPos();
+ // Find the VPValue index of the insert position in the group; skip gaps.
+ unsigned InsertPosIdx = 0;
+ for (unsigned Idx = 0; Idx < IG->getFactor(); ++Idx)
+ if (auto *Member = IG->getMember(Idx)) {
+ if (Member == InsertPos)
+ break;
+ InsertPosIdx++;
+ }
Type *ValTy = Ctx.Types.inferScalarType(
- getNumDefinedValues() > 0 ? getVPValue(0) : getStoredValues()[0]);
+ getNumDefinedValues() > 0 ? getVPValue(InsertPosIdx)
+ : getStoredValues()[InsertPosIdx]);
auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
- unsigned AS = getLoadStoreAddressSpace(I);
+ unsigned AS = getLoadStoreAddressSpace(InsertPos);
enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
unsigned InterleaveFactor = IG->getFactor();
@@ -2976,8 +2985,8 @@ InstructionCost VPInterleaveRecipe::computeCost(ElementCount VF,
// Calculate the cost of the whole interleaved group.
InstructionCost Cost = Ctx.TTI.getInterleavedMemoryOpCost(
- I->getOpcode(), WideVecTy, IG->getFactor(), Indices, IG->getAlign(), AS,
- CostKind, getMask(), NeedsMaskForGaps);
+ InsertPos->getOpcode(), WideVecTy, IG->getFactor(), Indices,
+ IG->getAlign(), AS, CostKind, getMask(), NeedsMaskForGaps);
if (!IG->isReverse())
return Cost;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll
index 7d1d326641e124..ad0068dc3f6be7 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll
@@ -586,6 +586,184 @@ exit:
ret void
}
+; Test case for https://github.com/llvm/llvm-project/issues/112922.
+define void @interleave_store_double_i64(ptr %dst) {
+; CHECK-LABEL: define void @interleave_store_double_i64(
+; CHECK-SAME: ptr [[DST:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[VEC_IND]] to <2 x double>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> zeroinitializer, <2 x double> [[TMP2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x double> [[TMP3]], <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], <i64 2, i64 2>
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[IV]], i32 1
+; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_1]], align 8
+; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_0]], align 8
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.1 = getelementptr { double, i64 }, ptr %dst, i64 %iv, i32 1
+ store i64 %iv, ptr %gep.1, align 8
+ %gep.0 = getelementptr { double, i64 }, ptr %dst, i64 %iv
+ store double 0.000000e+00, ptr %gep.0, align 8
+ %iv.next = add i64 %iv, 1
+ %ec = icmp eq i64 %iv, 1
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+define void @interleave_store_i64_double(ptr %dst) {
+; CHECK-LABEL: define void @interleave_store_i64_double(
+; CHECK-SAME: ptr [[DST:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_0]], align 8
+; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[IV]], i32 1
+; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_1]], align 8
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.0 = getelementptr { double, i64 }, ptr %dst, i64 %iv
+ store double 0.000000e+00, ptr %gep.0, align 8
+ %gep.1 = getelementptr { double, i64 }, ptr %dst, i64 %iv, i32 1
+ store i64 %iv, ptr %gep.1, align 8
+ %iv.next = add i64 %iv, 1
+ %ec = icmp eq i64 %iv, 1
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; TODO: The interleave group should likely have the same cost as @interleave_store_double_i64.
+define void @interleave_store_double_i64_2(ptr %dst) {
+; CHECK-LABEL: define void @interleave_store_double_i64_2(
+; CHECK-SAME: ptr [[DST:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[IV]], i32 1
+; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_1]], align 8
+; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_0]], align 8
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.1 = getelementptr { i64, double }, ptr %dst, i64 %iv, i32 1
+ store double 0.000000e+00, ptr %gep.1, align 8
+ %gep.0 = getelementptr { i64, double }, ptr %dst, i64 %iv
+ store i64 %iv, ptr %gep.0, align 8
+ %iv.next = add i64 %iv, 1
+ %ec = icmp eq i64 %iv, 1
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+define void @interleave_store_i64_double_2(ptr %dst) {
+; CHECK-LABEL: define void @interleave_store_i64_double_2(
+; CHECK-SAME: ptr [[DST:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[VEC_IND]] to <2 x double>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x double> [[TMP3]], <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], <i64 2, i64 2>
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_0]], align 8
+; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[IV]], i32 1
+; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_1]], align 8
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.0 = getelementptr { i64, double }, ptr %dst, i64 %iv
+ store i64 %iv, ptr %gep.0, align 8
+ %gep.1 = getelementptr { i64, double }, ptr %dst, i64 %iv, i32 1
+ store double 0.000000e+00, ptr %gep.1, align 8
+ %iv.next = add i64 %iv, 1
+ %ec = icmp eq i64 %iv, 1
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+
+
attributes #0 = { "target-features"="+sse4.2" }
attributes #1 = { "min-legal-vector-width"="0" "target-cpu"="cascadelake" }
@@ -601,4 +779,8 @@ attributes #1 = { "min-legal-vector-width"="0" "target-cpu"="cascadelake" }
; CHECK: [[META8]] = distinct !{[[META8]], !"LVerDomain"}
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]}
; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]]}
+; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]}
+; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META2]], [[META1]]}
+; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META2]]}
+; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META2]], [[META1]]}
;.