[llvm] 1a85027 - [LV] Re-compute cost of scalarized load users.
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Wed Oct 1 14:33:45 PDT 2025
Author: Florian Hahn
Date: 2025-10-01T22:30:18+01:00
New Revision: 1a850279c5a6e3662f3a7b40a9ea097838c2aca0
URL: https://github.com/llvm/llvm-project/commit/1a850279c5a6e3662f3a7b40a9ea097838c2aca0
DIFF: https://github.com/llvm/llvm-project/commit/1a850279c5a6e3662f3a7b40a9ea097838c2aca0.diff
LOG: [LV] Re-compute cost of scalarized load users.
If the newly scalarized load has direct memory-op users, their costs
may have changed, because there is no longer any scalarization
overhead for the address operand. Update them.
This ensures consistent costs are assigned to scalarized memory
instructions that themselves have scalarized memory instructions as
operands.
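For illustration, a minimal sketch of the pattern this addresses (a
hypothetical reduced loop, not one of the tests below): the store's
address is itself loaded from memory, so once the address load is
scalarized, the scalarized store no longer needs a vector extract for
its address operand, and its cost should be re-computed.

    define void @example(ptr %src, i64 %n) {
    entry:
      br label %loop

    loop:
      %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
      %gep = getelementptr ptr, ptr %src, i64 %iv
      ; Address load; assume the cost model decides to scalarize it.
      %addr = load ptr, ptr %gep, align 8
      ; Direct memory-op user of the scalarized load: if it is also
      ; scalarized, its cost is now re-computed without the
      ; scalarization overhead for %addr.
      store double 0.0, ptr %addr, align 8
      %iv.next = add i64 %iv, 1
      %ec = icmp eq i64 %iv.next, %n
      br i1 %ec, label %exit, label %loop

    exit:
      ret void
    }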
Added:
Modified:
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index fa5be21dc2b8a..e5d6c8118eb55 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -5699,6 +5699,20 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
Worklist.push_back(InstOp);
}
+ auto UpdateMemOpUserCost = [this, VF](LoadInst *LI) {
+ // If there are direct memory op users of the newly scalarized load,
+ // their cost may have changed because there's no scalarization
+ // overhead for the operand. Update it.
+ for (User *U : LI->users()) {
+ if (!isa<LoadInst, StoreInst>(U))
+ continue;
+ if (getWideningDecision(cast<Instruction>(U), VF) != CM_Scalarize)
+ continue;
+ setWideningDecision(
+ cast<Instruction>(U), VF, CM_Scalarize,
+ getMemInstScalarizationCost(cast<Instruction>(U), VF));
+ }
+ };
for (auto *I : AddrDefs) {
if (isa<LoadInst>(I)) {
// Setting the desired widening decision should ideally be handled in
@@ -5708,21 +5722,24 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
InstWidening Decision = getWideningDecision(I, VF);
if (Decision == CM_Widen || Decision == CM_Widen_Reverse ||
(!isPredicatedInst(I) && !Legal->isUniformMemOp(*I, VF) &&
- Decision == CM_Scalarize))
+ Decision == CM_Scalarize)) {
// Scalarize a widened load of address or update the cost of a scalar
// load of an address.
setWideningDecision(
I, VF, CM_Scalarize,
(VF.getKnownMinValue() *
getMemoryInstructionCost(I, ElementCount::getFixed(1))));
- else if (const auto *Group = getInterleavedAccessGroup(I)) {
+ UpdateMemOpUserCost(cast<LoadInst>(I));
+ } else if (const auto *Group = getInterleavedAccessGroup(I)) {
// Scalarize an interleave group of address loads.
for (unsigned I = 0; I < Group->getFactor(); ++I) {
- if (Instruction *Member = Group->getMember(I))
+ if (Instruction *Member = Group->getMember(I)) {
setWideningDecision(
Member, VF, CM_Scalarize,
(VF.getKnownMinValue() *
getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
+ UpdateMemOpUserCost(cast<LoadInst>(Member));
+ }
}
}
} else {
diff --git a/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll
index d93932585460f..87848730c8f01 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll
@@ -6,38 +6,184 @@
define void @test_store_initially_interleave(i32 %n, ptr noalias %src) #0 {
; I64-LABEL: define void @test_store_initially_interleave(
; I64-SAME: i32 [[N:%.*]], ptr noalias [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
-; I64-NEXT: [[ENTRY:.*]]:
-; I64-NEXT: br label %[[LOOP:.*]]
-; I64: [[LOOP]]:
-; I64-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INC:%.*]], %[[LOOP]] ]
-; I64-NEXT: [[CONV:%.*]] = uitofp i32 [[IV]] to double
+; I64-NEXT: [[ITER_CHECK:.*:]]
+; I64-NEXT: [[TMP4:%.*]] = add i32 [[N]], 1
+; I64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP4]], 4
+; I64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; I64: [[VECTOR_SCEVCHECK]]:
+; I64-NEXT: [[TMP1:%.*]] = icmp slt i32 [[N]], 0
+; I64-NEXT: br i1 [[TMP1]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; I64: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; I64-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ule i32 [[TMP4]], 16
+; I64-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; I64: [[VECTOR_PH]]:
+; I64-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP4]], 16
+; I64-NEXT: [[TMP2:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
+; I64-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 16, i32 [[N_MOD_VF]]
+; I64-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP4]], [[TMP3]]
+; I64-NEXT: br label %[[VECTOR_BODY:.*]]
+; I64: [[VECTOR_BODY]]:
+; I64-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; I64-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; I64-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
+; I64-NEXT: [[STEP_ADD_2:%.*]] = add <4 x i32> [[STEP_ADD]], splat (i32 4)
+; I64-NEXT: [[STEP_ADD_3:%.*]] = add <4 x i32> [[STEP_ADD_2]], splat (i32 4)
+; I64-NEXT: [[IV:%.*]] = add i32 [[INDEX]], 0
+; I64-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1
+; I64-NEXT: [[TMP6:%.*]] = add i32 [[INDEX]], 2
+; I64-NEXT: [[TMP7:%.*]] = add i32 [[INDEX]], 3
+; I64-NEXT: [[TMP8:%.*]] = add i32 [[INDEX]], 4
+; I64-NEXT: [[TMP9:%.*]] = add i32 [[INDEX]], 5
+; I64-NEXT: [[TMP10:%.*]] = add i32 [[INDEX]], 6
+; I64-NEXT: [[TMP11:%.*]] = add i32 [[INDEX]], 7
+; I64-NEXT: [[TMP12:%.*]] = add i32 [[INDEX]], 8
+; I64-NEXT: [[TMP13:%.*]] = add i32 [[INDEX]], 9
+; I64-NEXT: [[TMP14:%.*]] = add i32 [[INDEX]], 10
+; I64-NEXT: [[TMP15:%.*]] = add i32 [[INDEX]], 11
+; I64-NEXT: [[TMP16:%.*]] = add i32 [[INDEX]], 12
+; I64-NEXT: [[TMP17:%.*]] = add i32 [[INDEX]], 13
+; I64-NEXT: [[TMP18:%.*]] = add i32 [[INDEX]], 14
+; I64-NEXT: [[TMP19:%.*]] = add i32 [[INDEX]], 15
+; I64-NEXT: [[TMP20:%.*]] = uitofp <4 x i32> [[VEC_IND]] to <4 x double>
+; I64-NEXT: [[TMP21:%.*]] = uitofp <4 x i32> [[STEP_ADD]] to <4 x double>
+; I64-NEXT: [[TMP22:%.*]] = uitofp <4 x i32> [[STEP_ADD_2]] to <4 x double>
+; I64-NEXT: [[TMP23:%.*]] = uitofp <4 x i32> [[STEP_ADD_3]] to <4 x double>
; I64-NEXT: [[ADD_PTR_I:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[IV]]
+; I64-NEXT: [[TMP25:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP5]]
+; I64-NEXT: [[TMP26:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP6]]
+; I64-NEXT: [[TMP27:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP7]]
+; I64-NEXT: [[TMP28:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP8]]
+; I64-NEXT: [[TMP29:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP9]]
+; I64-NEXT: [[TMP30:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP10]]
+; I64-NEXT: [[TMP31:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP11]]
+; I64-NEXT: [[TMP32:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP12]]
+; I64-NEXT: [[TMP33:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP13]]
+; I64-NEXT: [[TMP34:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP14]]
+; I64-NEXT: [[TMP35:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP15]]
+; I64-NEXT: [[TMP36:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP16]]
+; I64-NEXT: [[TMP37:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP17]]
+; I64-NEXT: [[TMP38:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP18]]
+; I64-NEXT: [[TMP39:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP19]]
; I64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADD_PTR_I]], align 4
+; I64-NEXT: [[TMP41:%.*]] = load ptr, ptr [[TMP25]], align 4
+; I64-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP26]], align 4
+; I64-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP27]], align 4
+; I64-NEXT: [[TMP44:%.*]] = load ptr, ptr [[TMP28]], align 4
+; I64-NEXT: [[TMP45:%.*]] = load ptr, ptr [[TMP29]], align 4
+; I64-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP30]], align 4
+; I64-NEXT: [[TMP47:%.*]] = load ptr, ptr [[TMP31]], align 4
+; I64-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP32]], align 4
+; I64-NEXT: [[TMP49:%.*]] = load ptr, ptr [[TMP33]], align 4
+; I64-NEXT: [[TMP50:%.*]] = load ptr, ptr [[TMP34]], align 4
+; I64-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP35]], align 4
+; I64-NEXT: [[TMP52:%.*]] = load ptr, ptr [[TMP36]], align 4
+; I64-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP37]], align 4
+; I64-NEXT: [[TMP54:%.*]] = load ptr, ptr [[TMP38]], align 4
+; I64-NEXT: [[TMP55:%.*]] = load ptr, ptr [[TMP39]], align 4
+; I64-NEXT: [[CONV:%.*]] = extractelement <4 x double> [[TMP20]], i32 0
; I64-NEXT: store double [[CONV]], ptr [[TMP0]], align 4
-; I64-NEXT: [[INC]] = add i32 [[IV]], 1
-; I64-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], [[N]]
-; I64-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
-; I64: [[EXIT]]:
-; I64-NEXT: ret void
+; I64-NEXT: [[TMP57:%.*]] = extractelement <4 x double> [[TMP20]], i32 1
+; I64-NEXT: store double [[TMP57]], ptr [[TMP41]], align 4
+; I64-NEXT: [[TMP58:%.*]] = extractelement <4 x double> [[TMP20]], i32 2
+; I64-NEXT: store double [[TMP58]], ptr [[TMP42]], align 4
+; I64-NEXT: [[TMP59:%.*]] = extractelement <4 x double> [[TMP20]], i32 3
+; I64-NEXT: store double [[TMP59]], ptr [[TMP43]], align 4
+; I64-NEXT: [[TMP60:%.*]] = extractelement <4 x double> [[TMP21]], i32 0
+; I64-NEXT: store double [[TMP60]], ptr [[TMP44]], align 4
+; I64-NEXT: [[TMP61:%.*]] = extractelement <4 x double> [[TMP21]], i32 1
+; I64-NEXT: store double [[TMP61]], ptr [[TMP45]], align 4
+; I64-NEXT: [[TMP62:%.*]] = extractelement <4 x double> [[TMP21]], i32 2
+; I64-NEXT: store double [[TMP62]], ptr [[TMP46]], align 4
+; I64-NEXT: [[TMP63:%.*]] = extractelement <4 x double> [[TMP21]], i32 3
+; I64-NEXT: store double [[TMP63]], ptr [[TMP47]], align 4
+; I64-NEXT: [[TMP64:%.*]] = extractelement <4 x double> [[TMP22]], i32 0
+; I64-NEXT: store double [[TMP64]], ptr [[TMP48]], align 4
+; I64-NEXT: [[TMP65:%.*]] = extractelement <4 x double> [[TMP22]], i32 1
+; I64-NEXT: store double [[TMP65]], ptr [[TMP49]], align 4
+; I64-NEXT: [[TMP66:%.*]] = extractelement <4 x double> [[TMP22]], i32 2
+; I64-NEXT: store double [[TMP66]], ptr [[TMP50]], align 4
+; I64-NEXT: [[TMP67:%.*]] = extractelement <4 x double> [[TMP22]], i32 3
+; I64-NEXT: store double [[TMP67]], ptr [[TMP51]], align 4
+; I64-NEXT: [[TMP68:%.*]] = extractelement <4 x double> [[TMP23]], i32 0
+; I64-NEXT: store double [[TMP68]], ptr [[TMP52]], align 4
+; I64-NEXT: [[TMP69:%.*]] = extractelement <4 x double> [[TMP23]], i32 1
+; I64-NEXT: store double [[TMP69]], ptr [[TMP53]], align 4
+; I64-NEXT: [[TMP70:%.*]] = extractelement <4 x double> [[TMP23]], i32 2
+; I64-NEXT: store double [[TMP70]], ptr [[TMP54]], align 4
+; I64-NEXT: [[TMP71:%.*]] = extractelement <4 x double> [[TMP23]], i32 3
+; I64-NEXT: store double [[TMP71]], ptr [[TMP55]], align 4
+; I64-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
+; I64-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[STEP_ADD_3]], splat (i32 4)
+; I64-NEXT: [[TMP72:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; I64-NEXT: br i1 [[TMP72]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; I64: [[MIDDLE_BLOCK]]:
+; I64-NEXT: br label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; I64: [[VEC_EPILOG_ITER_CHECK]]:
+; I64-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP3]], 4
+; I64-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]]
+; I64: [[VEC_EPILOG_PH]]:
+; I64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; I64-NEXT: [[N_MOD_VF2:%.*]] = urem i32 [[TMP4]], 4
+; I64-NEXT: [[TMP73:%.*]] = icmp eq i32 [[N_MOD_VF2]], 0
+; I64-NEXT: [[TMP74:%.*]] = select i1 [[TMP73]], i32 4, i32 [[N_MOD_VF2]]
+; I64-NEXT: [[N_VEC3:%.*]] = sub i32 [[TMP4]], [[TMP74]]
+; I64-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[BC_RESUME_VAL]], i64 0
+; I64-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; I64-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3>
+; I64-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; I64: [[VEC_EPILOG_VECTOR_BODY]]:
+; I64-NEXT: [[INDEX4:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; I64-NEXT: [[VEC_IND5:%.*]] = phi <4 x i32> [ [[INDUCTION]], %[[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT7:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; I64-NEXT: [[TMP75:%.*]] = add i32 [[INDEX4]], 0
+; I64-NEXT: [[TMP76:%.*]] = add i32 [[INDEX4]], 1
+; I64-NEXT: [[TMP77:%.*]] = add i32 [[INDEX4]], 2
+; I64-NEXT: [[TMP78:%.*]] = add i32 [[INDEX4]], 3
+; I64-NEXT: [[TMP79:%.*]] = uitofp <4 x i32> [[VEC_IND5]] to <4 x double>
+; I64-NEXT: [[TMP80:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP75]]
+; I64-NEXT: [[TMP81:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP76]]
+; I64-NEXT: [[TMP82:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP77]]
+; I64-NEXT: [[TMP83:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP78]]
+; I64-NEXT: [[TMP84:%.*]] = load ptr, ptr [[TMP80]], align 4
+; I64-NEXT: [[TMP85:%.*]] = load ptr, ptr [[TMP81]], align 4
+; I64-NEXT: [[TMP86:%.*]] = load ptr, ptr [[TMP82]], align 4
+; I64-NEXT: [[TMP87:%.*]] = load ptr, ptr [[TMP83]], align 4
+; I64-NEXT: [[TMP88:%.*]] = extractelement <4 x double> [[TMP79]], i32 0
+; I64-NEXT: store double [[TMP88]], ptr [[TMP84]], align 4
+; I64-NEXT: [[TMP89:%.*]] = extractelement <4 x double> [[TMP79]], i32 1
+; I64-NEXT: store double [[TMP89]], ptr [[TMP85]], align 4
+; I64-NEXT: [[TMP90:%.*]] = extractelement <4 x double> [[TMP79]], i32 2
+; I64-NEXT: store double [[TMP90]], ptr [[TMP86]], align 4
+; I64-NEXT: [[TMP91:%.*]] = extractelement <4 x double> [[TMP79]], i32 3
+; I64-NEXT: store double [[TMP91]], ptr [[TMP87]], align 4
+; I64-NEXT: [[INDEX_NEXT6]] = add nuw i32 [[INDEX4]], 4
+; I64-NEXT: [[VEC_IND_NEXT7]] = add <4 x i32> [[VEC_IND5]], splat (i32 4)
+; I64-NEXT: [[TMP92:%.*]] = icmp eq i32 [[INDEX_NEXT6]], [[N_VEC3]]
+; I64-NEXT: br i1 [[TMP92]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; I64: [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; I64-NEXT: br label %[[VEC_EPILOG_SCALAR_PH]]
+; I64: [[VEC_EPILOG_SCALAR_PH]]:
;
; I32-LABEL: define void @test_store_initially_interleave(
; I32-SAME: i32 [[N:%.*]], ptr noalias [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
; I32-NEXT: [[ENTRY:.*:]]
; I32-NEXT: [[TMP0:%.*]] = add i32 [[N]], 1
-; I32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP0]], 8
-; I32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; I32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP0]], 4
+; I32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; I32: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; I32-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ule i32 [[TMP0]], 16
+; I32-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
; I32: [[VECTOR_PH]]:
-; I32-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 8
+; I32-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 16
; I32-NEXT: [[TMP1:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
-; I32-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 8, i32 [[N_MOD_VF]]
+; I32-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 16, i32 [[N_MOD_VF]]
; I32-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP2]]
; I32-NEXT: br label %[[VECTOR_BODY:.*]]
; I32: [[VECTOR_BODY]]:
; I32-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; I32-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; I32-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], splat (i32 2)
-; I32-NEXT: [[STEP_ADD_2:%.*]] = add <2 x i32> [[STEP_ADD]], splat (i32 2)
-; I32-NEXT: [[STEP_ADD_3:%.*]] = add <2 x i32> [[STEP_ADD_2]], splat (i32 2)
+; I32-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; I32-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
+; I32-NEXT: [[STEP_ADD_2:%.*]] = add <4 x i32> [[STEP_ADD]], splat (i32 4)
+; I32-NEXT: [[STEP_ADD_3:%.*]] = add <4 x i32> [[STEP_ADD_2]], splat (i32 4)
; I32-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
; I32-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 1
; I32-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 2
@@ -46,10 +192,18 @@ define void @test_store_initially_interleave(i32 %n, ptr noalias %src) #0 {
; I32-NEXT: [[TMP8:%.*]] = add i32 [[INDEX]], 5
; I32-NEXT: [[TMP9:%.*]] = add i32 [[INDEX]], 6
; I32-NEXT: [[TMP10:%.*]] = add i32 [[INDEX]], 7
-; I32-NEXT: [[TMP11:%.*]] = uitofp <2 x i32> [[VEC_IND]] to <2 x double>
-; I32-NEXT: [[TMP12:%.*]] = uitofp <2 x i32> [[STEP_ADD]] to <2 x double>
-; I32-NEXT: [[TMP13:%.*]] = uitofp <2 x i32> [[STEP_ADD_2]] to <2 x double>
-; I32-NEXT: [[TMP14:%.*]] = uitofp <2 x i32> [[STEP_ADD_3]] to <2 x double>
+; I32-NEXT: [[TMP11:%.*]] = add i32 [[INDEX]], 8
+; I32-NEXT: [[TMP12:%.*]] = add i32 [[INDEX]], 9
+; I32-NEXT: [[TMP13:%.*]] = add i32 [[INDEX]], 10
+; I32-NEXT: [[TMP14:%.*]] = add i32 [[INDEX]], 11
+; I32-NEXT: [[TMP40:%.*]] = add i32 [[INDEX]], 12
+; I32-NEXT: [[TMP41:%.*]] = add i32 [[INDEX]], 13
+; I32-NEXT: [[TMP42:%.*]] = add i32 [[INDEX]], 14
+; I32-NEXT: [[TMP43:%.*]] = add i32 [[INDEX]], 15
+; I32-NEXT: [[TMP44:%.*]] = uitofp <4 x i32> [[VEC_IND]] to <4 x double>
+; I32-NEXT: [[TMP45:%.*]] = uitofp <4 x i32> [[STEP_ADD]] to <4 x double>
+; I32-NEXT: [[TMP46:%.*]] = uitofp <4 x i32> [[STEP_ADD_2]] to <4 x double>
+; I32-NEXT: [[TMP55:%.*]] = uitofp <4 x i32> [[STEP_ADD_3]] to <4 x double>
; I32-NEXT: [[TMP15:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP3]]
; I32-NEXT: [[TMP16:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP4]]
; I32-NEXT: [[TMP17:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP5]]
@@ -58,6 +212,14 @@ define void @test_store_initially_interleave(i32 %n, ptr noalias %src) #0 {
; I32-NEXT: [[TMP20:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP8]]
; I32-NEXT: [[TMP21:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP9]]
; I32-NEXT: [[TMP22:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP10]]
+; I32-NEXT: [[TMP56:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP11]]
+; I32-NEXT: [[TMP57:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP12]]
+; I32-NEXT: [[TMP58:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP13]]
+; I32-NEXT: [[TMP59:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP14]]
+; I32-NEXT: [[TMP60:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP40]]
+; I32-NEXT: [[TMP61:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP41]]
+; I32-NEXT: [[TMP62:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP42]]
+; I32-NEXT: [[TMP71:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP43]]
; I32-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP15]], align 4
; I32-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP16]], align 4
; I32-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP17]], align 4
@@ -66,29 +228,96 @@ define void @test_store_initially_interleave(i32 %n, ptr noalias %src) #0 {
; I32-NEXT: [[TMP28:%.*]] = load ptr, ptr [[TMP20]], align 4
; I32-NEXT: [[TMP29:%.*]] = load ptr, ptr [[TMP21]], align 4
; I32-NEXT: [[TMP30:%.*]] = load ptr, ptr [[TMP22]], align 4
-; I32-NEXT: [[TMP31:%.*]] = extractelement <2 x double> [[TMP11]], i32 0
+; I32-NEXT: [[TMP47:%.*]] = load ptr, ptr [[TMP56]], align 4
+; I32-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP57]], align 4
+; I32-NEXT: [[TMP49:%.*]] = load ptr, ptr [[TMP58]], align 4
+; I32-NEXT: [[TMP50:%.*]] = load ptr, ptr [[TMP59]], align 4
+; I32-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP60]], align 4
+; I32-NEXT: [[TMP52:%.*]] = load ptr, ptr [[TMP61]], align 4
+; I32-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP62]], align 4
+; I32-NEXT: [[TMP54:%.*]] = load ptr, ptr [[TMP71]], align 4
+; I32-NEXT: [[TMP31:%.*]] = extractelement <4 x double> [[TMP44]], i32 0
; I32-NEXT: store double [[TMP31]], ptr [[TMP23]], align 4
-; I32-NEXT: [[TMP32:%.*]] = extractelement <2 x double> [[TMP11]], i32 1
+; I32-NEXT: [[TMP32:%.*]] = extractelement <4 x double> [[TMP44]], i32 1
; I32-NEXT: store double [[TMP32]], ptr [[TMP24]], align 4
-; I32-NEXT: [[TMP33:%.*]] = extractelement <2 x double> [[TMP12]], i32 0
+; I32-NEXT: [[TMP33:%.*]] = extractelement <4 x double> [[TMP44]], i32 2
; I32-NEXT: store double [[TMP33]], ptr [[TMP25]], align 4
-; I32-NEXT: [[TMP34:%.*]] = extractelement <2 x double> [[TMP12]], i32 1
+; I32-NEXT: [[TMP34:%.*]] = extractelement <4 x double> [[TMP44]], i32 3
; I32-NEXT: store double [[TMP34]], ptr [[TMP26]], align 4
-; I32-NEXT: [[TMP35:%.*]] = extractelement <2 x double> [[TMP13]], i32 0
+; I32-NEXT: [[TMP35:%.*]] = extractelement <4 x double> [[TMP45]], i32 0
; I32-NEXT: store double [[TMP35]], ptr [[TMP27]], align 4
-; I32-NEXT: [[TMP36:%.*]] = extractelement <2 x double> [[TMP13]], i32 1
+; I32-NEXT: [[TMP36:%.*]] = extractelement <4 x double> [[TMP45]], i32 1
; I32-NEXT: store double [[TMP36]], ptr [[TMP28]], align 4
-; I32-NEXT: [[TMP37:%.*]] = extractelement <2 x double> [[TMP14]], i32 0
+; I32-NEXT: [[TMP37:%.*]] = extractelement <4 x double> [[TMP45]], i32 2
; I32-NEXT: store double [[TMP37]], ptr [[TMP29]], align 4
-; I32-NEXT: [[TMP38:%.*]] = extractelement <2 x double> [[TMP14]], i32 1
+; I32-NEXT: [[TMP38:%.*]] = extractelement <4 x double> [[TMP45]], i32 3
; I32-NEXT: store double [[TMP38]], ptr [[TMP30]], align 4
-; I32-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
-; I32-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[STEP_ADD_3]], splat (i32 2)
+; I32-NEXT: [[TMP63:%.*]] = extractelement <4 x double> [[TMP46]], i32 0
+; I32-NEXT: store double [[TMP63]], ptr [[TMP47]], align 4
+; I32-NEXT: [[TMP64:%.*]] = extractelement <4 x double> [[TMP46]], i32 1
+; I32-NEXT: store double [[TMP64]], ptr [[TMP48]], align 4
+; I32-NEXT: [[TMP65:%.*]] = extractelement <4 x double> [[TMP46]], i32 2
+; I32-NEXT: store double [[TMP65]], ptr [[TMP49]], align 4
+; I32-NEXT: [[TMP66:%.*]] = extractelement <4 x double> [[TMP46]], i32 3
+; I32-NEXT: store double [[TMP66]], ptr [[TMP50]], align 4
+; I32-NEXT: [[TMP67:%.*]] = extractelement <4 x double> [[TMP55]], i32 0
+; I32-NEXT: store double [[TMP67]], ptr [[TMP51]], align 4
+; I32-NEXT: [[TMP68:%.*]] = extractelement <4 x double> [[TMP55]], i32 1
+; I32-NEXT: store double [[TMP68]], ptr [[TMP52]], align 4
+; I32-NEXT: [[TMP69:%.*]] = extractelement <4 x double> [[TMP55]], i32 2
+; I32-NEXT: store double [[TMP69]], ptr [[TMP53]], align 4
+; I32-NEXT: [[TMP70:%.*]] = extractelement <4 x double> [[TMP55]], i32 3
+; I32-NEXT: store double [[TMP70]], ptr [[TMP54]], align 4
+; I32-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
+; I32-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[STEP_ADD_3]], splat (i32 4)
; I32-NEXT: [[TMP39:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; I32-NEXT: br i1 [[TMP39]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; I32: [[MIDDLE_BLOCK]]:
-; I32-NEXT: br label %[[SCALAR_PH]]
-; I32: [[SCALAR_PH]]:
+; I32-NEXT: br label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; I32: [[VEC_EPILOG_ITER_CHECK]]:
+; I32-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP2]], 4
+; I32-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]]
+; I32: [[VEC_EPILOG_PH]]:
+; I32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; I32-NEXT: [[N_MOD_VF2:%.*]] = urem i32 [[TMP0]], 4
+; I32-NEXT: [[TMP72:%.*]] = icmp eq i32 [[N_MOD_VF2]], 0
+; I32-NEXT: [[TMP73:%.*]] = select i1 [[TMP72]], i32 4, i32 [[N_MOD_VF2]]
+; I32-NEXT: [[N_VEC3:%.*]] = sub i32 [[TMP0]], [[TMP73]]
+; I32-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[BC_RESUME_VAL]], i64 0
+; I32-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; I32-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3>
+; I32-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; I32: [[VEC_EPILOG_VECTOR_BODY]]:
+; I32-NEXT: [[INDEX4:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; I32-NEXT: [[VEC_IND5:%.*]] = phi <4 x i32> [ [[INDUCTION]], %[[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT7:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; I32-NEXT: [[TMP74:%.*]] = add i32 [[INDEX4]], 0
+; I32-NEXT: [[TMP75:%.*]] = add i32 [[INDEX4]], 1
+; I32-NEXT: [[TMP76:%.*]] = add i32 [[INDEX4]], 2
+; I32-NEXT: [[TMP77:%.*]] = add i32 [[INDEX4]], 3
+; I32-NEXT: [[TMP78:%.*]] = uitofp <4 x i32> [[VEC_IND5]] to <4 x double>
+; I32-NEXT: [[TMP79:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP74]]
+; I32-NEXT: [[TMP80:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP75]]
+; I32-NEXT: [[TMP81:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP76]]
+; I32-NEXT: [[TMP82:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP77]]
+; I32-NEXT: [[TMP83:%.*]] = load ptr, ptr [[TMP79]], align 4
+; I32-NEXT: [[TMP84:%.*]] = load ptr, ptr [[TMP80]], align 4
+; I32-NEXT: [[TMP85:%.*]] = load ptr, ptr [[TMP81]], align 4
+; I32-NEXT: [[TMP86:%.*]] = load ptr, ptr [[TMP82]], align 4
+; I32-NEXT: [[TMP87:%.*]] = extractelement <4 x double> [[TMP78]], i32 0
+; I32-NEXT: store double [[TMP87]], ptr [[TMP83]], align 4
+; I32-NEXT: [[TMP88:%.*]] = extractelement <4 x double> [[TMP78]], i32 1
+; I32-NEXT: store double [[TMP88]], ptr [[TMP84]], align 4
+; I32-NEXT: [[TMP89:%.*]] = extractelement <4 x double> [[TMP78]], i32 2
+; I32-NEXT: store double [[TMP89]], ptr [[TMP85]], align 4
+; I32-NEXT: [[TMP90:%.*]] = extractelement <4 x double> [[TMP78]], i32 3
+; I32-NEXT: store double [[TMP90]], ptr [[TMP86]], align 4
+; I32-NEXT: [[INDEX_NEXT6]] = add nuw i32 [[INDEX4]], 4
+; I32-NEXT: [[VEC_IND_NEXT7]] = add <4 x i32> [[VEC_IND5]], splat (i32 4)
+; I32-NEXT: [[TMP91:%.*]] = icmp eq i32 [[INDEX_NEXT6]], [[N_VEC3]]
+; I32-NEXT: br i1 [[TMP91]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; I32: [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; I32-NEXT: br label %[[VEC_EPILOG_SCALAR_PH]]
+; I32: [[VEC_EPILOG_SCALAR_PH]]:
;
entry:
br label %loop
@@ -149,7 +378,7 @@ define void @test_store_loaded_value(ptr noalias %src, ptr noalias %dst, i32 %n)
; I64-NEXT: store double [[TMP11]], ptr [[TMP19]], align 8
; I64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; I64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; I64-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; I64-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; I64: [[MIDDLE_BLOCK]]:
; I64-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_EXT]], [[N_VEC]]
; I64-NEXT: br i1 [[CMP_N]], [[EXIT_LOOPEXIT:label %.*]], label %[[SCALAR_PH]]
@@ -196,7 +425,7 @@ define void @test_store_loaded_value(ptr noalias %src, ptr noalias %dst, i32 %n)
; I32-NEXT: store double [[TMP11]], ptr [[TMP19]], align 8
; I32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; I32-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; I32-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; I32-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; I32: [[MIDDLE_BLOCK]]:
; I32-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_EXT]], [[N_VEC]]
; I32-NEXT: br i1 [[CMP_N]], [[EXIT_LOOPEXIT:label %.*]], label %[[SCALAR_PH]]