[llvm] ad9c0b3 - [SLP]Check if the gathered loads form full vector before attempting build it
Alexey Bataev via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 18 14:09:38 PST 2024
Author: Alexey Bataev
Date: 2024-11-18T14:09:31-08:00
New Revision: ad9c0b369e86e75d56e229f294782a4eaf527226
URL: https://github.com/llvm/llvm-project/commit/ad9c0b369e86e75d56e229f294782a4eaf527226
DIFF: https://github.com/llvm/llvm-project/commit/ad9c0b369e86e75d56e229f294782a4eaf527226.diff
LOG: [SLP]Check if the gathered loads form full vector before attempting build it
Need to check that the number of gathered loads in the slice forms a full build
vector before attempting to build it, to avoid a compiler crash.
Fixes #116691
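For context, a minimal standalone sketch of the kind of gate this patch adds on the
non-2-element path (this is not the actual hasFullVectorsOrPowerOf2() helper from
SLPVectorizer.cpp, just an illustration of its intent): the slice of gathered loads
is only allowed to become a build vector when its element count is a power of two or
splits evenly into whole full-width registers. MaxRegElts below is a hypothetical
stand-in for the per-target register capacity that the real code queries via TTI.

#include <cassert>

// Illustrative sketch only, not the LLVM helper itself: accept an element
// count that is a power of two, or that divides evenly into full registers.
static bool isPowerOfTwo(unsigned N) { return N != 0 && (N & (N - 1)) == 0; }

// MaxRegElts: hypothetical number of elements filling one vector register.
static bool formsFullVectorsOrPowerOf2(unsigned NumElts, unsigned MaxRegElts) {
  if (isPowerOfTwo(NumElts))
    return true;
  // Otherwise require the slice to split into whole full-width registers.
  return MaxRegElts > 1 && isPowerOfTwo(MaxRegElts) && NumElts > MaxRegElts &&
         NumElts % MaxRegElts == 0;
}

int main() {
  // With (hypothetical) 4-element registers: 8 and 12 loads pass, 6 and 7 do not.
  assert(formsFullVectorsOrPowerOf2(8, 4));
  assert(formsFullVectorsOrPowerOf2(12, 4));
  assert(!formsFullVectorsOrPowerOf2(6, 4));
  assert(!formsFullVectorsOrPowerOf2(7, 4));
  return 0;
}

In the patch itself this check is combined with the pre-existing profitability
condition (3+ consecutive loads, or 2 loads whose users are already vectorized),
so an odd-sized slice that cannot fill registers is no longer handed to the
build-vector path.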
Added:
llvm/test/Transforms/SLPVectorizer/X86/gathered-loads-non-full-reg.ll
Modified:
llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 918d7663548f51..dc0dffd9fcbf81 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -6815,16 +6815,7 @@ void BoUpSLP::tryToVectorizeGatheredLoads(
// Check if it is profitable to try vectorizing gathered loads. It is
// profitable if we have more than 3 consecutive loads or if we have
// less but all users are vectorized or deleted.
- bool AllowToVectorize =
- NumElts >= 3 ||
- any_of(ValueToGatherNodes.at(Slice.front()),
- [=](const TreeEntry *TE) {
- return TE->Scalars.size() == 2 &&
- ((TE->Scalars.front() == Slice.front() &&
- TE->Scalars.back() == Slice.back()) ||
- (TE->Scalars.front() == Slice.back() &&
- TE->Scalars.back() == Slice.front()));
- });
+ bool AllowToVectorize = false;
// Check if it is profitable to vectorize 2-elements loads.
if (NumElts == 2) {
bool IsLegalBroadcastLoad = TTI->isLegalBroadcastLoad(
@@ -6861,6 +6852,19 @@ void BoUpSLP::tryToVectorizeGatheredLoads(
return true;
};
AllowToVectorize = CheckIfAllowed(Slice);
+ } else {
+ AllowToVectorize =
+ (NumElts >= 3 ||
+ any_of(ValueToGatherNodes.at(Slice.front()),
+ [=](const TreeEntry *TE) {
+ return TE->Scalars.size() == 2 &&
+ ((TE->Scalars.front() == Slice.front() &&
+ TE->Scalars.back() == Slice.back()) ||
+ (TE->Scalars.front() == Slice.back() &&
+ TE->Scalars.back() == Slice.front()));
+ })) &&
+ hasFullVectorsOrPowerOf2(*TTI, Slice.front()->getType(),
+ Slice.size());
}
if (AllowToVectorize) {
SmallVector<Value *> PointerOps;
@@ -6903,7 +6907,8 @@ void BoUpSLP::tryToVectorizeGatheredLoads(
}
// Mark masked gathers candidates as vectorized, if any.
for (unsigned Cnt : MaskedGatherVectorized) {
- ArrayRef<LoadInst *> Slice = ArrayRef(Loads).slice(Cnt, NumElts);
+ ArrayRef<LoadInst *> Slice = ArrayRef(Loads).slice(
+ Cnt, std::min<unsigned>(NumElts, Loads.size() - Cnt));
ArrayRef<Value *> Values(
reinterpret_cast<Value *const *>(Slice.begin()), Slice.size());
Results.emplace_back(Values, LoadsState::ScatterVectorize);
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/gathered-loads-non-full-reg.ll b/llvm/test/Transforms/SLPVectorizer/X86/gathered-loads-non-full-reg.ll
new file mode 100644
index 00000000000000..79aba19ab02e1c
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/gathered-loads-non-full-reg.ll
@@ -0,0 +1,140 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux -mcpu=cascadelake < %s | FileCheck %s
+
+@solid_ = external global [608 x i8]
+
+define void @test(ptr noalias %0) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr noalias [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[_LR_PH1019:.*:]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 32
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP0]], i64 128
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[TMP0]], i64 200
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0]], i64 208
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP0]], i64 232
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP0]], i64 288
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP0]], i64 320
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP0]], i64 304
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP0]], i64 424
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP0]], i64 480
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP0]], i64 504
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP0]], i64 632
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP0]], i64 720
+; CHECK-NEXT: [[TMP15:%.*]] = load double, ptr [[TMP1]], align 8
+; CHECK-NEXT: [[TMP16:%.*]] = load double, ptr [[TMP2]], align 8
+; CHECK-NEXT: [[TMP17:%.*]] = fadd double [[TMP16]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = load double, ptr [[TMP3]], align 8
+; CHECK-NEXT: [[TMP19:%.*]] = load double, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[TMP20:%.*]] = load double, ptr [[TMP5]], align 8
+; CHECK-NEXT: [[TMP21:%.*]] = load double, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[TMP22:%.*]] = fadd double [[TMP21]], [[TMP20]]
+; CHECK-NEXT: [[TMP23:%.*]] = load double, ptr [[TMP7]], align 8
+; CHECK-NEXT: [[TMP24:%.*]] = load double, ptr [[TMP8]], align 8
+; CHECK-NEXT: [[TMP25:%.*]] = load double, ptr [[TMP9]], align 8
+; CHECK-NEXT: [[TMP26:%.*]] = load double, ptr [[TMP10]], align 8
+; CHECK-NEXT: [[TMP27:%.*]] = load double, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP28:%.*]] = load double, ptr [[TMP12]], align 8
+; CHECK-NEXT: [[TMP29:%.*]] = fadd double [[TMP28]], [[TMP27]]
+; CHECK-NEXT: [[TMP30:%.*]] = fmul double [[TMP22]], [[TMP18]]
+; CHECK-NEXT: [[TMP31:%.*]] = fmul double [[TMP30]], 0.000000e+00
+; CHECK-NEXT: [[TMP32:%.*]] = fsub double 0.000000e+00, [[TMP25]]
+; CHECK-NEXT: [[TMP33:%.*]] = fmul double [[TMP32]], 0.000000e+00
+; CHECK-NEXT: [[TMP34:%.*]] = fadd double [[TMP33]], 0.000000e+00
+; CHECK-NEXT: [[TMP35:%.*]] = fmul double [[TMP34]], 0.000000e+00
+; CHECK-NEXT: [[TMP36:%.*]] = fmul double [[TMP29]], [[TMP26]]
+; CHECK-NEXT: [[TMP37:%.*]] = fmul double [[TMP36]], 0.000000e+00
+; CHECK-NEXT: [[TMP38:%.*]] = fadd double [[TMP37]], 0.000000e+00
+; CHECK-NEXT: [[TMP39:%.*]] = fsub double [[TMP17]], [[TMP19]]
+; CHECK-NEXT: [[TMP40:%.*]] = fmul double [[TMP39]], [[TMP23]]
+; CHECK-NEXT: [[TMP41:%.*]] = fmul double [[TMP40]], 0.000000e+00
+; CHECK-NEXT: [[TMP42:%.*]] = load double, ptr [[TMP0]], align 8
+; CHECK-NEXT: [[TMP43:%.*]] = load double, ptr [[TMP13]], align 8
+; CHECK-NEXT: [[TMP44:%.*]] = fmul double [[TMP43]], [[TMP31]]
+; CHECK-NEXT: [[TMP45:%.*]] = load double, ptr [[TMP14]], align 8
+; CHECK-NEXT: [[TMP46:%.*]] = fmul double [[TMP35]], 0.000000e+00
+; CHECK-NEXT: [[TMP47:%.*]] = fadd double [[TMP44]], 0.000000e+00
+; CHECK-NEXT: [[TMP48:%.*]] = fmul double [[TMP45]], [[TMP38]]
+; CHECK-NEXT: [[TMP49:%.*]] = fmul double [[TMP45]], [[TMP41]]
+; CHECK-NEXT: store double [[TMP46]], ptr getelementptr inbounds (i8, ptr @solid_, i64 384), align 8
+; CHECK-NEXT: store double [[TMP47]], ptr getelementptr inbounds (i8, ptr @solid_, i64 408), align 8
+; CHECK-NEXT: store double [[TMP48]], ptr getelementptr inbounds (i8, ptr @solid_, i64 392), align 8
+; CHECK-NEXT: store double [[TMP49]], ptr getelementptr inbounds (i8, ptr @solid_, i64 400), align 8
+; CHECK-NEXT: [[DOTNEG965:%.*]] = fmul double [[TMP48]], [[TMP24]]
+; CHECK-NEXT: [[REASS_ADD993:%.*]] = fadd double [[DOTNEG965]], 0.000000e+00
+; CHECK-NEXT: [[TMP50:%.*]] = fadd double [[TMP42]], [[REASS_ADD993]]
+; CHECK-NEXT: [[TMP51:%.*]] = fsub double 0.000000e+00, [[TMP50]]
+; CHECK-NEXT: store double [[TMP51]], ptr getelementptr inbounds (i8, ptr @solid_, i64 296), align 8
+; CHECK-NEXT: [[DOTNEG969:%.*]] = fmul double [[TMP49]], 0.000000e+00
+; CHECK-NEXT: [[REASS_ADD996:%.*]] = fadd double [[DOTNEG969]], 0.000000e+00
+; CHECK-NEXT: [[TMP52:%.*]] = fadd double [[TMP45]], [[REASS_ADD996]]
+; CHECK-NEXT: [[TMP53:%.*]] = fsub double 0.000000e+00, [[TMP52]]
+; CHECK-NEXT: store double [[TMP53]], ptr getelementptr inbounds (i8, ptr @solid_, i64 304), align 8
+; CHECK-NEXT: ret void
+;
+.lr.ph1019:
+ %1 = getelementptr i8, ptr %0, i64 8
+ %2 = getelementptr i8, ptr %0, i64 32
+ %3 = getelementptr i8, ptr %0, i64 128
+ %4 = getelementptr i8, ptr %0, i64 200
+ %5 = getelementptr i8, ptr %0, i64 208
+ %6 = getelementptr i8, ptr %0, i64 232
+ %7 = getelementptr i8, ptr %0, i64 288
+ %8 = getelementptr i8, ptr %0, i64 320
+ %9 = getelementptr i8, ptr %0, i64 304
+ %10 = getelementptr i8, ptr %0, i64 424
+ %11 = getelementptr i8, ptr %0, i64 480
+ %12 = getelementptr i8, ptr %0, i64 504
+ %13 = getelementptr i8, ptr %0, i64 632
+ %14 = getelementptr i8, ptr %0, i64 720
+ %15 = load double, ptr %1, align 8
+ %16 = load double, ptr %2, align 8
+ %17 = fadd double %16, %15
+ %18 = load double, ptr %3, align 8
+ %19 = load double, ptr %4, align 8
+ %20 = load double, ptr %5, align 8
+ %21 = load double, ptr %6, align 8
+ %22 = fadd double %21, %20
+ %23 = load double, ptr %7, align 8
+ %24 = load double, ptr %8, align 8
+ %25 = load double, ptr %9, align 8
+ %26 = load double, ptr %10, align 8
+ %27 = load double, ptr %11, align 8
+ %28 = load double, ptr %12, align 8
+ %29 = fadd double %28, %27
+ %30 = fmul double %22, %18
+ %31 = fmul double %30, 0.000000e+00
+ %32 = fsub double 0.000000e+00, %25
+ %33 = fmul double %32, 0.000000e+00
+ %34 = fadd double %33, 0.000000e+00
+ %35 = fmul double %34, 0.000000e+00
+ %36 = fmul double %29, %26
+ %37 = fmul double %36, 0.000000e+00
+ %38 = fadd double %37, 0.000000e+00
+ %39 = fsub double %17, %19
+ %40 = fmul double %39, %23
+ %41 = fmul double %40, 0.000000e+00
+ %42 = load double, ptr %0, align 8
+ %43 = load double, ptr %13, align 8
+ %44 = fmul double %43, %31
+ %45 = load double, ptr %14, align 8
+ %46 = fmul double %35, 0.000000e+00
+ %47 = fadd double %44, 0.000000e+00
+ %48 = fmul double %45, %38
+ %49 = fmul double %45, %41
+ store double %46, ptr getelementptr inbounds (i8, ptr @solid_, i64 384), align 8
+ store double %47, ptr getelementptr inbounds (i8, ptr @solid_, i64 408), align 8
+ store double %48, ptr getelementptr inbounds (i8, ptr @solid_, i64 392), align 8
+ store double %49, ptr getelementptr inbounds (i8, ptr @solid_, i64 400), align 8
+ %.neg965 = fmul double %48, %24
+ %reass.add993 = fadd double %.neg965, 0.000000e+00
+ %50 = fadd double %42, %reass.add993
+ %51 = fsub double 0.000000e+00, %50
+ store double %51, ptr getelementptr inbounds (i8, ptr @solid_, i64 296), align 8
+ %.neg969 = fmul double %49, 0.000000e+00
+ %reass.add996 = fadd double %.neg969, 0.000000e+00
+ %52 = fadd double %45, %reass.add996
+ %53 = fsub double 0.000000e+00, %52
+ store double %53, ptr getelementptr inbounds (i8, ptr @solid_, i64 304), align 8
+ ret void
+}