[llvm] [SLP]Better cost estimation for masked gather or "clustered" loads. (PR #105858)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 23 10:01:39 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-llvm-transforms
Author: Alexey Bataev (alexey-bataev)
Changes:
After landing support for actual vectorization of the "clustered" loads,
we need a better cost comparison between the masked gather and the
clustered loads. This includes estimating the cost of the address
calculation and a better estimate of the cost of the gathered loads.
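For illustration only (not part of the patch), here is a reduced form of the
gather_load_2 pattern from the affected tests, written as the scalar loads +
insertelements sequence that the cost model now weighs against a <4 x i32>
masked gather; the function name is hypothetical and the offsets mirror the
test:

```llvm
; Four i32 loads from one base pointer at non-consecutive offsets
; ("clustered" loads). The updated heuristic compares the cost of a masked
; gather (including the buildvector/splat of the addresses) against scalar
; loads + inserts and, for slices of the list, vectorized loads +
; insert-subvector shuffles.
define <4 x i32> @clustered_loads(ptr %base) {
  %g0 = getelementptr inbounds i32, ptr %base, i64 1
  %g1 = getelementptr inbounds i32, ptr %base, i64 10
  %g2 = getelementptr inbounds i32, ptr %base, i64 3
  %g3 = getelementptr inbounds i32, ptr %base, i64 5
  %l0 = load i32, ptr %g0, align 4
  %l1 = load i32, ptr %g1, align 4
  %l2 = load i32, ptr %g2, align 4
  %l3 = load i32, ptr %g3, align 4
  %v0 = insertelement <4 x i32> poison, i32 %l0, i64 0
  %v1 = insertelement <4 x i32> %v0, i32 %l1, i64 1
  %v2 = insertelement <4 x i32> %v1, i32 %l2, i64 2
  %v3 = insertelement <4 x i32> %v2, i32 %l3, i64 3
  ret <4 x i32> %v3
}
```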
---
Patch is 37.76 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/105858.diff
7 Files Affected:
- (modified) llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp (+151-75)
- (modified) llvm/test/Transforms/SLPVectorizer/X86/pr47629-inseltpoison.ll (+14-6)
- (modified) llvm/test/Transforms/SLPVectorizer/X86/pr47629.ll (+14-6)
- (modified) llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll (+32-12)
- (modified) llvm/test/Transforms/SLPVectorizer/X86/remark-masked-loads-consecutive-loads-same-ptr.ll (+12-8)
- (modified) llvm/test/Transforms/SLPVectorizer/X86/reorder-possible-strided-node.ll (+56-16)
- (modified) llvm/test/Transforms/SLPVectorizer/X86/reorder-reused-masked-gather2.ll (+14-12)
``````````diff
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 949579772b94d5..f2ebc444573399 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -4780,16 +4780,74 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
}
}
}
- auto CheckForShuffledLoads = [&, &TTI = *TTI](Align CommonAlignment) {
+  // Correctly identify and compare the cost of loads + shuffles vs.
+  // strided/masked gather loads. Returns true if vectorized + shuffles
+  // representation is better than just gather.
+ auto CheckForShuffledLoads = [&,
+ &TTI = *TTI](Align CommonAlignment,
+ bool ProfitableGatherPointers) {
+ // Compare masked gather cost and loads + insert subvector costs.
+ TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
+ auto [ScalarGEPCost, VectorGEPCost] =
+ getGEPCosts(TTI, PointerOps, PointerOps.front(),
+ Instruction::GetElementPtr, CostKind, ScalarTy, VecTy);
+ // Estimate the cost of masked gather GEP. If not a splat, roughly
+ // estimate as a buildvector, otherwise estimate as splat.
+ if (static_cast<unsigned>(
+ count_if(PointerOps, IsaPred<GetElementPtrInst>)) <
+ PointerOps.size() - 1 ||
+ any_of(PointerOps, [&](Value *V) {
+ return getUnderlyingObject(V) !=
+ getUnderlyingObject(PointerOps.front());
+ }))
+ VectorGEPCost += TTI.getScalarizationOverhead(
+ VecTy,
+ APInt::getAllOnes(VecTy->getElementCount().getKnownMinValue()),
+ /*Insert=*/true, /*Extract=*/false, CostKind);
+ else
+ VectorGEPCost +=
+ TTI.getScalarizationOverhead(
+ VecTy,
+ APInt::getOneBitSet(VecTy->getElementCount().getKnownMinValue(),
+ 0),
+ /*Insert=*/true, /*Extract=*/false, CostKind) +
+ ::getShuffleCost(TTI, TTI::SK_Broadcast, VecTy, std::nullopt,
+ CostKind);
+ // The cost of scalar loads.
+ InstructionCost ScalarLoadsCost =
+ std::accumulate(VL.begin(), VL.end(), InstructionCost(),
+ [&](InstructionCost C, Value *V) {
+ return C + TTI.getInstructionCost(
+ cast<Instruction>(V), CostKind);
+ }) +
+ ScalarGEPCost;
+ // The cost of masked gather.
+ InstructionCost MaskedGatherCost =
+ TTI.getGatherScatterOpCost(Instruction::Load, VecTy,
+ cast<LoadInst>(VL0)->getPointerOperand(),
+ /*VariableMask=*/false, CommonAlignment,
+ CostKind) +
+ (ProfitableGatherPointers ? 0 : VectorGEPCost);
+ APInt DemandedElts = APInt::getAllOnes(VecTy->getNumElements());
+ InstructionCost GatherCost =
+ TTI.getScalarizationOverhead(VecTy, DemandedElts, /*Insert=*/true,
+ /*Extract=*/false, CostKind) +
+ ScalarLoadsCost;
+    // The list of loads is small or the partial check was already performed -
+    // directly compare masked gather cost and gather cost.
+ constexpr unsigned ListLimit = 4;
+ if (!TryRecursiveCheck || VL.size() < ListLimit)
+ return MaskedGatherCost - GatherCost >= -SLPCostThreshold;
unsigned Sz = DL->getTypeSizeInBits(ScalarTy);
- unsigned MinVF = getMinVF(Sz);
- unsigned MaxVF = std::max<unsigned>(bit_floor(VL.size() / 2), MinVF);
+ unsigned MinVF = 2;
+ unsigned MaxVF = bit_floor(VL.size() / 2);
MaxVF = std::min(getMaximumVF(Sz, Instruction::Load), MaxVF);
+ DemandedElts.clearAllBits();
+ // Iterate through possible vectorization factors and check if vectorized
+ // + shuffles is better than just gather.
for (unsigned VF = MaxVF; VF >= MinVF; VF /= 2) {
- unsigned VectorizedCnt = 0;
SmallVector<LoadsState> States;
- for (unsigned Cnt = 0, End = VL.size(); Cnt + VF <= End;
- Cnt += VF, ++VectorizedCnt) {
+ for (unsigned Cnt = 0, End = VL.size(); Cnt + VF <= End; Cnt += VF) {
ArrayRef<Value *> Slice = VL.slice(Cnt, VF);
SmallVector<unsigned> Order;
SmallVector<Value *> PointerOps;
@@ -4797,8 +4855,10 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
canVectorizeLoads(Slice, Slice.front(), Order, PointerOps,
/*TryRecursiveCheck=*/false);
// Check that the sorted loads are consecutive.
- if (LS == LoadsState::Gather)
- break;
+ if (LS == LoadsState::Gather) {
+ DemandedElts.setBits(Cnt, Cnt + VF);
+ continue;
+ }
// If need the reorder - consider as high-cost masked gather for now.
if ((LS == LoadsState::Vectorize ||
LS == LoadsState::StridedVectorize) &&
@@ -4806,79 +4866,94 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
LS = LoadsState::ScatterVectorize;
States.push_back(LS);
}
+ if (DemandedElts.isAllOnes())
+ // All loads gathered - try smaller VF.
+ continue;
+ InstructionCost ScalarVFGEPCost = 0;
// Can be vectorized later as a serie of loads/insertelements.
- if (VectorizedCnt == VL.size() / VF) {
- // Compare masked gather cost and loads + insersubvector costs.
- TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
- auto [ScalarGEPCost, VectorGEPCost] = getGEPCosts(
- TTI, PointerOps, PointerOps.front(), Instruction::GetElementPtr,
- CostKind, ScalarTy, VecTy);
- InstructionCost MaskedGatherCost =
- TTI.getGatherScatterOpCost(
- Instruction::Load, VecTy,
- cast<LoadInst>(VL0)->getPointerOperand(),
- /*VariableMask=*/false, CommonAlignment, CostKind) +
- VectorGEPCost - ScalarGEPCost;
- InstructionCost VecLdCost = 0;
- auto *SubVecTy = getWidenedType(ScalarTy, VF);
- for (auto [I, LS] : enumerate(States)) {
- auto *LI0 = cast<LoadInst>(VL[I * VF]);
- switch (LS) {
- case LoadsState::Vectorize: {
- auto [ScalarGEPCost, VectorGEPCost] =
- getGEPCosts(TTI, ArrayRef(PointerOps).slice(I * VF, VF),
- LI0->getPointerOperand(), Instruction::Load,
- CostKind, ScalarTy, SubVecTy);
- VecLdCost += TTI.getMemoryOpCost(
- Instruction::Load, SubVecTy, LI0->getAlign(),
- LI0->getPointerAddressSpace(), CostKind,
- TTI::OperandValueInfo()) +
- VectorGEPCost - ScalarGEPCost;
- break;
- }
- case LoadsState::StridedVectorize: {
- auto [ScalarGEPCost, VectorGEPCost] =
- getGEPCosts(TTI, ArrayRef(PointerOps).slice(I * VF, VF),
- LI0->getPointerOperand(), Instruction::Load,
- CostKind, ScalarTy, SubVecTy);
+ InstructionCost VecLdCost = 0;
+ if (!DemandedElts.isZero()) {
+ VecLdCost =
+ TTI.getScalarizationOverhead(VecTy, DemandedElts, /*Insert=*/true,
+ /*Extract=*/false, CostKind) +
+ ScalarGEPCost;
+ for (unsigned Idx : seq<unsigned>(VL.size()))
+ if (DemandedElts[Idx])
VecLdCost +=
- TTI.getStridedMemoryOpCost(
- Instruction::Load, SubVecTy, LI0->getPointerOperand(),
- /*VariableMask=*/false, CommonAlignment, CostKind) +
- VectorGEPCost - ScalarGEPCost;
- break;
- }
- case LoadsState::ScatterVectorize: {
- auto [ScalarGEPCost, VectorGEPCost] = getGEPCosts(
- TTI, ArrayRef(PointerOps).slice(I * VF, VF),
- LI0->getPointerOperand(), Instruction::GetElementPtr,
- CostKind, ScalarTy, SubVecTy);
- VecLdCost +=
- TTI.getGatherScatterOpCost(
- Instruction::Load, SubVecTy, LI0->getPointerOperand(),
- /*VariableMask=*/false, CommonAlignment, CostKind) +
- VectorGEPCost - ScalarGEPCost;
- break;
- }
- case LoadsState::Gather:
- llvm_unreachable(
- "Expected only consecutive, strided or masked gather loads.");
- }
- SmallVector<int> ShuffleMask(VL.size());
- for (int Idx : seq<int>(0, VL.size()))
- ShuffleMask[Idx] = Idx / VF == I ? VL.size() + Idx % VF : Idx;
+ TTI.getInstructionCost(cast<Instruction>(VL[Idx]), CostKind);
+ }
+ auto *SubVecTy = getWidenedType(ScalarTy, VF);
+ for (auto [I, LS] : enumerate(States)) {
+ auto *LI0 = cast<LoadInst>(VL[I * VF]);
+ InstructionCost VectorGEPCost =
+ (LS == LoadsState::ScatterVectorize && ProfitableGatherPointers)
+ ? 0
+ : getGEPCosts(TTI, ArrayRef(PointerOps).slice(I * VF, VF),
+ LI0->getPointerOperand(),
+ Instruction::GetElementPtr, CostKind, ScalarTy,
+ SubVecTy)
+ .second;
+ if (LS == LoadsState::ScatterVectorize) {
+ if (static_cast<unsigned>(
+ count_if(PointerOps, IsaPred<GetElementPtrInst>)) <
+ PointerOps.size() - 1 ||
+ any_of(PointerOps, [&](Value *V) {
+ return getUnderlyingObject(V) !=
+ getUnderlyingObject(PointerOps.front());
+ }))
+ VectorGEPCost += TTI.getScalarizationOverhead(
+ SubVecTy, APInt::getAllOnes(VF),
+ /*Insert=*/true, /*Extract=*/false, CostKind);
+ else
+ VectorGEPCost +=
+ TTI.getScalarizationOverhead(
+ SubVecTy, APInt::getOneBitSet(VF, 0),
+ /*Insert=*/true, /*Extract=*/false, CostKind) +
+ ::getShuffleCost(TTI, TTI::SK_Broadcast, SubVecTy,
+ std::nullopt, CostKind);
+ }
+ switch (LS) {
+ case LoadsState::Vectorize:
+ VecLdCost += TTI.getMemoryOpCost(
+ Instruction::Load, SubVecTy, LI0->getAlign(),
+ LI0->getPointerAddressSpace(), CostKind,
+ TTI::OperandValueInfo()) +
+ VectorGEPCost;
+ break;
+ case LoadsState::StridedVectorize:
+ VecLdCost += TTI.getStridedMemoryOpCost(Instruction::Load, SubVecTy,
+ LI0->getPointerOperand(),
+ /*VariableMask=*/false,
+ CommonAlignment, CostKind) +
+ VectorGEPCost;
+ break;
+ case LoadsState::ScatterVectorize:
+ VecLdCost += TTI.getGatherScatterOpCost(Instruction::Load, SubVecTy,
+ LI0->getPointerOperand(),
+ /*VariableMask=*/false,
+ CommonAlignment, CostKind) +
+ VectorGEPCost;
+ break;
+ case LoadsState::Gather:
+ // Gathers are already calculated - ignore.
+ continue;
+ }
+ SmallVector<int> ShuffleMask(VL.size());
+ for (int Idx : seq<int>(0, VL.size()))
+ ShuffleMask[Idx] = Idx / VF == I ? VL.size() + Idx % VF : Idx;
+ if (I > 0)
VecLdCost +=
::getShuffleCost(TTI, TTI::SK_InsertSubvector, VecTy,
ShuffleMask, CostKind, I * VF, SubVecTy);
- }
- // If masked gather cost is higher - better to vectorize, so
- // consider it as a gather node. It will be better estimated
- // later.
- if (MaskedGatherCost >= VecLdCost)
- return true;
}
+ // If masked gather cost is higher - better to vectorize, so
+ // consider it as a gather node. It will be better estimated
+ // later.
+ if (MaskedGatherCost >= VecLdCost &&
+ VecLdCost - GatherCost < -SLPCostThreshold)
+ return true;
}
- return false;
+ return MaskedGatherCost - GatherCost >= -SLPCostThreshold;
};
// TODO: need to improve analysis of the pointers, if not all of them are
// GEPs or have > 2 operands, we end up with a gather node, which just
@@ -4900,7 +4975,8 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
!TTI->forceScalarizeMaskedGather(VecTy, CommonAlignment)) {
// Check if potential masked gather can be represented as series
// of loads + insertsubvectors.
- if (TryRecursiveCheck && CheckForShuffledLoads(CommonAlignment)) {
+ if (TryRecursiveCheck &&
+ CheckForShuffledLoads(CommonAlignment, ProfitableGatherPointers)) {
// If masked gather cost is higher - better to vectorize, so
// consider it as a gather node. It will be better estimated
// later.
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr47629-inseltpoison.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr47629-inseltpoison.ll
index 5b33c6e889363e..89bc44dc1d530a 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/pr47629-inseltpoison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/pr47629-inseltpoison.ll
@@ -180,12 +180,20 @@ define void @gather_load_2(ptr noalias nocapture %0, ptr noalias nocapture reado
; AVX512F-NEXT: ret void
;
; AVX512VL-LABEL: @gather_load_2(
-; AVX512VL-NEXT: [[TMP3:%.*]] = insertelement <4 x ptr> poison, ptr [[TMP1:%.*]], i64 0
-; AVX512VL-NEXT: [[TMP4:%.*]] = shufflevector <4 x ptr> [[TMP3]], <4 x ptr> poison, <4 x i32> zeroinitializer
-; AVX512VL-NEXT: [[TMP5:%.*]] = getelementptr i32, <4 x ptr> [[TMP4]], <4 x i64> <i64 1, i64 10, i64 3, i64 5>
-; AVX512VL-NEXT: [[TMP6:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP5]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison), !tbaa [[TBAA0]]
-; AVX512VL-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[TMP6]], <i32 1, i32 2, i32 3, i32 4>
-; AVX512VL-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP0:%.*]], align 4, !tbaa [[TBAA0]]
+; AVX512VL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1:%.*]], i64 4
+; AVX512VL-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !tbaa [[TBAA0]]
+; AVX512VL-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 40
+; AVX512VL-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !tbaa [[TBAA0]]
+; AVX512VL-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 12
+; AVX512VL-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !tbaa [[TBAA0]]
+; AVX512VL-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 20
+; AVX512VL-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !tbaa [[TBAA0]]
+; AVX512VL-NEXT: [[TMP11:%.*]] = insertelement <4 x i32> poison, i32 [[TMP4]], i64 0
+; AVX512VL-NEXT: [[TMP12:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[TMP6]], i64 1
+; AVX512VL-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> [[TMP12]], i32 [[TMP8]], i64 2
+; AVX512VL-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP13]], i32 [[TMP10]], i64 3
+; AVX512VL-NEXT: [[TMP15:%.*]] = add nsw <4 x i32> [[TMP14]], <i32 1, i32 2, i32 3, i32 4>
+; AVX512VL-NEXT: store <4 x i32> [[TMP15]], ptr [[TMP0:%.*]], align 4, !tbaa [[TBAA0]]
; AVX512VL-NEXT: ret void
;
%3 = getelementptr inbounds i32, ptr %1, i64 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr47629.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr47629.ll
index 09d6c77557efaa..c1b501015e81e4 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/pr47629.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/pr47629.ll
@@ -180,12 +180,20 @@ define void @gather_load_2(ptr noalias nocapture %0, ptr noalias nocapture reado
; AVX512F-NEXT: ret void
;
; AVX512VL-LABEL: @gather_load_2(
-; AVX512VL-NEXT: [[TMP3:%.*]] = insertelement <4 x ptr> poison, ptr [[TMP1:%.*]], i64 0
-; AVX512VL-NEXT: [[TMP4:%.*]] = shufflevector <4 x ptr> [[TMP3]], <4 x ptr> poison, <4 x i32> zeroinitializer
-; AVX512VL-NEXT: [[TMP5:%.*]] = getelementptr i32, <4 x ptr> [[TMP4]], <4 x i64> <i64 1, i64 10, i64 3, i64 5>
-; AVX512VL-NEXT: [[TMP6:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP5]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison), !tbaa [[TBAA0]]
-; AVX512VL-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[TMP6]], <i32 1, i32 2, i32 3, i32 4>
-; AVX512VL-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP0:%.*]], align 4, !tbaa [[TBAA0]]
+; AVX512VL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1:%.*]], i64 4
+; AVX512VL-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !tbaa [[TBAA0]]
+; AVX512VL-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 40
+; AVX512VL-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !tbaa [[TBAA0]]
+; AVX512VL-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 12
+; AVX512VL-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !tbaa [[TBAA0]]
+; AVX512VL-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 20
+; AVX512VL-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !tbaa [[TBAA0]]
+; AVX512VL-NEXT: [[TMP11:%.*]] = insertelement <4 x i32> poison, i32 [[TMP4]], i64 0
+; AVX512VL-NEXT: [[TMP12:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[TMP6]], i64 1
+; AVX512VL-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> [[TMP12]], i32 [[TMP8]], i64 2
+; AVX512VL-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP13]], i32 [[TMP10]], i64 3
+; AVX512VL-NEXT: [[TMP15:%.*]] = add nsw <4 x i32> [[TMP14]], <i32 1, i32 2, i32 3, i32 4>
+; AVX512VL-NEXT: store <4 x i32> [[TMP15]], ptr [[TMP0:%.*]], align 4, !tbaa [[TBAA0]]
; AVX512VL-NEXT: ret void
;
%3 = getelementptr inbounds i32, ptr %1, i64 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll b/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
index 83457cc4966f7c..729d5fd5546dc8 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
@@ -10,19 +10,39 @@ declare void @llvm.masked.scatter.v2f64.v2p0(<2 x double>, <2 x ptr>, i32 immarg
define void @test(ptr nocapture readonly %arg, ptr nocapture readonly %arg1, ptr nocapture %arg2) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x ptr> poison, ptr [[ARG:%.*]], i32 0
-; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <8 x ptr> [[TMP0]], <8 x ptr> poison, <8 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, <8 x ptr> [[SHUFFLE]], <8 x i64> <i64 1, i64 3, i64 5, i64 7, i64 9, i64 11, i64 13, i64 15>
+; CHECK-NEXT: [[GEP1_0:%.*]] = getelementptr inbounds double, ptr [[ARG:%.*]], i64 1
+; CHECK-NEXT: [[LD1_0:%.*]] = load double, ptr [[GEP1_0]], align 8
; CHECK-NEXT: [[GEP2_0:%.*]] = getelementptr inbounds double, ptr [[ARG1:%.*]], i64 16
-; CHECK-NEXT: [[TMP2:%.*]] = call <8 x double> @llvm.masked.gather.v8f64.v8p0(<8 x ptr> [[TMP1]], i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x double> poison)
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x double>, ptr [[GEP2_0]], align 8
-; CHECK-NEXT: [[TMP5:%.*]] = fmul fast <8 x double> [[TMP4]], [[TMP2]]
-; CHECK-...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/105858