[llvm] [AArch64][CostModel] Consider the cost of const vector (PR #117539)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 25 02:53:43 PST 2024
llvmbot wrote:
@llvm/pr-subscribers-llvm-analysis
Author: Sushant Gokhale (sushgokh)
Currently, we consider the cost of a constant vector to be zero. Consider the example below:
```
%2 = fadd <2 x float> %1, <float 21.0, float 22.0>
```
Here, the cost of the constant vector `<float 21.0, float 22.0>` is considered zero.
However, this might not be the case: on AArch64, this constant vector is materialized with an `adrp + ldr` instruction pair.
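For illustration, the generated code looks roughly like this (a hand-written sketch, not taken from the patch; the constant-pool label and register choices are illustrative):
```
	adrp	x8, .LCPI0_0             // form the page address of the literal-pool entry
	ldr	d0, [x8, :lo12:.LCPI0_0] // load the 8-byte <2 x float> constant
	...
.LCPI0_0:
	.word	0x41a80000               // float 21.0
	.word	0x41b00000               // float 22.0
```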
This patch alters the AArch64 cost model to account for the cost of materializing constant vectors.
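In effect, the SLP gather cost for an all-constant, non-splat vector becomes `VectorCost - ScalarCost` rather than zero. A minimal self-contained sketch of that comparison (plain C++ mirroring the logic in the SLPVectorizer and AArch64TTIImpl hunks below, not code from the patch; the constant 4 is the patch's cost for the literal load):
```cpp
#include <cstdio>

// Sketch of the patch's cost comparison for gathering a constant vector.
// ScalarCost: one mov/fmov per unique constant. VectorCost: the adrp+ldr
// materialization, which the patch models as cost 4 for the literal load.
int gatherCostDelta(int numUniqueConstants, int legalizationParts) {
  int scalarCost = numUniqueConstants;
  // When the legalization cost is > 1, the patch falls back to the scalar
  // cost (see the FIXME in AArch64TTIImpl), making the delta zero.
  int vectorCost = legalizationParts > 1 ? scalarCost : 4;
  return vectorCost - scalarCost; // no longer hard-coded to zero
}

int main() {
  // <2 x float> <21.0, 22.0>: two unique constants, one legal part.
  std::printf("delta = %d\n", gatherCostDelta(2, 1)); // prints: delta = 2
}
```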
Performance results with SPEC CPU 2017 (tested with `-mcpu=neoverse-v2`; uplift indicated by a `+` sign):
| Benchmark | Perf diff(%) |
| ------------- | ------------- |
| 541.leela | +(2.2 - 3)% |
---
Patch is 34.75 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/117539.diff
12 Files Affected:
- (modified) llvm/include/llvm/Analysis/TargetTransformInfo.h (+22)
- (modified) llvm/include/llvm/Analysis/TargetTransformInfoImpl.h (+11)
- (modified) llvm/include/llvm/CodeGen/BasicTTIImpl.h (+12-4)
- (modified) llvm/lib/Analysis/TargetTransformInfo.cpp (+14)
- (modified) llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp (+28-6)
- (modified) llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h (+11-4)
- (modified) llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp (+69-3)
- (modified) llvm/test/Transforms/SLPVectorizer/AArch64/insertelement.ll (+8-2)
- (modified) llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll (+31-16)
- (modified) llvm/test/Transforms/SLPVectorizer/AArch64/vec3-base.ll (+7-4)
- (modified) llvm/test/Transforms/SLPVectorizer/jumbled_store_crash.ll (+78-2)
- (modified) llvm/test/Transforms/SLPVectorizer/materialize-vector-of-consts.ll (+20-9)
``````````diff
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 985ca1532e0149..6ee2e34fe3ee1d 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -1449,6 +1449,13 @@ class TargetTransformInfo {
const APInt &DemandedDstElts,
TTI::TargetCostKind CostKind) const;
+ /// \return The cost of materializing a constant vector.
+ InstructionCost
+ getConstVectCost(unsigned Opcode, Type *Src, Align Alignment,
+ unsigned AddressSpace, TTI::TargetCostKind CostKind,
+ OperandValueInfo OpdInfo, const Instruction *I,
+ const bool SrcIsConstVect, InstructionCost ScalarCost) const;
+
/// \return The cost of Load and Store instructions.
InstructionCost
getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
@@ -2148,6 +2155,11 @@ class TargetTransformInfo::Concept {
TTI::TargetCostKind CostKind) = 0;
virtual InstructionCost
+ getConstVectCost(unsigned Opcode, Type *Src, Align Alignment,
+ unsigned AddressSpace, TTI::TargetCostKind CostKind,
+ OperandValueInfo OpInfo, const Instruction *I,
+ const bool SrcIsConstVect, InstructionCost ScalarCost) = 0;
+ virtual InstructionCost
getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
unsigned AddressSpace, TTI::TargetCostKind CostKind,
OperandValueInfo OpInfo, const Instruction *I) = 0;
@@ -2850,6 +2862,16 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
return Impl.getReplicationShuffleCost(EltTy, ReplicationFactor, VF,
DemandedDstElts, CostKind);
}
+ InstructionCost getConstVectCost(unsigned Opcode, Type *Src, Align Alignment,
+ unsigned AddressSpace,
+ TTI::TargetCostKind CostKind,
+ OperandValueInfo OpInfo,
+ const Instruction *I,
+ const bool SrcIsConstVect,
+ InstructionCost ScalarCost) override {
+ return Impl.getConstVectCost(Opcode, Src, Alignment, AddressSpace, CostKind,
+ OpInfo, I, SrcIsConstVect, ScalarCost);
+ }
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
unsigned AddressSpace,
TTI::TargetCostKind CostKind,
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 38aba183f6a173..7f7d8f955f4b86 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -730,6 +730,17 @@ class TargetTransformInfoImplBase {
return 1;
}
+ InstructionCost getConstVectCost(unsigned Opcode, Type *Src, Align Alignment,
+ unsigned AddressSpace,
+ TTI::TargetCostKind CostKind,
+ TTI::OperandValueInfo OpInfo,
+ const Instruction *I,
+ const bool SrcIsConstVect,
+ InstructionCost ScalarCost) const {
+ // The vector cost is considered the same as the scalar cost.
+ return ScalarCost;
+ }
+
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
unsigned AddressSpace,
TTI::TargetCostKind CostKind,
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index d2fc40d8ae037e..4082fd1662d95d 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -1369,10 +1369,18 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
}
InstructionCost
- getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
- unsigned AddressSpace, TTI::TargetCostKind CostKind,
- TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
- const Instruction *I = nullptr) {
+ getConstVectCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
+ unsigned AddressSpace, TTI::TargetCostKind CostKind,
+ TTI::OperandValueInfo OpInfo, const Instruction *I,
+ const bool SrcIsConstVect, InstructionCost ScalarCost) {
+ return ScalarCost;
+ }
+
+ InstructionCost getMemoryOpCost(
+ unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
+ TTI::TargetCostKind CostKind,
+ TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
+ const Instruction *I = nullptr) {
assert(!Src->isVoidTy() && "Invalid type");
// Assume types, such as structs, are expensive.
if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 1fb2b9836de0cc..707a8f7193cf12 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -1099,6 +1099,20 @@ InstructionCost TargetTransformInfo::getReplicationShuffleCost(
return Cost;
}
+InstructionCost TargetTransformInfo::getConstVectCost(
+ unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
+ TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo,
+ const Instruction *I, const bool SrcIsConstVect,
+ InstructionCost ScalarCost) const {
+ assert((I == nullptr || I->getOpcode() == Opcode) &&
+ "Opcode should reflect passed instruction.");
+ InstructionCost Cost =
+ TTIImpl->getConstVectCost(Opcode, Src, Alignment, AddressSpace, CostKind,
+ OpInfo, I, SrcIsConstVect, ScalarCost);
+ assert(Cost >= 0 && "TTI should not produce negative costs!");
+ return Cost;
+}
+
InstructionCost TargetTransformInfo::getMemoryOpCost(
unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo,
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 7a1e401bca18cb..b657d819a8a260 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -3785,12 +3785,21 @@ bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
}
-InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
- MaybeAlign Alignment,
- unsigned AddressSpace,
- TTI::TargetCostKind CostKind,
- TTI::OperandValueInfo OpInfo,
- const Instruction *I) {
+// Return the cost of materializing a constant vector.
+InstructionCost AArch64TTIImpl::getConstVectCost(
+ unsigned Opcode, Type *Ty, MaybeAlign Alignment, unsigned AddressSpace,
+ TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo,
+ const Instruction *I, const bool SrcIsConstVect,
+ InstructionCost ScalarCost) {
+ return getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace, CostKind, OpInfo,
+ I, SrcIsConstVect, ScalarCost);
+}
+
+InstructionCost AArch64TTIImpl::getMemoryOpCost(
+ unsigned Opcode, Type *Ty, MaybeAlign Alignment, unsigned AddressSpace,
+ TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo,
+ const Instruction *I, const bool SrcIsConstVect,
+ InstructionCost ScalarCost) {
EVT VT = TLI->getValueType(DL, Ty, true);
// Type legalization can't handle structs
if (VT == MVT::Other)
@@ -3801,6 +3810,11 @@ InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
if (!LT.first.isValid())
return InstructionCost::getInvalid();
+ // FIXME: Consider the cost of materializing const vector where the
+ // legalization cost > 1.
+ if (SrcIsConstVect && LT.first.getValue().value() > 1)
+ return ScalarCost;
+
// The code-generator is currently not able to handle scalable vectors
// of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
// it. This change will be removed when code-generation for these types is
@@ -3845,6 +3859,14 @@ InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
// Otherwise we need to scalarize.
return cast<FixedVectorType>(Ty)->getNumElements() * 2;
}
+
+ // A const vector is lowered into `adrp + ldr`. This ldr is of the form
+ // "load vector reg, literal, S/D/Q forms" and has very high latency.
+ // FIXME: This only considers the cost of ldr. Also consider the cost of
+ // adrp.
+ if (SrcIsConstVect)
+ return 4;
+
EVT EltVT = VT.getVectorElementType();
unsigned EltSize = EltVT.getScalarSizeInBits();
if (!isPowerOf2_32(EltSize) || EltSize < 8 || EltSize > 64 ||
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index 201bc831b816b3..9f01410deb52b3 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -235,10 +235,17 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
bool useNeonVector(const Type *Ty) const;
InstructionCost
- getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
- unsigned AddressSpace, TTI::TargetCostKind CostKind,
- TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
- const Instruction *I = nullptr);
+ getConstVectCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
+ unsigned AddressSpace, TTI::TargetCostKind CostKind,
+ TTI::OperandValueInfo OpInfo, const Instruction *I,
+ const bool SrcIsConstVect, InstructionCost ScalarCost);
+
+ InstructionCost getMemoryOpCost(
+ unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
+ TTI::TargetCostKind CostKind,
+ TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
+ const Instruction *I = nullptr, const bool SrcIsConstVect = false,
+ InstructionCost ScalarCost = 10000);
InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index d033b7c2ef4a92..f49979107fe3a6 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -3681,7 +3681,24 @@ class BoUpSLP {
if (AllConstsOrCasts)
CastMaxMinBWSizes =
std::make_pair(std::numeric_limits<unsigned>::max(), 1);
- MustGather.insert(VL.begin(), VL.end());
+ // Recording an all-constants entry helps avoid counting the cost of a
+ // const vector twice.
+ if (allConstant(VL) && !isSplat(VL)) {
+ for (Value *V : VL) {
+ const TreeEntry *TE = getTreeEntry(V);
+ assert((!TE || TE == Last || doesNotNeedToBeScheduled(V)) &&
+ "Scalar already in tree!");
+ if (TE) {
+ if (TE != Last)
+ MultiNodeScalars.try_emplace(V).first->getSecond().push_back(
+ Last);
+ continue;
+ }
+ ScalarToTreeEntry[V] = Last;
+ }
+ } else {
+ MustGather.insert(VL.begin(), VL.end());
+ }
}
if (UserTreeIdx.UserTE)
@@ -11019,8 +11036,54 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
if (E->isGather()) {
- if (allConstant(VL))
+ if (allConstant(VL)) {
+ auto IsAllowedScalarTy = [&](const Type *T) {
+ return T->isFloatTy() || T->isDoubleTy() || T->isIntegerTy();
+ };
+ if (IsAllowedScalarTy(E->Scalars.front()->getType())) {
+ InstructionCost ScalarCost, VectorCost;
+
+ auto IsDuplicateEntry = [&](const TreeEntry *E) {
+ auto *TE = getTreeEntry(E->Scalars[0]);
+ if (TE != E) {
+ auto It = MultiNodeScalars.find(E->Scalars[0]);
+ if (It != MultiNodeScalars.end()) {
+ auto *TEIt = find_if(It->getSecond(), [&](TreeEntry *ME) {
+ return ME->isSame(VL);
+ });
+ if (TEIt != It->getSecond().end())
+ return true;
+ }
+ }
+ return false;
+ };
+
+ // FIXME: If there is more than 1 SLP tree realizing the same const
+ // vector, codegen will realize it only once. Hence, no need to consider
+ // the cost of the const vector twice. But currently we can't check
+ // whether the tree entry is present in another SLP tree.
+ if (!isSplat(E->Scalars) && !all_of(E->Scalars, IsaPred<UndefValue>) &&
+ !IsDuplicateEntry(E)) {
+ // Get unique scalars
+ SmallDenseSet<Value *> UniqScalars;
+ for (auto *V : E->Scalars)
+ UniqScalars.insert(V);
+
+ // Each unique scalar constant is materialized with a mov/fmov. So,
+ // ScalarCost = #UniqScalars
+ ScalarCost = UniqScalars.size();
+
+ // FIXME: Ideally, the getGatherCost API should be used for this, but
+ // currently it returns zero cost with all constants.
+ VectorCost =
+ TTI->getConstVectCost(Instruction::Load, FinalVecTy, Align(), 0,
+ CostKind, TTI::OperandValueInfo(), nullptr,
+ /*AllConstants=*/true, ScalarCost);
+ }
+ return VectorCost - ScalarCost;
+ }
return 0;
+ }
if (isa<InsertElementInst>(VL[0]))
return InstructionCost::getInvalid();
if (isa<CmpInst>(VL.front()))
@@ -12557,7 +12620,10 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
auto *Inst = cast<Instruction>(EU.Scalar);
InstructionCost ScalarCost = TTI->getInstructionCost(Inst, CostKind);
auto OperandIsScalar = [&](Value *V) {
- if (!getTreeEntry(V)) {
+ if (auto *TE = getTreeEntry(V);
+ // An all-constants entry does not result in a separate instruction.
+ // Ignore such entries.
+ !TE || (TE && allConstant(TE->Scalars))) {
// Some extractelements might be not vectorized, but
// transformed into shuffle and removed from the function,
// consider it here.
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/insertelement.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/insertelement.ll
index 1198bb1d509ebb..d6d46e54456142 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/insertelement.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/insertelement.ll
@@ -39,10 +39,16 @@ declare float @llvm.fabs.f32(float)
define <4 x float> @insertelement_poison_lanes(ptr %0) {
; CHECK-LABEL: @insertelement_poison_lanes(
+; CHECK-NEXT: [[TRUNC_1:%.*]] = fptrunc double 0.000000e+00 to float
+; CHECK-NEXT: [[TRUNC_2:%.*]] = fptrunc double 1.000000e+00 to float
; CHECK-NEXT: [[INS_1:%.*]] = insertelement <4 x float> zeroinitializer, float poison, i64 0
-; CHECK-NEXT: [[INS_2:%.*]] = insertelement <4 x float> [[INS_1]], float 0.000000e+00, i64 0
+; CHECK-NEXT: [[INS_2:%.*]] = insertelement <4 x float> [[INS_1]], float [[TRUNC_1]], i64 0
+; CHECK-NEXT: [[EXT_1:%.*]] = fpext float [[TRUNC_1]] to double
; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr double, ptr [[TMP0:%.*]], i64 1
-; CHECK-NEXT: store <2 x double> <double 0.000000e+00, double 1.000000e+00>, ptr [[GEP_1]], align 8
+; CHECK-NEXT: store double [[EXT_1]], ptr [[GEP_1]], align 8
+; CHECK-NEXT: [[EXT_2:%.*]] = fpext float [[TRUNC_2]] to double
+; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr double, ptr [[TMP0]], i64 2
+; CHECK-NEXT: store double [[EXT_2]], ptr [[GEP_2]], align 8
; CHECK-NEXT: ret <4 x float> [[INS_2]]
;
%trunc.1 = fptrunc double 0.000000e+00 to float
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll
index 9f5744b17cb79e..39f3864820a86b 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll
@@ -600,15 +600,27 @@ bb15: ; preds = %bb15, %bb14
define void @test_bounds_removed_before_runtime_checks(ptr %A, ptr %B, i1 %c) {
; CHECK-LABEL: @test_bounds_removed_before_runtime_checks(
; CHECK-NEXT: entry:
-; CHECK-NEXT: store <2 x i32> <i32 10, i32 300>, ptr [[A:%.*]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = fmul float 1.000000e+01, 2.000000e+01
+; CHECK-NEXT: [[TMP2:%.*]] = fptosi float [[TMP1]] to i32
+; CHECK-NEXT: [[TMP3:%.*]] = fmul float 3.000000e+01, 2.000000e+01
+; CHECK-NEXT: [[TMP4:%.*]] = fptosi float [[TMP3]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = icmp sgt i32 100, [[TMP2]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP2]], i32 10
+; CHECK-NEXT: [[TMP7:%.*]] = select i1 false, i32 0, i32 [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt i32 200, [[TMP4]]
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i32 [[TMP4]], i32 300
+; CHECK-NEXT: [[TMP10:%.*]] = select i1 false, i32 0, i32 [[TMP9]]
+; CHECK-NEXT: store i32 [[TMP7]], ptr [[A:%.*]], align 8
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT:%.*]], ptr [[A]], i64 0, i32 1
+; CHECK-NEXT: store i32 [[TMP10]], ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[B:%.*]], align 8
; CHECK-NEXT: br i1 [[C:%.*]], label [[BB23:%.*]], label [[BB14:%.*]]
; CHECK: bb14:
-; CHECK-NEXT: [[TMP15:%.*]] = sext i32 10 to i64
+; CHECK-NEXT: [[TMP15:%.*]] = sext i32 [[TMP7]] to i64
; CHECK-NEXT: [[TMP16:%.*]] = add nsw i64 2, [[TMP15]]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i64 [[TMP16]]
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP17]], i64 3
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT:%.*]], ptr [[A]], i64 0, i32 2
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT]], ptr [[A]], i64 0, i32 2
; CHECK-NEXT: store float 0.000000e+00, ptr [[TMP20]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = load i8, ptr [[TMP19]], align 1
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT]], ptr [[A]], i64 0, i32 3
@@ -654,17 +666,18 @@ bb23:
define void @single_membound(ptr %arg, ptr %arg1, double %x) {
; CHECK-LABEL: @single_membound(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[ARG:%.*]], i64 1
; CHECK-NEXT: [[TMP:%.*]] = fsub double [[X:%.*]], 9.900000e+01
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[ARG:%.*]], i64 1
; CHECK-NEXT: store double [[TMP]], ptr [[TMP9]], align 8
; CHECK-NEXT: [[TMP12:%.*]] = load double, ptr [[ARG1:%.*]], align 8
; CHECK-NEXT: [[TMP13:%.*]] = fsub double 1.000000e+00, [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 2
; CHECK-NEXT: br label [[BB15:%.*]]
; CHECK: bb15:
-; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> poison, double [[TMP]], i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[TMP13]], i32 1
-; CHECK-NEXT: [[TMP2:%.*]] = fmul <2 x double> [[TMP1]], <double 2.000000e+01, double 3.000000e+01>
-; CHECK-NEXT: store <2 x double> [[TMP2]], ptr [[TMP9]], align 8
+; CHECK-NEXT: [[TMP16:%.*]] = fmul double [[TMP]], 2.000000e+01
+; CHECK-NEXT: store double [[TMP16]], ptr [[TMP9]], align 8
+; CHECK-NEXT: [[TMP17:%.*]] = fmul double [[TMP13]], 3.000000e+01
+; CHECK-NEXT: store doub...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/117539