[llvm] [ARM][SLP] Fix cost function for SLP Vectorization of ZExt/SExt (PR #122713)
Nashe Mncube via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 20 02:05:26 PST 2025
https://github.com/nasherm updated https://github.com/llvm/llvm-project/pull/122713
From 10de91137dc2d1b357b8acd07f3487012b318116 Mon Sep 17 00:00:00 2001
From: nasmnc01 <nashe.mncube at arm.com>
Date: Thu, 9 Jan 2025 14:04:39 +0000
Subject: [PATCH] [ARM][SLP] Fix incorrect cost function for SLP Vectorization
of ZExt/SExt
PR #117350 changed the SLP vectorizer in a way that introduced
a regression on ARM vectorization benchmarks. The change assumed
that SExt/ZExt vector instructions have a constant cost. That
assumption holds for RISC-V, but not for ARM, where the source
and destination types of SExt/ZExt instructions are taken into
account when calculating vector cost.
Change-Id: I6f995dcde26e5aaf62b779b63e52988fb333f941
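
As a rough illustration of the intent (not the actual TTI code in the
diff below), the cost of an arithmetic extended reduction is modelled
as the reduction cost plus the cost of widening the source elements to
the result type, rather than treating the extension as free. The
standalone sketch below uses hypothetical placeholder costs and helper
names purely for illustration.

#include <cstdint>
#include <iostream>

// Reduction opcodes; Min stands in for a non-arithmetic reduction.
enum class Opcode { Add, Mul, And, Or, Xor, Min };

// Hypothetical per-operation costs, for illustration only.
static int64_t reductionCost(Opcode) { return 2; }
static int64_t castCost(unsigned SrcBits, unsigned DstBits) {
  // A widening cast gets more expensive the further apart the types are.
  return DstBits > SrcBits ? int64_t(DstBits / SrcBits) : 0;
}

static bool isArithmetic(Opcode Op) {
  switch (Op) {
  case Opcode::Add: case Opcode::Mul: case Opcode::And:
  case Opcode::Or:  case Opcode::Xor:
    return true;
  default:
    return false;
  }
}

// Extended-reduction cost: reduction cost plus, for arithmetic
// reductions only, the cost of extending the source elements.
static int64_t extendedReductionCost(Opcode Op, unsigned SrcBits,
                                     unsigned DstBits) {
  int64_t Cost = reductionCost(Op);
  if (isArithmetic(Op))
    Cost += castCost(SrcBits, DstBits);
  return Cost;
}

int main() {
  // sext i32 -> i64 feeding an add reduction: the cast is not free.
  std::cout << extendedReductionCost(Opcode::Add, 32, 64) << "\n"; // 4
  // Non-arithmetic reduction: no cast cost is added.
  std::cout << extendedReductionCost(Opcode::Min, 32, 64) << "\n"; // 2
}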
---
.../lib/Target/ARM/ARMTargetTransformInfo.cpp | 18 +-
...nsive-arithmetic-extended-reduction-mve.ll | 354 ++++++++++++++++++
2 files changed, 367 insertions(+), 5 deletions(-)
create mode 100644 llvm/test/Transforms/SLPVectorizer/ARM/expensive-arithmetic-extended-reduction-mve.ll
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 639f3bf8fc62e3..a0239d19c56079 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1791,11 +1791,19 @@ InstructionCost ARMTTIImpl::getExtendedReductionCost(
int ISD = TLI->InstructionOpcodeToISD(Opcode);
+ bool IsArithmeticExtendedReduction = is_contained(
+ {ISD::ADD, ISD::FADD, ISD::MUL, ISD::FMUL, ISD::AND, ISD::OR, ISD::XOR},
+ ISD);
+ InstructionCost CastCost =
+ (IsArithmeticExtendedReduction)
+ ? getCastInstrCost(Opcode, ResTy, ValTy, TTI::CastContextHint::None,
+ CostKind)
+ : 0;
+
switch (ISD) {
case ISD::ADD:
if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
-
// The legal cases are:
// VADDV u/s 8/16/32
// VADDLV u/s 32
@@ -1807,14 +1815,14 @@ InstructionCost ARMTTIImpl::getExtendedReductionCost(
((LT.second == MVT::v16i8 && RevVTSize <= 32) ||
(LT.second == MVT::v8i16 && RevVTSize <= 32) ||
(LT.second == MVT::v4i32 && RevVTSize <= 64)))
- return ST->getMVEVectorCostFactor(CostKind) * LT.first;
+ return CastCost + ST->getMVEVectorCostFactor(CostKind) * LT.first;
}
- break;
+
default:
break;
}
- return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, ValTy, FMF,
- CostKind);
+ return CastCost + BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy,
+ ValTy, FMF, CostKind);
}
InstructionCost
diff --git a/llvm/test/Transforms/SLPVectorizer/ARM/expensive-arithmetic-extended-reduction-mve.ll b/llvm/test/Transforms/SLPVectorizer/ARM/expensive-arithmetic-extended-reduction-mve.ll
new file mode 100644
index 00000000000000..fc23141a86a2fc
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/ARM/expensive-arithmetic-extended-reduction-mve.ll
@@ -0,0 +1,354 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=arm-none-eabi --mattr=+mve | FileCheck %s
+
+
+define dso_local i64 @vadd(ptr noundef %0) #0 {
+; CHECK-LABEL: define dso_local i64 @vadd(
+; CHECK-SAME: ptr noundef [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[TMP2:%.*]] = alloca ptr, align 4
+; CHECK-NEXT: store ptr [[TMP0]], ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
+; CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[TMP11:%.*]] = add nsw i64 [[TMP6]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 2
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = sext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = add nsw i64 [[TMP11]], [[TMP15]]
+; CHECK-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i32 3
+; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
+; CHECK-NEXT: [[TMP20:%.*]] = sext i32 [[TMP19]] to i64
+; CHECK-NEXT: [[TMP21:%.*]] = add nsw i64 [[TMP16]], [[TMP20]]
+; CHECK-NEXT: ret i64 [[TMP21]]
+;
+ %2 = alloca ptr, align 4
+ store ptr %0, ptr %2, align 4
+ %3 = load ptr, ptr %2, align 4
+ %4 = getelementptr inbounds i32, ptr %3, i32 0
+ %5 = load i32, ptr %4, align 4
+ %6 = sext i32 %5 to i64
+ %7 = load ptr, ptr %2, align 4
+ %8 = getelementptr inbounds i32, ptr %7, i32 1
+ %9 = load i32, ptr %8, align 4
+ %10 = sext i32 %9 to i64
+ %11 = add nsw i64 %6, %10
+ %12 = load ptr, ptr %2, align 4
+ %13 = getelementptr inbounds i32, ptr %12, i32 2
+ %14 = load i32, ptr %13, align 4
+ %15 = sext i32 %14 to i64
+ %16 = add nsw i64 %11, %15
+ %17 = load ptr, ptr %2, align 4
+ %18 = getelementptr inbounds i32, ptr %17, i32 3
+ %19 = load i32, ptr %18, align 4
+ %20 = sext i32 %19 to i64
+ %21 = add nsw i64 %16, %20
+ ret i64 %21
+}
+
+define dso_local i64 @vmul(ptr noundef %0) #0 {
+; CHECK-LABEL: define dso_local i64 @vmul(
+; CHECK-SAME: ptr noundef [[TMP0:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP2:%.*]] = alloca ptr, align 4
+; CHECK-NEXT: store ptr [[TMP0]], ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
+; CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[TMP11:%.*]] = mul nsw i64 [[TMP6]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 2
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = sext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = mul nsw i64 [[TMP11]], [[TMP15]]
+; CHECK-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i32 3
+; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
+; CHECK-NEXT: [[TMP20:%.*]] = sext i32 [[TMP19]] to i64
+; CHECK-NEXT: [[TMP21:%.*]] = mul nsw i64 [[TMP16]], [[TMP20]]
+; CHECK-NEXT: ret i64 [[TMP21]]
+;
+ %2 = alloca ptr, align 4
+ store ptr %0, ptr %2, align 4
+ %3 = load ptr, ptr %2, align 4
+ %4 = getelementptr inbounds i32, ptr %3, i32 0
+ %5 = load i32, ptr %4, align 4
+ %6 = sext i32 %5 to i64
+ %7 = load ptr, ptr %2, align 4
+ %8 = getelementptr inbounds i32, ptr %7, i32 1
+ %9 = load i32, ptr %8, align 4
+ %10 = sext i32 %9 to i64
+ %11 = mul nsw i64 %6, %10
+ %12 = load ptr, ptr %2, align 4
+ %13 = getelementptr inbounds i32, ptr %12, i32 2
+ %14 = load i32, ptr %13, align 4
+ %15 = sext i32 %14 to i64
+ %16 = mul nsw i64 %11, %15
+ %17 = load ptr, ptr %2, align 4
+ %18 = getelementptr inbounds i32, ptr %17, i32 3
+ %19 = load i32, ptr %18, align 4
+ %20 = sext i32 %19 to i64
+ %21 = mul nsw i64 %16, %20
+ ret i64 %21
+}
+
+define dso_local i64 @vand(ptr noundef %0) #0 {
+; CHECK-LABEL: define dso_local i64 @vand(
+; CHECK-SAME: ptr noundef [[TMP0:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP2:%.*]] = alloca ptr, align 4
+; CHECK-NEXT: store ptr [[TMP0]], ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
+; CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP6]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 2
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = sext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = and i64 [[TMP11]], [[TMP15]]
+; CHECK-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i32 3
+; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
+; CHECK-NEXT: [[TMP20:%.*]] = sext i32 [[TMP19]] to i64
+; CHECK-NEXT: [[TMP21:%.*]] = and i64 [[TMP16]], [[TMP20]]
+; CHECK-NEXT: ret i64 [[TMP21]]
+;
+ %2 = alloca ptr, align 4
+ store ptr %0, ptr %2, align 4
+ %3 = load ptr, ptr %2, align 4
+ %4 = getelementptr inbounds i32, ptr %3, i32 0
+ %5 = load i32, ptr %4, align 4
+ %6 = sext i32 %5 to i64
+ %7 = load ptr, ptr %2, align 4
+ %8 = getelementptr inbounds i32, ptr %7, i32 1
+ %9 = load i32, ptr %8, align 4
+ %10 = sext i32 %9 to i64
+ %11 = and i64 %6, %10
+ %12 = load ptr, ptr %2, align 4
+ %13 = getelementptr inbounds i32, ptr %12, i32 2
+ %14 = load i32, ptr %13, align 4
+ %15 = sext i32 %14 to i64
+ %16 = and i64 %11, %15
+ %17 = load ptr, ptr %2, align 4
+ %18 = getelementptr inbounds i32, ptr %17, i32 3
+ %19 = load i32, ptr %18, align 4
+ %20 = sext i32 %19 to i64
+ %21 = and i64 %16, %20
+ ret i64 %21
+}
+
+define dso_local i64 @vor(ptr noundef %0) #0 {
+; CHECK-LABEL: define dso_local i64 @vor(
+; CHECK-SAME: ptr noundef [[TMP0:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP2:%.*]] = alloca ptr, align 4
+; CHECK-NEXT: store ptr [[TMP0]], ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
+; CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP6]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 2
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = sext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = or i64 [[TMP11]], [[TMP15]]
+; CHECK-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i32 3
+; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
+; CHECK-NEXT: [[TMP20:%.*]] = sext i32 [[TMP19]] to i64
+; CHECK-NEXT: [[TMP21:%.*]] = or i64 [[TMP16]], [[TMP20]]
+; CHECK-NEXT: ret i64 [[TMP21]]
+;
+ %2 = alloca ptr, align 4
+ store ptr %0, ptr %2, align 4
+ %3 = load ptr, ptr %2, align 4
+ %4 = getelementptr inbounds i32, ptr %3, i32 0
+ %5 = load i32, ptr %4, align 4
+ %6 = sext i32 %5 to i64
+ %7 = load ptr, ptr %2, align 4
+ %8 = getelementptr inbounds i32, ptr %7, i32 1
+ %9 = load i32, ptr %8, align 4
+ %10 = sext i32 %9 to i64
+ %11 = or i64 %6, %10
+ %12 = load ptr, ptr %2, align 4
+ %13 = getelementptr inbounds i32, ptr %12, i32 2
+ %14 = load i32, ptr %13, align 4
+ %15 = sext i32 %14 to i64
+ %16 = or i64 %11, %15
+ %17 = load ptr, ptr %2, align 4
+ %18 = getelementptr inbounds i32, ptr %17, i32 3
+ %19 = load i32, ptr %18, align 4
+ %20 = sext i32 %19 to i64
+ %21 = or i64 %16, %20
+ ret i64 %21
+}
+
+define dso_local i64 @vxor(ptr noundef %0) #0 {
+; CHECK-LABEL: define dso_local i64 @vxor(
+; CHECK-SAME: ptr noundef [[TMP0:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP2:%.*]] = alloca ptr, align 4
+; CHECK-NEXT: store ptr [[TMP0]], ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
+; CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[TMP11:%.*]] = xor i64 [[TMP6]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 2
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = sext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = xor i64 [[TMP11]], [[TMP15]]
+; CHECK-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i32 3
+; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
+; CHECK-NEXT: [[TMP20:%.*]] = sext i32 [[TMP19]] to i64
+; CHECK-NEXT: [[TMP21:%.*]] = xor i64 [[TMP16]], [[TMP20]]
+; CHECK-NEXT: ret i64 [[TMP21]]
+;
+ %2 = alloca ptr, align 4
+ store ptr %0, ptr %2, align 4
+ %3 = load ptr, ptr %2, align 4
+ %4 = getelementptr inbounds i32, ptr %3, i32 0
+ %5 = load i32, ptr %4, align 4
+ %6 = sext i32 %5 to i64
+ %7 = load ptr, ptr %2, align 4
+ %8 = getelementptr inbounds i32, ptr %7, i32 1
+ %9 = load i32, ptr %8, align 4
+ %10 = sext i32 %9 to i64
+ %11 = xor i64 %6, %10
+ %12 = load ptr, ptr %2, align 4
+ %13 = getelementptr inbounds i32, ptr %12, i32 2
+ %14 = load i32, ptr %13, align 4
+ %15 = sext i32 %14 to i64
+ %16 = xor i64 %11, %15
+ %17 = load ptr, ptr %2, align 4
+ %18 = getelementptr inbounds i32, ptr %17, i32 3
+ %19 = load i32, ptr %18, align 4
+ %20 = sext i32 %19 to i64
+ %21 = xor i64 %16, %20
+ ret i64 %21
+}
+
+define dso_local double @vfadd(ptr noundef %0) #0 {
+; CHECK-LABEL: define dso_local double @vfadd(
+; CHECK-SAME: ptr noundef [[TMP0:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP2:%.*]] = alloca ptr, align 4
+; CHECK-NEXT: store ptr [[TMP0]], ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP4]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = fpext float [[TMP5]] to double
+; CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = fpext float [[TMP9]] to double
+; CHECK-NEXT: [[TMP11:%.*]] = fadd double [[TMP6]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 2
+; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[TMP13]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = fpext float [[TMP14]] to double
+; CHECK-NEXT: [[TMP16:%.*]] = fadd double [[TMP11]], [[TMP15]]
+; CHECK-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 3
+; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[TMP18]], align 4
+; CHECK-NEXT: [[TMP20:%.*]] = fpext float [[TMP19]] to double
+; CHECK-NEXT: [[TMP21:%.*]] = fadd double [[TMP16]], [[TMP20]]
+; CHECK-NEXT: ret double [[TMP21]]
+;
+ %2 = alloca ptr, align 4
+ store ptr %0, ptr %2, align 4
+ %3 = load ptr, ptr %2, align 4
+ %4 = getelementptr inbounds float, ptr %3, i32 0
+ %5 = load float, ptr %4, align 4
+ %6 = fpext float %5 to double
+ %7 = load ptr, ptr %2, align 4
+ %8 = getelementptr inbounds float, ptr %7, i32 1
+ %9 = load float, ptr %8, align 4
+ %10 = fpext float %9 to double
+ %11 = fadd double %6, %10
+ %12 = load ptr, ptr %2, align 4
+ %13 = getelementptr inbounds float, ptr %12, i32 2
+ %14 = load float, ptr %13, align 4
+ %15 = fpext float %14 to double
+ %16 = fadd double %11, %15
+ %17 = load ptr, ptr %2, align 4
+ %18 = getelementptr inbounds float, ptr %17, i32 3
+ %19 = load float, ptr %18, align 4
+ %20 = fpext float %19 to double
+ %21 = fadd double %16, %20
+ ret double %21
+}
+
+define dso_local double @vfmul(ptr noundef %0) #0 {
+; CHECK-LABEL: define dso_local double @vfmul(
+; CHECK-SAME: ptr noundef [[TMP0:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP2:%.*]] = alloca ptr, align 4
+; CHECK-NEXT: store ptr [[TMP0]], ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP4]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = fpext float [[TMP5]] to double
+; CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = fpext float [[TMP9]] to double
+; CHECK-NEXT: [[TMP11:%.*]] = fmul double [[TMP6]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 2
+; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[TMP13]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = fpext float [[TMP14]] to double
+; CHECK-NEXT: [[TMP16:%.*]] = fmul double [[TMP11]], [[TMP15]]
+; CHECK-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 3
+; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[TMP18]], align 4
+; CHECK-NEXT: [[TMP20:%.*]] = fpext float [[TMP19]] to double
+; CHECK-NEXT: [[TMP21:%.*]] = fmul double [[TMP16]], [[TMP20]]
+; CHECK-NEXT: ret double [[TMP21]]
+;
+ %2 = alloca ptr, align 4
+ store ptr %0, ptr %2, align 4
+ %3 = load ptr, ptr %2, align 4
+ %4 = getelementptr inbounds float, ptr %3, i32 0
+ %5 = load float, ptr %4, align 4
+ %6 = fpext float %5 to double
+ %7 = load ptr, ptr %2, align 4
+ %8 = getelementptr inbounds float, ptr %7, i32 1
+ %9 = load float, ptr %8, align 4
+ %10 = fpext float %9 to double
+ %11 = fmul double %6, %10
+ %12 = load ptr, ptr %2, align 4
+ %13 = getelementptr inbounds float, ptr %12, i32 2
+ %14 = load float, ptr %13, align 4
+ %15 = fpext float %14 to double
+ %16 = fmul double %11, %15
+ %17 = load ptr, ptr %2, align 4
+ %18 = getelementptr inbounds float, ptr %17, i32 3
+ %19 = load float, ptr %18, align 4
+ %20 = fpext float %19 to double
+ %21 = fmul double %16, %20
+ ret double %21
+}
+