[llvm] [ARM][SLP] Fix cost function for SLP Vectorization of ZExt/SExt (PR #122713)

Nashe Mncube via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 22 04:59:45 PST 2025


https://github.com/nasherm updated https://github.com/llvm/llvm-project/pull/122713

>From 28b9d6a18ead88930e0b8836f97c1161dd78aac2 Mon Sep 17 00:00:00 2001
From: nasmnc01 <nashe.mncube at arm.com>
Date: Thu, 9 Jan 2025 14:04:39 +0000
Subject: [PATCH 1/3] [ARM][SLP] Fix incorrect cost function for SLP
 Vectorization of ZExt/SExt

PR #117350 made changes to the SLP vectorizer that introduced
a regression on ARM vectorization benchmarks. The changes
assumed that SExt/ZExt vector instructions have a constant
cost. That assumption holds for RISC-V, but not for ARM, where
the cost of a SExt/ZExt instruction depends on its source and
destination types.
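To make that concrete: the first revision of this change models
the extend cost with a small (sign/zero extend, destination type,
source type) -> cost table and adds the looked-up value to the
reduction cost (see the CastCost lambda in the diff below). What
follows is a standalone, illustrative sketch of that shape in
plain C++, with string-named types and made-up costs; it is not
the LLVM TTI code itself.

  // Standalone sketch of the cost-table idea (illustrative values only;
  // the real entries live in ARMTTIImpl::getExtendedReductionCost).
  #include <optional>
  #include <string>
  #include <vector>

  struct ExtCostEntry {
    bool IsSigned;       // sext (true) vs zext (false)
    std::string DstVT;   // e.g. "v4i32"
    std::string SrcVT;   // e.g. "v4i16"
    unsigned Cost;       // extra instructions needed for the extend
  };

  // Mirrors the shape of the MVE entries added in the diff below.
  static const std::vector<ExtCostEntry> MVEExtCosts = {
      {true, "v8i16", "v8i8", 1},  {false, "v8i16", "v8i8", 1},
      {true, "v4i32", "v4i8", 2},  {false, "v4i32", "v4i8", 2},
      {true, "v4i32", "v4i16", 1}, {false, "v4i32", "v4i16", 1},
  };

  std::optional<unsigned> lookupExtCost(bool IsSigned, const std::string &Dst,
                                        const std::string &Src) {
    for (const auto &E : MVEExtCosts)
      if (E.IsSigned == IsSigned && E.DstVT == Dst && E.SrcVT == Src)
        return E.Cost;
    return std::nullopt; // no entry: the extend contributes no extra cost
  }

  // The extended-reduction cost becomes "extend cost + base reduction cost"
  // instead of the base reduction cost alone.
  unsigned extendedReductionCost(bool IsSigned, const std::string &Dst,
                                 const std::string &Src, unsigned BaseCost) {
    return lookupExtCost(IsSigned, Dst, Src).value_or(0) + BaseCost;
  }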

Change-Id: I6f995dcde26e5aaf62b779b63e52988fb333f941
---
 .../lib/Target/ARM/ARMTargetTransformInfo.cpp |  26 +-
 ...nsive-arithmetic-extended-reduction-mve.ll | 285 ++++++++++++++++++
 2 files changed, 309 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/Transforms/SLPVectorizer/ARM/expensive-arithmetic-extended-reduction-mve.ll

diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 639f3bf8fc62e3..3e282639449f88 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1791,11 +1791,33 @@ InstructionCost ARMTTIImpl::getExtendedReductionCost(
 
   int ISD = TLI->InstructionOpcodeToISD(Opcode);
 
+  auto CastCost = [=]() -> unsigned {
+    // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
+    // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; i64
+    // sexts are linearised and so cost more.
+    static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
+        {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1},
+        {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1},
+        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2},
+        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2},
+        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1},
+        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1},
+    };
+
+    if (ST->hasMVEIntegerOps()) {
+      if (const auto *Entry = ConvertCostTableLookup(
+              MVEVectorConversionTbl,
+              (IsUnsigned) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND,
+              ResVT.getSimpleVT(), ValVT.getSimpleVT()))
+        return Entry->Cost;
+    }
+    return 0;
+  };
+
   switch (ISD) {
   case ISD::ADD:
     if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
       std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
-
       // The legal cases are:
       //   VADDV u/s 8/16/32
       //   VADDLV u/s 32
@@ -1807,7 +1829,7 @@ InstructionCost ARMTTIImpl::getExtendedReductionCost(
           ((LT.second == MVT::v16i8 && RevVTSize <= 32) ||
            (LT.second == MVT::v8i16 && RevVTSize <= 32) ||
            (LT.second == MVT::v4i32 && RevVTSize <= 64)))
-        return ST->getMVEVectorCostFactor(CostKind) * LT.first;
+        return CastCost() + ST->getMVEVectorCostFactor(CostKind) * LT.first;
     }
     break;
   default:
diff --git a/llvm/test/Transforms/SLPVectorizer/ARM/expensive-arithmetic-extended-reduction-mve.ll b/llvm/test/Transforms/SLPVectorizer/ARM/expensive-arithmetic-extended-reduction-mve.ll
new file mode 100644
index 00000000000000..f84bc7dc076f1b
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/ARM/expensive-arithmetic-extended-reduction-mve.ll
@@ -0,0 +1,285 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes="default<O1>,slp-vectorizer" -S -mtriple=arm-none-eabi --mattr=+mve | FileCheck %s
+
+
+define dso_local i64 @vadd(ptr noundef %0) #0 {
+; CHECK-LABEL: define dso_local range(i64 -8589934592, 8589934589) i64 @vadd(
+; CHECK-SAME: ptr nocapture noundef readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = sext <4 x i32> [[TMP2]] to <4 x i64>
+; CHECK-NEXT:    [[TMP21:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP3]])
+; CHECK-NEXT:    ret i64 [[TMP21]]
+;
+  %2 = alloca ptr, align 4
+  store ptr %0, ptr %2, align 4
+  %3 = load ptr, ptr %2, align 4
+  %4 = getelementptr inbounds i32, ptr %3, i32 0
+  %5 = load i32, ptr %4, align 4
+  %6 = sext i32 %5 to i64
+  %7 = load ptr, ptr %2, align 4
+  %8 = getelementptr inbounds i32, ptr %7, i32 1
+  %9 = load i32, ptr %8, align 4
+  %10 = sext i32 %9 to i64
+  %11 = add nsw i64 %6, %10
+  %12 = load ptr, ptr %2, align 4
+  %13 = getelementptr inbounds i32, ptr %12, i32 2
+  %14 = load i32, ptr %13, align 4
+  %15 = sext i32 %14 to i64
+  %16 = add nsw i64 %11, %15
+  %17 = load ptr, ptr %2, align 4
+  %18 = getelementptr inbounds i32, ptr %17, i32 3
+  %19 = load i32, ptr %18, align 4
+  %20 = sext i32 %19 to i64
+  %21 = add nsw i64 %16, %20
+  ret i64 %21
+}
+
+define dso_local i64 @vmul(ptr noundef %0) #0 {
+; CHECK-LABEL: define dso_local i64 @vmul(
+; CHECK-SAME: ptr nocapture noundef readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = sext i32 [[TMP9]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = mul nsw i64 [[TMP10]], [[TMP6]]
+; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 8
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = sext i32 [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = mul nsw i64 [[TMP11]], [[TMP15]]
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 12
+; CHECK-NEXT:    [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
+; CHECK-NEXT:    [[TMP20:%.*]] = sext i32 [[TMP19]] to i64
+; CHECK-NEXT:    [[TMP21:%.*]] = mul nsw i64 [[TMP16]], [[TMP20]]
+; CHECK-NEXT:    ret i64 [[TMP21]]
+;
+  %2 = alloca ptr, align 4
+  store ptr %0, ptr %2, align 4
+  %3 = load ptr, ptr %2, align 4
+  %4 = getelementptr inbounds i32, ptr %3, i32 0
+  %5 = load i32, ptr %4, align 4
+  %6 = sext i32 %5 to i64
+  %7 = load ptr, ptr %2, align 4
+  %8 = getelementptr inbounds i32, ptr %7, i32 1
+  %9 = load i32, ptr %8, align 4
+  %10 = sext i32 %9 to i64
+  %11 = mul nsw i64 %6, %10
+  %12 = load ptr, ptr %2, align 4
+  %13 = getelementptr inbounds i32, ptr %12, i32 2
+  %14 = load i32, ptr %13, align 4
+  %15 = sext i32 %14 to i64
+  %16 = mul nsw i64 %11, %15
+  %17 = load ptr, ptr %2, align 4
+  %18 = getelementptr inbounds i32, ptr %17, i32 3
+  %19 = load i32, ptr %18, align 4
+  %20 = sext i32 %19 to i64
+  %21 = mul nsw i64 %16, %20
+  ret i64 %21
+}
+
+define dso_local i64 @vand(ptr noundef %0) #0 {
+; CHECK-LABEL: define dso_local range(i64 -2147483648, 2147483648) i64 @vand(
+; CHECK-SAME: ptr nocapture noundef readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[TMP9]], [[TMP2]]
+; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 8
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = and i32 [[TMP5]], [[TMP14]]
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 12
+; CHECK-NEXT:    [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = and i32 [[TMP10]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = sext i32 [[TMP11]] to i64
+; CHECK-NEXT:    ret i64 [[TMP21]]
+;
+  %2 = alloca ptr, align 4
+  store ptr %0, ptr %2, align 4
+  %3 = load ptr, ptr %2, align 4
+  %4 = getelementptr inbounds i32, ptr %3, i32 0
+  %5 = load i32, ptr %4, align 4
+  %6 = sext i32 %5 to i64
+  %7 = load ptr, ptr %2, align 4
+  %8 = getelementptr inbounds i32, ptr %7, i32 1
+  %9 = load i32, ptr %8, align 4
+  %10 = sext i32 %9 to i64
+  %11 = and i64 %6, %10
+  %12 = load ptr, ptr %2, align 4
+  %13 = getelementptr inbounds i32, ptr %12, i32 2
+  %14 = load i32, ptr %13, align 4
+  %15 = sext i32 %14 to i64
+  %16 = and i64 %11, %15
+  %17 = load ptr, ptr %2, align 4
+  %18 = getelementptr inbounds i32, ptr %17, i32 3
+  %19 = load i32, ptr %18, align 4
+  %20 = sext i32 %19 to i64
+  %21 = and i64 %16, %20
+  ret i64 %21
+}
+
+define dso_local i64 @vor(ptr noundef %0) #0 {
+; CHECK-LABEL: define dso_local range(i64 -2147483648, 2147483648) i64 @vor(
+; CHECK-SAME: ptr nocapture noundef readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP9]], [[TMP2]]
+; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 8
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = or i32 [[TMP5]], [[TMP14]]
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 12
+; CHECK-NEXT:    [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = or i32 [[TMP10]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = sext i32 [[TMP11]] to i64
+; CHECK-NEXT:    ret i64 [[TMP21]]
+;
+  %2 = alloca ptr, align 4
+  store ptr %0, ptr %2, align 4
+  %3 = load ptr, ptr %2, align 4
+  %4 = getelementptr inbounds i32, ptr %3, i32 0
+  %5 = load i32, ptr %4, align 4
+  %6 = sext i32 %5 to i64
+  %7 = load ptr, ptr %2, align 4
+  %8 = getelementptr inbounds i32, ptr %7, i32 1
+  %9 = load i32, ptr %8, align 4
+  %10 = sext i32 %9 to i64
+  %11 = or i64 %6, %10
+  %12 = load ptr, ptr %2, align 4
+  %13 = getelementptr inbounds i32, ptr %12, i32 2
+  %14 = load i32, ptr %13, align 4
+  %15 = sext i32 %14 to i64
+  %16 = or i64 %11, %15
+  %17 = load ptr, ptr %2, align 4
+  %18 = getelementptr inbounds i32, ptr %17, i32 3
+  %19 = load i32, ptr %18, align 4
+  %20 = sext i32 %19 to i64
+  %21 = or i64 %16, %20
+  ret i64 %21
+}
+
+define dso_local i64 @vxor(ptr noundef %0) #0 {
+; CHECK-LABEL: define dso_local range(i64 -2147483648, 2147483648) i64 @vxor(
+; CHECK-SAME: ptr nocapture noundef readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP9]], [[TMP2]]
+; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 8
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i32 [[TMP5]], [[TMP14]]
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 12
+; CHECK-NEXT:    [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = xor i32 [[TMP10]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = sext i32 [[TMP11]] to i64
+; CHECK-NEXT:    ret i64 [[TMP21]]
+;
+  %2 = alloca ptr, align 4
+  store ptr %0, ptr %2, align 4
+  %3 = load ptr, ptr %2, align 4
+  %4 = getelementptr inbounds i32, ptr %3, i32 0
+  %5 = load i32, ptr %4, align 4
+  %6 = sext i32 %5 to i64
+  %7 = load ptr, ptr %2, align 4
+  %8 = getelementptr inbounds i32, ptr %7, i32 1
+  %9 = load i32, ptr %8, align 4
+  %10 = sext i32 %9 to i64
+  %11 = xor i64 %6, %10
+  %12 = load ptr, ptr %2, align 4
+  %13 = getelementptr inbounds i32, ptr %12, i32 2
+  %14 = load i32, ptr %13, align 4
+  %15 = sext i32 %14 to i64
+  %16 = xor i64 %11, %15
+  %17 = load ptr, ptr %2, align 4
+  %18 = getelementptr inbounds i32, ptr %17, i32 3
+  %19 = load i32, ptr %18, align 4
+  %20 = sext i32 %19 to i64
+  %21 = xor i64 %16, %20
+  ret i64 %21
+}
+
+define dso_local double @vfadd(ptr noundef %0) #0 {
+; CHECK-LABEL: define dso_local double @vfadd(
+; CHECK-SAME: ptr nocapture noundef readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = fpext float [[TMP5]] to double
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load float, ptr [[TMP8]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = fpext float [[TMP9]] to double
+; CHECK-NEXT:    [[TMP11:%.*]] = fadd double [[TMP6]], [[TMP10]]
+; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 8
+; CHECK-NEXT:    [[TMP14:%.*]] = load float, ptr [[TMP13]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = fpext float [[TMP14]] to double
+; CHECK-NEXT:    [[TMP16:%.*]] = fadd double [[TMP11]], [[TMP15]]
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 12
+; CHECK-NEXT:    [[TMP19:%.*]] = load float, ptr [[TMP18]], align 4
+; CHECK-NEXT:    [[TMP20:%.*]] = fpext float [[TMP19]] to double
+; CHECK-NEXT:    [[TMP21:%.*]] = fadd double [[TMP16]], [[TMP20]]
+; CHECK-NEXT:    ret double [[TMP21]]
+;
+  %2 = alloca ptr, align 4
+  store ptr %0, ptr %2, align 4
+  %3 = load ptr, ptr %2, align 4
+  %4 = getelementptr inbounds float, ptr %3, i32 0
+  %5 = load float, ptr %4, align 4
+  %6 = fpext float %5 to double
+  %7 = load ptr, ptr %2, align 4
+  %8 = getelementptr inbounds float, ptr %7, i32 1
+  %9 = load float, ptr %8, align 4
+  %10 = fpext float %9 to double
+  %11 = fadd double %6, %10
+  %12 = load ptr, ptr %2, align 4
+  %13 = getelementptr inbounds float, ptr %12, i32 2
+  %14 = load float, ptr %13, align 4
+  %15 = fpext float %14 to double
+  %16 = fadd double %11, %15
+  %17 = load ptr, ptr %2, align 4
+  %18 = getelementptr inbounds float, ptr %17, i32 3
+  %19 = load float, ptr %18, align 4
+  %20 = fpext float %19 to double
+  %21 = fadd double %16, %20
+  ret double %21
+}
+
+define dso_local double @vfmul(ptr noundef %0) #0 {
+; CHECK-LABEL: define dso_local double @vfmul(
+; CHECK-SAME: ptr nocapture noundef readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = fpext float [[TMP5]] to double
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load float, ptr [[TMP8]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = fpext float [[TMP9]] to double
+; CHECK-NEXT:    [[TMP11:%.*]] = fmul double [[TMP6]], [[TMP10]]
+; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 8
+; CHECK-NEXT:    [[TMP14:%.*]] = load float, ptr [[TMP13]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = fpext float [[TMP14]] to double
+; CHECK-NEXT:    [[TMP16:%.*]] = fmul double [[TMP11]], [[TMP15]]
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 12
+; CHECK-NEXT:    [[TMP19:%.*]] = load float, ptr [[TMP18]], align 4
+; CHECK-NEXT:    [[TMP20:%.*]] = fpext float [[TMP19]] to double
+; CHECK-NEXT:    [[TMP21:%.*]] = fmul double [[TMP16]], [[TMP20]]
+; CHECK-NEXT:    ret double [[TMP21]]
+;
+  %2 = alloca ptr, align 4
+  store ptr %0, ptr %2, align 4
+  %3 = load ptr, ptr %2, align 4
+  %4 = getelementptr inbounds float, ptr %3, i32 0
+  %5 = load float, ptr %4, align 4
+  %6 = fpext float %5 to double
+  %7 = load ptr, ptr %2, align 4
+  %8 = getelementptr inbounds float, ptr %7, i32 1
+  %9 = load float, ptr %8, align 4
+  %10 = fpext float %9 to double
+  %11 = fmul double %6, %10
+  %12 = load ptr, ptr %2, align 4
+  %13 = getelementptr inbounds float, ptr %12, i32 2
+  %14 = load float, ptr %13, align 4
+  %15 = fpext float %14 to double
+  %16 = fmul double %11, %15
+  %17 = load ptr, ptr %2, align 4
+  %18 = getelementptr inbounds float, ptr %17, i32 3
+  %19 = load float, ptr %18, align 4
+  %20 = fpext float %19 to double
+  %21 = fmul double %16, %20
+  ret double %21
+}
+

>From d245bc24a0262ce46a26e3c5c29db1a562f06a66 Mon Sep 17 00:00:00 2001
From: nasmnc01 <nashe.mncube at arm.com>
Date: Tue, 21 Jan 2025 14:13:43 +0000
Subject: [PATCH 2/3] Update and clean test

Change-Id: I5f31b981d28ccc5a8f8f043eeabf54b672720613
---
 .../lib/Target/ARM/ARMTargetTransformInfo.cpp |   2 +
 ...nsive-arithmetic-extended-reduction-mve.ll | 285 ------------------
 .../Transforms/SLPVectorizer/ARM/vadd-mve.ll  | 211 +++++++++++++
 3 files changed, 213 insertions(+), 285 deletions(-)
 delete mode 100644 llvm/test/Transforms/SLPVectorizer/ARM/expensive-arithmetic-extended-reduction-mve.ll
 create mode 100644 llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll

diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 3e282639449f88..cd186fcb1e88ca 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1802,6 +1802,8 @@ InstructionCost ARMTTIImpl::getExtendedReductionCost(
         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2},
         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1},
         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1},
+        {ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1},
+        {ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1},
     };
 
     if (ST->hasMVEIntegerOps()) {
diff --git a/llvm/test/Transforms/SLPVectorizer/ARM/expensive-arithmetic-extended-reduction-mve.ll b/llvm/test/Transforms/SLPVectorizer/ARM/expensive-arithmetic-extended-reduction-mve.ll
deleted file mode 100644
index f84bc7dc076f1b..00000000000000
--- a/llvm/test/Transforms/SLPVectorizer/ARM/expensive-arithmetic-extended-reduction-mve.ll
+++ /dev/null
@@ -1,285 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt < %s -passes="default<O1>,slp-vectorizer" -S -mtriple=arm-none-eabi --mattr=+mve | FileCheck %s
-
-
-define dso_local i64 @vadd(ptr noundef %0) #0 {
-; CHECK-LABEL: define dso_local range(i64 -8589934592, 8589934589) i64 @vadd(
-; CHECK-SAME: ptr nocapture noundef readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = sext <4 x i32> [[TMP2]] to <4 x i64>
-; CHECK-NEXT:    [[TMP21:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP3]])
-; CHECK-NEXT:    ret i64 [[TMP21]]
-;
-  %2 = alloca ptr, align 4
-  store ptr %0, ptr %2, align 4
-  %3 = load ptr, ptr %2, align 4
-  %4 = getelementptr inbounds i32, ptr %3, i32 0
-  %5 = load i32, ptr %4, align 4
-  %6 = sext i32 %5 to i64
-  %7 = load ptr, ptr %2, align 4
-  %8 = getelementptr inbounds i32, ptr %7, i32 1
-  %9 = load i32, ptr %8, align 4
-  %10 = sext i32 %9 to i64
-  %11 = add nsw i64 %6, %10
-  %12 = load ptr, ptr %2, align 4
-  %13 = getelementptr inbounds i32, ptr %12, i32 2
-  %14 = load i32, ptr %13, align 4
-  %15 = sext i32 %14 to i64
-  %16 = add nsw i64 %11, %15
-  %17 = load ptr, ptr %2, align 4
-  %18 = getelementptr inbounds i32, ptr %17, i32 3
-  %19 = load i32, ptr %18, align 4
-  %20 = sext i32 %19 to i64
-  %21 = add nsw i64 %16, %20
-  ret i64 %21
-}
-
-define dso_local i64 @vmul(ptr noundef %0) #0 {
-; CHECK-LABEL: define dso_local i64 @vmul(
-; CHECK-SAME: ptr nocapture noundef readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4
-; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 4
-; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
-; CHECK-NEXT:    [[TMP10:%.*]] = sext i32 [[TMP9]] to i64
-; CHECK-NEXT:    [[TMP11:%.*]] = mul nsw i64 [[TMP10]], [[TMP6]]
-; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 8
-; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
-; CHECK-NEXT:    [[TMP15:%.*]] = sext i32 [[TMP14]] to i64
-; CHECK-NEXT:    [[TMP16:%.*]] = mul nsw i64 [[TMP11]], [[TMP15]]
-; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 12
-; CHECK-NEXT:    [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
-; CHECK-NEXT:    [[TMP20:%.*]] = sext i32 [[TMP19]] to i64
-; CHECK-NEXT:    [[TMP21:%.*]] = mul nsw i64 [[TMP16]], [[TMP20]]
-; CHECK-NEXT:    ret i64 [[TMP21]]
-;
-  %2 = alloca ptr, align 4
-  store ptr %0, ptr %2, align 4
-  %3 = load ptr, ptr %2, align 4
-  %4 = getelementptr inbounds i32, ptr %3, i32 0
-  %5 = load i32, ptr %4, align 4
-  %6 = sext i32 %5 to i64
-  %7 = load ptr, ptr %2, align 4
-  %8 = getelementptr inbounds i32, ptr %7, i32 1
-  %9 = load i32, ptr %8, align 4
-  %10 = sext i32 %9 to i64
-  %11 = mul nsw i64 %6, %10
-  %12 = load ptr, ptr %2, align 4
-  %13 = getelementptr inbounds i32, ptr %12, i32 2
-  %14 = load i32, ptr %13, align 4
-  %15 = sext i32 %14 to i64
-  %16 = mul nsw i64 %11, %15
-  %17 = load ptr, ptr %2, align 4
-  %18 = getelementptr inbounds i32, ptr %17, i32 3
-  %19 = load i32, ptr %18, align 4
-  %20 = sext i32 %19 to i64
-  %21 = mul nsw i64 %16, %20
-  ret i64 %21
-}
-
-define dso_local i64 @vand(ptr noundef %0) #0 {
-; CHECK-LABEL: define dso_local range(i64 -2147483648, 2147483648) i64 @vand(
-; CHECK-SAME: ptr nocapture noundef readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4
-; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 4
-; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[TMP9]], [[TMP2]]
-; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 8
-; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
-; CHECK-NEXT:    [[TMP10:%.*]] = and i32 [[TMP5]], [[TMP14]]
-; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 12
-; CHECK-NEXT:    [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
-; CHECK-NEXT:    [[TMP11:%.*]] = and i32 [[TMP10]], [[TMP19]]
-; CHECK-NEXT:    [[TMP21:%.*]] = sext i32 [[TMP11]] to i64
-; CHECK-NEXT:    ret i64 [[TMP21]]
-;
-  %2 = alloca ptr, align 4
-  store ptr %0, ptr %2, align 4
-  %3 = load ptr, ptr %2, align 4
-  %4 = getelementptr inbounds i32, ptr %3, i32 0
-  %5 = load i32, ptr %4, align 4
-  %6 = sext i32 %5 to i64
-  %7 = load ptr, ptr %2, align 4
-  %8 = getelementptr inbounds i32, ptr %7, i32 1
-  %9 = load i32, ptr %8, align 4
-  %10 = sext i32 %9 to i64
-  %11 = and i64 %6, %10
-  %12 = load ptr, ptr %2, align 4
-  %13 = getelementptr inbounds i32, ptr %12, i32 2
-  %14 = load i32, ptr %13, align 4
-  %15 = sext i32 %14 to i64
-  %16 = and i64 %11, %15
-  %17 = load ptr, ptr %2, align 4
-  %18 = getelementptr inbounds i32, ptr %17, i32 3
-  %19 = load i32, ptr %18, align 4
-  %20 = sext i32 %19 to i64
-  %21 = and i64 %16, %20
-  ret i64 %21
-}
-
-define dso_local i64 @vor(ptr noundef %0) #0 {
-; CHECK-LABEL: define dso_local range(i64 -2147483648, 2147483648) i64 @vor(
-; CHECK-SAME: ptr nocapture noundef readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4
-; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 4
-; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP9]], [[TMP2]]
-; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 8
-; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
-; CHECK-NEXT:    [[TMP10:%.*]] = or i32 [[TMP5]], [[TMP14]]
-; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 12
-; CHECK-NEXT:    [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
-; CHECK-NEXT:    [[TMP11:%.*]] = or i32 [[TMP10]], [[TMP19]]
-; CHECK-NEXT:    [[TMP21:%.*]] = sext i32 [[TMP11]] to i64
-; CHECK-NEXT:    ret i64 [[TMP21]]
-;
-  %2 = alloca ptr, align 4
-  store ptr %0, ptr %2, align 4
-  %3 = load ptr, ptr %2, align 4
-  %4 = getelementptr inbounds i32, ptr %3, i32 0
-  %5 = load i32, ptr %4, align 4
-  %6 = sext i32 %5 to i64
-  %7 = load ptr, ptr %2, align 4
-  %8 = getelementptr inbounds i32, ptr %7, i32 1
-  %9 = load i32, ptr %8, align 4
-  %10 = sext i32 %9 to i64
-  %11 = or i64 %6, %10
-  %12 = load ptr, ptr %2, align 4
-  %13 = getelementptr inbounds i32, ptr %12, i32 2
-  %14 = load i32, ptr %13, align 4
-  %15 = sext i32 %14 to i64
-  %16 = or i64 %11, %15
-  %17 = load ptr, ptr %2, align 4
-  %18 = getelementptr inbounds i32, ptr %17, i32 3
-  %19 = load i32, ptr %18, align 4
-  %20 = sext i32 %19 to i64
-  %21 = or i64 %16, %20
-  ret i64 %21
-}
-
-define dso_local i64 @vxor(ptr noundef %0) #0 {
-; CHECK-LABEL: define dso_local range(i64 -2147483648, 2147483648) i64 @vxor(
-; CHECK-SAME: ptr nocapture noundef readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4
-; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 4
-; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP9]], [[TMP2]]
-; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 8
-; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
-; CHECK-NEXT:    [[TMP10:%.*]] = xor i32 [[TMP5]], [[TMP14]]
-; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 12
-; CHECK-NEXT:    [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
-; CHECK-NEXT:    [[TMP11:%.*]] = xor i32 [[TMP10]], [[TMP19]]
-; CHECK-NEXT:    [[TMP21:%.*]] = sext i32 [[TMP11]] to i64
-; CHECK-NEXT:    ret i64 [[TMP21]]
-;
-  %2 = alloca ptr, align 4
-  store ptr %0, ptr %2, align 4
-  %3 = load ptr, ptr %2, align 4
-  %4 = getelementptr inbounds i32, ptr %3, i32 0
-  %5 = load i32, ptr %4, align 4
-  %6 = sext i32 %5 to i64
-  %7 = load ptr, ptr %2, align 4
-  %8 = getelementptr inbounds i32, ptr %7, i32 1
-  %9 = load i32, ptr %8, align 4
-  %10 = sext i32 %9 to i64
-  %11 = xor i64 %6, %10
-  %12 = load ptr, ptr %2, align 4
-  %13 = getelementptr inbounds i32, ptr %12, i32 2
-  %14 = load i32, ptr %13, align 4
-  %15 = sext i32 %14 to i64
-  %16 = xor i64 %11, %15
-  %17 = load ptr, ptr %2, align 4
-  %18 = getelementptr inbounds i32, ptr %17, i32 3
-  %19 = load i32, ptr %18, align 4
-  %20 = sext i32 %19 to i64
-  %21 = xor i64 %16, %20
-  ret i64 %21
-}
-
-define dso_local double @vfadd(ptr noundef %0) #0 {
-; CHECK-LABEL: define dso_local double @vfadd(
-; CHECK-SAME: ptr nocapture noundef readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[TMP0]], align 4
-; CHECK-NEXT:    [[TMP6:%.*]] = fpext float [[TMP5]] to double
-; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 4
-; CHECK-NEXT:    [[TMP9:%.*]] = load float, ptr [[TMP8]], align 4
-; CHECK-NEXT:    [[TMP10:%.*]] = fpext float [[TMP9]] to double
-; CHECK-NEXT:    [[TMP11:%.*]] = fadd double [[TMP6]], [[TMP10]]
-; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 8
-; CHECK-NEXT:    [[TMP14:%.*]] = load float, ptr [[TMP13]], align 4
-; CHECK-NEXT:    [[TMP15:%.*]] = fpext float [[TMP14]] to double
-; CHECK-NEXT:    [[TMP16:%.*]] = fadd double [[TMP11]], [[TMP15]]
-; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 12
-; CHECK-NEXT:    [[TMP19:%.*]] = load float, ptr [[TMP18]], align 4
-; CHECK-NEXT:    [[TMP20:%.*]] = fpext float [[TMP19]] to double
-; CHECK-NEXT:    [[TMP21:%.*]] = fadd double [[TMP16]], [[TMP20]]
-; CHECK-NEXT:    ret double [[TMP21]]
-;
-  %2 = alloca ptr, align 4
-  store ptr %0, ptr %2, align 4
-  %3 = load ptr, ptr %2, align 4
-  %4 = getelementptr inbounds float, ptr %3, i32 0
-  %5 = load float, ptr %4, align 4
-  %6 = fpext float %5 to double
-  %7 = load ptr, ptr %2, align 4
-  %8 = getelementptr inbounds float, ptr %7, i32 1
-  %9 = load float, ptr %8, align 4
-  %10 = fpext float %9 to double
-  %11 = fadd double %6, %10
-  %12 = load ptr, ptr %2, align 4
-  %13 = getelementptr inbounds float, ptr %12, i32 2
-  %14 = load float, ptr %13, align 4
-  %15 = fpext float %14 to double
-  %16 = fadd double %11, %15
-  %17 = load ptr, ptr %2, align 4
-  %18 = getelementptr inbounds float, ptr %17, i32 3
-  %19 = load float, ptr %18, align 4
-  %20 = fpext float %19 to double
-  %21 = fadd double %16, %20
-  ret double %21
-}
-
-define dso_local double @vfmul(ptr noundef %0) #0 {
-; CHECK-LABEL: define dso_local double @vfmul(
-; CHECK-SAME: ptr nocapture noundef readonly [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[TMP0]], align 4
-; CHECK-NEXT:    [[TMP6:%.*]] = fpext float [[TMP5]] to double
-; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 4
-; CHECK-NEXT:    [[TMP9:%.*]] = load float, ptr [[TMP8]], align 4
-; CHECK-NEXT:    [[TMP10:%.*]] = fpext float [[TMP9]] to double
-; CHECK-NEXT:    [[TMP11:%.*]] = fmul double [[TMP6]], [[TMP10]]
-; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 8
-; CHECK-NEXT:    [[TMP14:%.*]] = load float, ptr [[TMP13]], align 4
-; CHECK-NEXT:    [[TMP15:%.*]] = fpext float [[TMP14]] to double
-; CHECK-NEXT:    [[TMP16:%.*]] = fmul double [[TMP11]], [[TMP15]]
-; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i32 12
-; CHECK-NEXT:    [[TMP19:%.*]] = load float, ptr [[TMP18]], align 4
-; CHECK-NEXT:    [[TMP20:%.*]] = fpext float [[TMP19]] to double
-; CHECK-NEXT:    [[TMP21:%.*]] = fmul double [[TMP16]], [[TMP20]]
-; CHECK-NEXT:    ret double [[TMP21]]
-;
-  %2 = alloca ptr, align 4
-  store ptr %0, ptr %2, align 4
-  %3 = load ptr, ptr %2, align 4
-  %4 = getelementptr inbounds float, ptr %3, i32 0
-  %5 = load float, ptr %4, align 4
-  %6 = fpext float %5 to double
-  %7 = load ptr, ptr %2, align 4
-  %8 = getelementptr inbounds float, ptr %7, i32 1
-  %9 = load float, ptr %8, align 4
-  %10 = fpext float %9 to double
-  %11 = fmul double %6, %10
-  %12 = load ptr, ptr %2, align 4
-  %13 = getelementptr inbounds float, ptr %12, i32 2
-  %14 = load float, ptr %13, align 4
-  %15 = fpext float %14 to double
-  %16 = fmul double %11, %15
-  %17 = load ptr, ptr %2, align 4
-  %18 = getelementptr inbounds float, ptr %17, i32 3
-  %19 = load float, ptr %18, align 4
-  %20 = fpext float %19 to double
-  %21 = fmul double %16, %20
-  ret double %21
-}
-
diff --git a/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll b/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll
new file mode 100644
index 00000000000000..6a36e1af188138
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll
@@ -0,0 +1,211 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes="default<O1>,slp-vectorizer" --mtriple arm-none-eabi -mattr=+mve -S -o - | FileCheck %s
+
+define  i64 @vadd_32_64(ptr %a) {
+; CHECK-LABEL: define range(i64 -8589934592, 8589934589) i64 @vadd_32_64(
+; CHECK-SAME: ptr nocapture readonly [[A:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[A]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i32> [[TMP0]] to <4 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
+; CHECK-NEXT:    ret i64 [[TMP2]]
+;
+entry:
+  %a.addr = alloca ptr, align 4
+  store ptr %a, ptr %a.addr, align 4
+  %0 = load ptr, ptr %a.addr, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %0, i32 0
+  %1 = load i32, ptr %arrayidx, align 4
+  %conv = sext i32 %1 to i64
+  %2 = load ptr, ptr %a.addr, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %2, i32 1
+  %3 = load i32, ptr %arrayidx1, align 4
+  %conv2 = sext i32 %3 to i64
+  %add = add nsw i64 %conv, %conv2
+  %4 = load ptr, ptr %a.addr, align 4
+  %arrayidx3 = getelementptr inbounds i32, ptr %4, i32 2
+  %5 = load i32, ptr %arrayidx3, align 4
+  %conv4 = sext i32 %5 to i64
+  %add5 = add nsw i64 %add, %conv4
+  %6 = load ptr, ptr %a.addr, align 4
+  %arrayidx6 = getelementptr inbounds i32, ptr %6, i32 3
+  %7 = load i32, ptr %arrayidx6, align 4
+  %conv7 = sext i32 %7 to i64
+  %add8 = add nsw i64 %add5, %conv7
+  ret i64 %add8
+}
+
+define  i64 @vadd_16_64(ptr %a) {
+; CHECK-LABEL: define range(i64 0, 262141) i64 @vadd_16_64(
+; CHECK-SAME: ptr nocapture readonly [[A:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 2
+; CHECK-NEXT:    [[TMP1:%.*]] = zext <4 x i16> [[TMP0]] to <4 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP1]])
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    ret i64 [[TMP3]]
+;
+entry:
+  %a.addr = alloca ptr, align 4
+  store ptr %a, ptr %a.addr, align 4
+  %0 = load ptr, ptr %a.addr, align 4
+  %arrayidx = getelementptr inbounds i16, ptr %0, i32 0
+  %1 = load i16, ptr %arrayidx, align 2
+  %conv = zext i16 %1 to i64
+  %2 = load ptr, ptr %a.addr, align 4
+  %arrayidx1 = getelementptr inbounds i16, ptr %2, i32 1
+  %3 = load i16, ptr %arrayidx1, align 2
+  %conv2 = zext i16 %3 to i64
+  %add = add nsw i64 %conv, %conv2
+  %4 = load ptr, ptr %a.addr, align 4
+  %arrayidx3 = getelementptr inbounds i16, ptr %4, i32 2
+  %5 = load i16, ptr %arrayidx3, align 2
+  %conv4 = zext i16 %5 to i64
+  %add5 = add nsw i64 %add, %conv4
+  %6 = load ptr, ptr %a.addr, align 4
+  %arrayidx6 = getelementptr inbounds i16, ptr %6, i32 3
+  %7 = load i16, ptr %arrayidx6, align 2
+  %conv7 = zext i16 %7 to i64
+  %add8 = add nsw i64 %add5, %conv7
+  ret i64 %add8
+}
+
+define  i64 @vadd_8_64(ptr %a) {
+; CHECK-LABEL: define range(i64 0, 1021) i64 @vadd_8_64(
+; CHECK-SAME: ptr nocapture readonly [[A:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr [[A]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = zext <4 x i8> [[TMP0]] to <4 x i16>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[TMP1]])
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[TMP2]] to i64
+; CHECK-NEXT:    ret i64 [[TMP3]]
+;
+entry:
+  %a.addr = alloca ptr, align 4
+  store ptr %a, ptr %a.addr, align 4
+  %0 = load ptr, ptr %a.addr, align 4
+  %arrayidx = getelementptr inbounds i8, ptr %0, i32 0
+  %1 = load i8, ptr %arrayidx, align 1
+  %conv = zext i8 %1 to i64
+  %2 = load ptr, ptr %a.addr, align 4
+  %arrayidx1 = getelementptr inbounds i8, ptr %2, i32 1
+  %3 = load i8, ptr %arrayidx1, align 1
+  %conv2 = zext i8 %3 to i64
+  %add = add nsw i64 %conv, %conv2
+  %4 = load ptr, ptr %a.addr, align 4
+  %arrayidx3 = getelementptr inbounds i8, ptr %4, i32 2
+  %5 = load i8, ptr %arrayidx3, align 1
+  %conv4 = zext i8 %5 to i64
+  %add5 = add nsw i64 %add, %conv4
+  %6 = load ptr, ptr %a.addr, align 4
+  %arrayidx6 = getelementptr inbounds i8, ptr %6, i32 3
+  %7 = load i8, ptr %arrayidx6, align 1
+  %conv7 = zext i8 %7 to i64
+  %add8 = add nsw i64 %add5, %conv7
+  ret i64 %add8
+}
+
+define  i32 @vadd_16_32(ptr %a) {
+; CHECK-LABEL: define range(i32 -131072, 131069) i32 @vadd_16_32(
+; CHECK-SAME: ptr nocapture readonly [[A:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 2
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i16> [[TMP0]] to <4 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP1]])
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+entry:
+  %a.addr = alloca ptr, align 4
+  store ptr %a, ptr %a.addr, align 4
+  %0 = load ptr, ptr %a.addr, align 4
+  %arrayidx = getelementptr inbounds i16, ptr %0, i32 0
+  %1 = load i16, ptr %arrayidx, align 2
+  %conv = sext i16 %1 to i32
+  %2 = load ptr, ptr %a.addr, align 4
+  %arrayidx1 = getelementptr inbounds i16, ptr %2, i32 1
+  %3 = load i16, ptr %arrayidx1, align 2
+  %conv2 = sext i16 %3 to i32
+  %add = add nsw i32 %conv, %conv2
+  %4 = load ptr, ptr %a.addr, align 4
+  %arrayidx3 = getelementptr inbounds i16, ptr %4, i32 2
+  %5 = load i16, ptr %arrayidx3, align 2
+  %conv4 = sext i16 %5 to i32
+  %add5 = add nsw i32 %add, %conv4
+  %6 = load ptr, ptr %a.addr, align 4
+  %arrayidx6 = getelementptr inbounds i16, ptr %6, i32 3
+  %7 = load i16, ptr %arrayidx6, align 2
+  %conv7 = sext i16 %7 to i32
+  %add8 = add nsw i32 %add5, %conv7
+  ret i32 %add8
+}
+
+define  i32 @vadd_8_32(ptr %a) {
+; CHECK-LABEL: define range(i32 -512, 509) i32 @vadd_8_32(
+; CHECK-SAME: ptr nocapture readonly [[A:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr [[A]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i8> [[TMP0]] to <4 x i16>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[TMP1]])
+; CHECK-NEXT:    [[TMP3:%.*]] = sext i16 [[TMP2]] to i32
+; CHECK-NEXT:    ret i32 [[TMP3]]
+;
+entry:
+  %a.addr = alloca ptr, align 4
+  store ptr %a, ptr %a.addr, align 4
+  %0 = load ptr, ptr %a.addr, align 4
+  %arrayidx = getelementptr inbounds i8, ptr %0, i32 0
+  %1 = load i8, ptr %arrayidx, align 1
+  %conv = sext i8 %1 to i32
+  %2 = load ptr, ptr %a.addr, align 4
+  %arrayidx1 = getelementptr inbounds i8, ptr %2, i32 1
+  %3 = load i8, ptr %arrayidx1, align 1
+  %conv2 = sext i8 %3 to i32
+  %add = add nsw i32 %conv, %conv2
+  %4 = load ptr, ptr %a.addr, align 4
+  %arrayidx3 = getelementptr inbounds i8, ptr %4, i32 2
+  %5 = load i8, ptr %arrayidx3, align 1
+  %conv4 = sext i8 %5 to i32
+  %add5 = add nsw i32 %add, %conv4
+  %6 = load ptr, ptr %a.addr, align 4
+  %arrayidx6 = getelementptr inbounds i8, ptr %6, i32 3
+  %7 = load i8, ptr %arrayidx6, align 1
+  %conv7 = sext i8 %7 to i32
+  %add8 = add nsw i32 %add5, %conv7
+  ret i32 %add8
+}
+
+define  signext i16 @vadd_8_16(ptr %a) {
+; CHECK-LABEL: define signext range(i16 -512, 509) i16 @vadd_8_16(
+; CHECK-SAME: ptr nocapture readonly [[A:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr [[A]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i8> [[TMP0]] to <4 x i16>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[TMP1]])
+; CHECK-NEXT:    ret i16 [[TMP2]]
+;
+entry:
+  %a.addr = alloca ptr, align 4
+  store ptr %a, ptr %a.addr, align 4
+  %0 = load ptr, ptr %a.addr, align 4
+  %arrayidx = getelementptr inbounds i8, ptr %0, i32 0
+  %1 = load i8, ptr %arrayidx, align 1
+  %conv = sext i8 %1 to i16
+  %conv1 = sext i16 %conv to i32
+  %2 = load ptr, ptr %a.addr, align 4
+  %arrayidx2 = getelementptr inbounds i8, ptr %2, i32 1
+  %3 = load i8, ptr %arrayidx2, align 1
+  %conv3 = sext i8 %3 to i32
+  %add = add nsw i32 %conv1, %conv3
+  %4 = load ptr, ptr %a.addr, align 4
+  %arrayidx4 = getelementptr inbounds i8, ptr %4, i32 2
+  %5 = load i8, ptr %arrayidx4, align 1
+  %conv5 = sext i8 %5 to i32
+  %add6 = add nsw i32 %add, %conv5
+  %6 = load ptr, ptr %a.addr, align 4
+  %arrayidx7 = getelementptr inbounds i8, ptr %6, i32 3
+  %7 = load i8, ptr %arrayidx7, align 1
+  %conv8 = sext i8 %7 to i32
+  %add9 = add nsw i32 %add6, %conv8
+  %conv10 = trunc i32 %add9 to i16
+  ret i16 %conv10
+}

>From ac951159c1cca20665610f976ce41ef6d40412ce Mon Sep 17 00:00:00 2001
From: nasmnc01 <nashe.mncube at arm.com>
Date: Wed, 22 Jan 2025 11:26:27 +0000
Subject: [PATCH 3/3] Don't allow illegal vector extension

Change-Id: Ib7059f675aca92fa27f9153247ee90b2d7bad214
---
 .../lib/Target/ARM/ARMTargetTransformInfo.cpp |  29 +---
 .../Transforms/SLPVectorizer/ARM/vadd-mve.ll  |  17 +-
 .../Transforms/SLPVectorizer/ARM/vadd-mve.s   | 161 ++++++++++++++++++
 3 files changed, 177 insertions(+), 30 deletions(-)
 create mode 100644 llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.s

diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index cd186fcb1e88ca..5768f010f304f4 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1791,31 +1791,6 @@ InstructionCost ARMTTIImpl::getExtendedReductionCost(
 
   int ISD = TLI->InstructionOpcodeToISD(Opcode);
 
-  auto CastCost = [=]() -> unsigned {
-    // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
-    // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; i64
-    // sexts are linearised and so cost more.
-    static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
-        {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1},
-        {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1},
-        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2},
-        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2},
-        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1},
-        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1},
-        {ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1},
-        {ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1},
-    };
-
-    if (ST->hasMVEIntegerOps()) {
-      if (const auto *Entry = ConvertCostTableLookup(
-              MVEVectorConversionTbl,
-              (IsUnsigned) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND,
-              ResVT.getSimpleVT(), ValVT.getSimpleVT()))
-        return Entry->Cost;
-    }
-    return 0;
-  };
-
   switch (ISD) {
   case ISD::ADD:
     if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
@@ -1830,8 +1805,8 @@ InstructionCost ARMTTIImpl::getExtendedReductionCost(
       if (ValVT.getSizeInBits() <= 128 &&
           ((LT.second == MVT::v16i8 && RevVTSize <= 32) ||
            (LT.second == MVT::v8i16 && RevVTSize <= 32) ||
-           (LT.second == MVT::v4i32 && RevVTSize <= 64)))
-        return CastCost() + ST->getMVEVectorCostFactor(CostKind) * LT.first;
+           (LT.second == MVT::v4i32 && RevVTSize <= 32)))
+        return ST->getMVEVectorCostFactor(CostKind) * LT.first;
     }
     break;
   default:
diff --git a/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll b/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll
index 6a36e1af188138..7e97390f15ba3d 100644
--- a/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll
+++ b/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll
@@ -5,9 +5,20 @@ define  i64 @vadd_32_64(ptr %a) {
 ; CHECK-LABEL: define range(i64 -8589934592, 8589934589) i64 @vadd_32_64(
 ; CHECK-SAME: ptr nocapture readonly [[A:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[A]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i32> [[TMP0]] to <4 x i64>
-; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i32 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[CONV2:%.*]] = sext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[CONV2]], [[CONV]]
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i32 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4
+; CHECK-NEXT:    [[CONV4:%.*]] = sext i32 [[TMP4]] to i64
+; CHECK-NEXT:    [[ADD5:%.*]] = add nsw i64 [[ADD]], [[CONV4]]
+; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i32 12
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
+; CHECK-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = add nsw i64 [[ADD5]], [[CONV7]]
 ; CHECK-NEXT:    ret i64 [[TMP2]]
 ;
 entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.s b/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.s
new file mode 100644
index 00000000000000..bbad5c6fcc9aa2
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.s
@@ -0,0 +1,161 @@
+	.text
+	.syntax unified
+	.eabi_attribute	67, "2.09"	@ Tag_conformance
+	.eabi_attribute	6, 21	@ Tag_CPU_arch
+	.eabi_attribute	8, 1	@ Tag_ARM_ISA_use
+	.eabi_attribute	9, 3	@ Tag_THUMB_ISA_use
+	.eabi_attribute	48, 1	@ Tag_MVE_arch
+	.eabi_attribute	46, 1	@ Tag_DSP_extension
+	.eabi_attribute	34, 1	@ Tag_CPU_unaligned_access
+	.eabi_attribute	17, 1	@ Tag_ABI_PCS_GOT_use
+	.eabi_attribute	20, 1	@ Tag_ABI_FP_denormal
+	.eabi_attribute	21, 1	@ Tag_ABI_FP_exceptions
+	.eabi_attribute	23, 3	@ Tag_ABI_FP_number_model
+	.eabi_attribute	24, 1	@ Tag_ABI_align_needed
+	.eabi_attribute	25, 1	@ Tag_ABI_align_preserved
+	.eabi_attribute	38, 1	@ Tag_ABI_FP_16bit_format
+	.eabi_attribute	14, 0	@ Tag_ABI_PCS_R9_use
+	.file	"vadd-mve.ll"
+	.globl	vadd_32_64                      @ -- Begin function vadd_32_64
+	.p2align	2
+	.type	vadd_32_64,%function
+	.code	32                              @ @vadd_32_64
+vadd_32_64:
+	.fnstart
+@ %bb.0:                                @ %entry
+	.pad	#4
+	sub	sp, sp, #4
+	ldm	r0, {r1, r2, r3, r12}
+	str	r0, [sp]
+	asr	r0, r1, #31
+	adds	r1, r1, r2
+	adc	r0, r0, r2, asr #31
+	adds	r1, r1, r3
+	adc	r2, r0, r3, asr #31
+	adds	r0, r1, r12
+	adc	r1, r2, r12, asr #31
+	add	sp, sp, #4
+	bx	lr
+.Lfunc_end0:
+	.size	vadd_32_64, .Lfunc_end0-vadd_32_64
+	.fnend
+                                        @ -- End function
+	.globl	vadd_16_64                      @ -- Begin function vadd_16_64
+	.p2align	2
+	.type	vadd_16_64,%function
+	.code	32                              @ @vadd_16_64
+vadd_16_64:
+	.fnstart
+@ %bb.0:                                @ %entry
+	.pad	#4
+	sub	sp, sp, #4
+	ldrh	r1, [r0, #2]
+	ldrh	r2, [r0]
+	str	r0, [sp]
+	add	r1, r2, r1
+	ldrh	r2, [r0, #4]
+	ldrh	r0, [r0, #6]
+	add	r1, r1, r2
+	add	r0, r1, r0
+	mov	r1, #0
+	add	sp, sp, #4
+	bx	lr
+.Lfunc_end1:
+	.size	vadd_16_64, .Lfunc_end1-vadd_16_64
+	.fnend
+                                        @ -- End function
+	.globl	vadd_8_64                       @ -- Begin function vadd_8_64
+	.p2align	2
+	.type	vadd_8_64,%function
+	.code	32                              @ @vadd_8_64
+vadd_8_64:
+	.fnstart
+@ %bb.0:                                @ %entry
+	.pad	#4
+	sub	sp, sp, #4
+	ldrb	r1, [r0]
+	ldrb	r2, [r0, #1]
+	ldrb	r3, [r0, #2]
+	add	r1, r1, r2
+	str	r0, [sp]
+	ldrb	r0, [r0, #3]
+	add	r1, r1, r3
+	add	r0, r1, r0
+	mov	r1, #0
+	add	sp, sp, #4
+	bx	lr
+.Lfunc_end2:
+	.size	vadd_8_64, .Lfunc_end2-vadd_8_64
+	.fnend
+                                        @ -- End function
+	.globl	vadd_16_32                      @ -- Begin function vadd_16_32
+	.p2align	2
+	.type	vadd_16_32,%function
+	.code	32                              @ @vadd_16_32
+vadd_16_32:
+	.fnstart
+@ %bb.0:                                @ %entry
+	.pad	#4
+	sub	sp, sp, #4
+	ldrsh	r1, [r0, #2]
+	ldrsh	r2, [r0]
+	str	r0, [sp]
+	add	r1, r2, r1
+	ldrsh	r2, [r0, #4]
+	ldrsh	r0, [r0, #6]
+	add	r1, r1, r2
+	add	r0, r1, r0
+	add	sp, sp, #4
+	bx	lr
+.Lfunc_end3:
+	.size	vadd_16_32, .Lfunc_end3-vadd_16_32
+	.fnend
+                                        @ -- End function
+	.globl	vadd_8_32                       @ -- Begin function vadd_8_32
+	.p2align	2
+	.type	vadd_8_32,%function
+	.code	32                              @ @vadd_8_32
+vadd_8_32:
+	.fnstart
+@ %bb.0:                                @ %entry
+	.pad	#4
+	sub	sp, sp, #4
+	ldrsb	r1, [r0, #1]
+	ldrsb	r2, [r0]
+	str	r0, [sp]
+	add	r1, r2, r1
+	ldrsb	r2, [r0, #2]
+	ldrsb	r0, [r0, #3]
+	add	r1, r1, r2
+	add	r0, r1, r0
+	add	sp, sp, #4
+	bx	lr
+.Lfunc_end4:
+	.size	vadd_8_32, .Lfunc_end4-vadd_8_32
+	.fnend
+                                        @ -- End function
+	.globl	vadd_8_16                       @ -- Begin function vadd_8_16
+	.p2align	2
+	.type	vadd_8_16,%function
+	.code	32                              @ @vadd_8_16
+vadd_8_16:
+	.fnstart
+@ %bb.0:                                @ %entry
+	.pad	#4
+	sub	sp, sp, #4
+	ldrsb	r1, [r0, #1]
+	ldrsb	r2, [r0]
+	str	r0, [sp]
+	add	r1, r2, r1
+	ldrsb	r2, [r0, #2]
+	ldrsb	r0, [r0, #3]
+	add	r1, r1, r2
+	add	r0, r1, r0
+	add	sp, sp, #4
+	bx	lr
+.Lfunc_end5:
+	.size	vadd_8_16, .Lfunc_end5-vadd_8_16
+	.fnend
+                                        @ -- End function
+	.section	".note.GNU-stack","",%progbits
+	.eabi_attribute	30, 1	@ Tag_ABI_optimization_goals


