[llvm] [ARM][SLP] Adjust cost returned for vadd reduction (PR #122713)

Nashe Mncube via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 12 09:33:35 PDT 2025


https://github.com/nasherm updated https://github.com/llvm/llvm-project/pull/122713

>From bb3c2fc397652b97fd496530d41a495c4549b42b Mon Sep 17 00:00:00 2001
From: nasmnc01 <nashe.mncube at arm.com>
Date: Thu, 9 Jan 2025 14:04:39 +0000
Subject: [PATCH 1/3] [ARM][SLP] Fix incorrect cost function for SLP
 Vectorization of ZExt/SExt

PR #117350 made changes to the SLP vectorizer that introduced
a regression on ARM vectorization benchmarks. This was due to
the changes assuming that SExt/ZExt vector instructions have a
constant cost. That assumption holds for RISC-V but not for ARM,
where the source and destination types of SExt/ZExt instructions
are taken into account when calculating the vector cost.
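
As an illustration (a minimal sketch, not taken from any benchmark),
the ARM cost model queries both the source and destination vector
types when costing extends such as the ones below, so a single
constant per-extend cost cannot be assumed:

define <4 x i64> @ext_cost_example(<8 x i16> %x, <4 x i32> %y) {
  ; Hypothetical example: each sext is costed from its own source and
  ; destination vector types rather than being assumed constant.
  %narrow = sext <8 x i16> %x to <8 x i32>
  %wide   = sext <4 x i32> %y to <4 x i64>
  ret <4 x i64> %wide
}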

Change-Id: I6f995dcde26e5aaf62b779b63e52988fb333f941
---
 .../lib/Target/ARM/ARMTargetTransformInfo.cpp |  1 -
 .../Transforms/SLPVectorizer/ARM/vadd-mve.ll  | 29 +++++++++++++++++++
 2 files changed, 29 insertions(+), 1 deletion(-)
 create mode 100644 llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll

diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 2a2a46a19d7c1..7e9373af0b7cb 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1794,7 +1794,6 @@ InstructionCost ARMTTIImpl::getExtendedReductionCost(
   case ISD::ADD:
     if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
       std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
-
       // The legal cases are:
       //   VADDV u/s 8/16/32
       //   VADDLV u/s 32
diff --git a/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll b/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll
new file mode 100644
index 0000000000000..e0af0ca1e4f8b
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll
@@ -0,0 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=slp-vectorizer --mtriple arm-none-eabi -mattr=+mve -S -o - | FileCheck %s
+
+define i64 @vadd_32_64(ptr readonly %a) {
+; CHECK-LABEL: define i64 @vadd_32_64(
+; CHECK-SAME: ptr readonly [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[A]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i32> [[TMP0]] to <4 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
+; CHECK-NEXT:    ret i64 [[TMP2]]
+;
+entry:
+  %0 = load i32, ptr %a, align 4
+  %conv = sext i32 %0 to i64
+  %arrayidx1 = getelementptr inbounds nuw i8, ptr %a, i32 4
+  %1 = load i32, ptr %arrayidx1, align 4
+  %conv2 = sext i32 %1 to i64
+  %add = add nsw i64 %conv2, %conv
+  %arrayidx3 = getelementptr inbounds nuw i8, ptr %a, i32 8
+  %2 = load i32, ptr %arrayidx3, align 4
+  %conv4 = sext i32 %2 to i64
+  %add5 = add nsw i64 %add, %conv4
+  %arrayidx6 = getelementptr inbounds nuw i8, ptr %a, i32 12
+  %3 = load i32, ptr %arrayidx6, align 4
+  %conv7 = sext i32 %3 to i64
+  %add8 = add nsw i64 %add5, %conv7
+  ret i64 %add8
+}

>From 1921822b8d034a5f4617867e56a0f67e38fa5544 Mon Sep 17 00:00:00 2001
From: nasmnc01 <nashe.mncube at arm.com>
Date: Thu, 30 Jan 2025 11:51:01 +0000
Subject: [PATCH 2/3] Adjust extended reduction cost

Change-Id: Ie2795e0e5ebb0589146eaf07c752410e307a36e6
---
 llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 7e9373af0b7cb..b1152396fab0e 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1794,6 +1794,7 @@ InstructionCost ARMTTIImpl::getExtendedReductionCost(
   case ISD::ADD:
     if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
       std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
+
       // The legal cases are:
       //   VADDV u/s 8/16/32
       //   VADDLV u/s 32
@@ -1805,7 +1806,7 @@ InstructionCost ARMTTIImpl::getExtendedReductionCost(
           ((LT.second == MVT::v16i8 && RevVTSize <= 32) ||
            (LT.second == MVT::v8i16 && RevVTSize <= 32) ||
            (LT.second == MVT::v4i32 && RevVTSize <= 64)))
-        return ST->getMVEVectorCostFactor(CostKind) * LT.first;
+        return 3 * ST->getMVEVectorCostFactor(CostKind) * LT.first;
     }
     break;
   default:

>From a602844898a8f5d022d73955aab73c2f65953c0d Mon Sep 17 00:00:00 2001
From: nasmnc01 <nashe.mncube at arm.com>
Date: Wed, 5 Feb 2025 15:57:57 +0000
Subject: [PATCH 3/3] [ARM] Reduce scalar cost of SMLAL muls

Changes to the SLPVectorizer caused a regression in some
benchmarks targeting cores that support both DSP and MVE
instructions. The regression has been reduced to a case where
MUL instructions that are part of a chain of instructions
replaceable with a DSP SMLAL may also be vectorized. The
generated code ends up being an inefficient mix of scalar
and vector ops rather than leaning to one or the other. By
reducing the cost of these MUL instructions in such patterns
we recover the lost performance.
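
For reference, a minimal scalar shape of the pattern being matched
(the same form as the new cost-model test added below) is:

define i64 @smlal_shape(i64 %acc, i16 %a, i16 %b) {
  %as = sext i16 %a to i32
  %bs = sext i16 %b to i32
  %m  = mul i32 %as, %bs          ; this mul is the one given a reduced cost
  %ms = sext i32 %m to i64
  %r  = add i64 %acc, %ms         ; the whole chain maps onto a single SMLAL
  ret i64 %r
}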

Change-Id: I302817cf4fcd18a11d40fba430c44e034a36448b
---
 .../lib/Target/ARM/ARMTargetTransformInfo.cpp | 56 +++++++++++++++++--
 .../CostModel/ARM/muls-in-smlal-patterns.ll   | 37 ++++++++++++
 .../Transforms/SLPVectorizer/ARM/vadd-mve.ll  | 29 ----------
 3 files changed, 88 insertions(+), 34 deletions(-)
 create mode 100644 llvm/test/Analysis/CostModel/ARM/muls-in-smlal-patterns.ll
 delete mode 100644 llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll

diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index b1152396fab0e..8ece8bbf4e4a9 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1458,16 +1458,62 @@ InstructionCost ARMTTIImpl::getArithmeticInstrCost(
   if (LooksLikeAFreeShift())
     return 0;
 
+  // When a target has both DSP and MVE, the compiler may try to
+  // vectorize code while also using scalar SMLAL operations. For the
+  // pattern ext(mul(ext(i16), ext(i16))), the generated code performs
+  // better when only SMLAL scalar ops are used instead of mixing
+  // vector ops with SMLAL ops. We therefore check whether a mul
+  // instruction is used in an SMLAL pattern and, if so, treat it as
+  // free so the chain stays scalar.
+  auto MulInSMLALPattern = [&](const Instruction *I, unsigned Opcode,
+                               Type *Ty) -> bool {
+    if (!ST->hasDSP() || !ST->hasMVEIntegerOps())
+      return false;
+    if (!I)
+      return false;
+
+    if (Opcode != Instruction::Mul)
+      return false;
+
+    if (Ty->isVectorTy())
+      return false;
+
+    auto IsSExtInst = [](const Value *V) -> bool {
+      return isa<SExtInst>(V);
+    };
+
+    // Check the operands of the mul to see if they are sign extends.
+    auto *BinOp = dyn_cast<BinaryOperator>(I);
+    if (!BinOp)
+      return false;
+    auto *Op0 = BinOp->getOperand(0);
+    auto *Op1 = BinOp->getOperand(1);
+    if (Op0 && Op1 && IsSExtInst(Op0) && IsSExtInst(Op1)) {
+      // In the SMLAL pattern these are extends from i16 to i32.
+      if (!Op0->getType()->isIntegerTy(32) || !Op1->getType()->isIntegerTy(32))
+        return false;
+      // Check whether the mul result is further sign extended to i64.
+      for (auto *U : I->users())
+        if (IsSExtInst(U))
+          return true;
+    }
+
+    return false;
+  };
+
+  if (MulInSMLALPattern(CxtI, Opcode, Ty))
+    return 0;
+
   // Default to cheap (throughput/size of 1 instruction) but adjust throughput
   // for "multiple beats" potentially needed by MVE instructions.
   int BaseCost = 1;
   if (ST->hasMVEIntegerOps() && Ty->isVectorTy())
     BaseCost = ST->getMVEVectorCostFactor(CostKind);
 
-  // The rest of this mostly follows what is done in BaseT::getArithmeticInstrCost,
-  // without treating floats as more expensive that scalars or increasing the
-  // costs for custom operations. The results is also multiplied by the
-  // MVEVectorCostFactor where appropriate.
+  // The rest of this mostly follows what is done in
+  // BaseT::getArithmeticInstrCost, without treating floats as more expensive
+  // than scalars or increasing the costs for custom operations. The result is
+  // also multiplied by the MVEVectorCostFactor where appropriate.
   if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
     return LT.first * BaseCost;
 
@@ -1806,7 +1852,7 @@ InstructionCost ARMTTIImpl::getExtendedReductionCost(
           ((LT.second == MVT::v16i8 && RevVTSize <= 32) ||
            (LT.second == MVT::v8i16 && RevVTSize <= 32) ||
            (LT.second == MVT::v4i32 && RevVTSize <= 64)))
-        return 3 * ST->getMVEVectorCostFactor(CostKind) * LT.first;
+        return ST->getMVEVectorCostFactor(CostKind) * LT.first;
     }
     break;
   default:
diff --git a/llvm/test/Analysis/CostModel/ARM/muls-in-smlal-patterns.ll b/llvm/test/Analysis/CostModel/ARM/muls-in-smlal-patterns.ll
new file mode 100644
index 0000000000000..eafd378ab0aad
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/ARM/muls-in-smlal-patterns.ll
@@ -0,0 +1,37 @@
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple thumbv8.1-m.main -mattr=+mve,+dsp  < %s | FileCheck %s
+define i64 @test(i16 %a, i16 %b) {
+; CHECK-LABEL: 'test'
+; CHECK:  Cost Model: Found an estimated cost of 0 for instruction: %m = mul i32 %as, %bs
+;
+    %as = sext i16 %a to i32
+    %bs = sext i16 %b to i32
+    %m = mul i32 %as, %bs
+    %ms = sext i32 %m to i64
+    ret i64 %ms
+}
+
+define i64 @withadd(i16 %a, i16 %b, i64 %c) {
+; CHECK-LABEL: 'withadd'
+; CHECK:  Cost Model: Found an estimated cost of 0 for instruction: %m = mul i32 %as, %bs
+;
+    %as = sext i16 %a to i32
+    %bs = sext i16 %b to i32
+    %m = mul i32 %as, %bs
+    %ms = sext i32 %m to i64
+    %r = add i64 %c, %ms
+    ret i64 %r
+}
+
+define i64 @withloads(ptr %pa, ptr %pb, i64 %c) {
+; CHECK-LABEL: 'withloads'
+; CHECK:  Cost Model: Found an estimated cost of 0 for instruction: %m = mul i32 %as, %bs
+;
+    %a = load i16, ptr %pa
+    %b = load i16, ptr %pb
+    %as = sext i16 %a to i32
+    %bs = sext i16 %b to i32
+    %m = mul i32 %as, %bs
+    %ms = sext i32 %m to i64
+    %r = add i64 %c, %ms
+    ret i64 %r
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll b/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll
deleted file mode 100644
index e0af0ca1e4f8b..0000000000000
--- a/llvm/test/Transforms/SLPVectorizer/ARM/vadd-mve.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt < %s -passes=slp-vectorizer --mtriple arm-none-eabi -mattr=+mve -S -o - | FileCheck %s
-
-define i64 @vadd_32_64(ptr readonly %a) {
-; CHECK-LABEL: define i64 @vadd_32_64(
-; CHECK-SAME: ptr readonly [[A:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[A]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i32> [[TMP0]] to <4 x i64>
-; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT:    ret i64 [[TMP2]]
-;
-entry:
-  %0 = load i32, ptr %a, align 4
-  %conv = sext i32 %0 to i64
-  %arrayidx1 = getelementptr inbounds nuw i8, ptr %a, i32 4
-  %1 = load i32, ptr %arrayidx1, align 4
-  %conv2 = sext i32 %1 to i64
-  %add = add nsw i64 %conv2, %conv
-  %arrayidx3 = getelementptr inbounds nuw i8, ptr %a, i32 8
-  %2 = load i32, ptr %arrayidx3, align 4
-  %conv4 = sext i32 %2 to i64
-  %add5 = add nsw i64 %add, %conv4
-  %arrayidx6 = getelementptr inbounds nuw i8, ptr %a, i32 12
-  %3 = load i32, ptr %arrayidx6, align 4
-  %conv7 = sext i32 %3 to i64
-  %add8 = add nsw i64 %add5, %conv7
-  ret i64 %add8
-}


