[llvm] [TTI][WebAssembly] Pairwise reduction expansion (PR #93948)

via llvm-commits llvm-commits at lists.llvm.org
Fri May 31 03:49:07 PDT 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-llvm-analysis

@llvm/pr-subscribers-llvm-transforms

Author: Sam Parker (sparker-arm)

<details>
<summary>Changes</summary>

WebAssembly doesn't support horizontal operations nor does it have a way of expressing fast-math or reassoc flags, so runtimes are currently unable to use pairwise operations when generating code from the existing shuffle patterns.

This patch allows the backend to select which arbitrary shuffle pattern to use per reduction intrinsic. The default behaviour is the same as the existing one: splitting the vector into a top and a bottom half. The other pattern introduced is for a pairwise shuffle.

WebAssembly enables pairwise reductions for int/fp add/sub.

---
Full diff: https://github.com/llvm/llvm-project/pull/93948.diff


9 Files Affected:

- (modified) llvm/include/llvm/Analysis/TargetTransformInfo.h (+17) 
- (modified) llvm/include/llvm/Analysis/TargetTransformInfoImpl.h (+5) 
- (modified) llvm/include/llvm/Transforms/Utils/LoopUtils.h (+2) 
- (modified) llvm/lib/Analysis/TargetTransformInfo.cpp (+6) 
- (modified) llvm/lib/CodeGen/ExpandReductions.cpp (+6-4) 
- (modified) llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp (+13) 
- (modified) llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h (+2) 
- (modified) llvm/lib/Transforms/Utils/LoopUtils.cpp (+29-12) 
- (added) llvm/test/CodeGen/WebAssembly/vector-reduce.ll (+130) 


``````````diff
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index cefce93f9e25c..00658428e96d1 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -1696,6 +1696,16 @@ class TargetTransformInfo {
   /// into a shuffle sequence.
   bool shouldExpandReduction(const IntrinsicInst *II) const;
 
+  enum struct ReductionShuffle {
+    SplitHalf,
+    Pairwise
+  };
+
+  /// \returns The shuffle sequence pattern used to expand the given reduction
+  /// intrinsic.
+  ReductionShuffle getPreferredExpandedReductionShuffle(
+      const IntrinsicInst *II) const;
+
   /// \returns the size cost of rematerializing a GlobalValue address relative
   /// to a stack reload.
   unsigned getGISelRematGlobalCost() const;
@@ -2145,6 +2155,8 @@ class TargetTransformInfo::Concept {
   virtual bool preferEpilogueVectorization() const = 0;
 
   virtual bool shouldExpandReduction(const IntrinsicInst *II) const = 0;
+  virtual ReductionShuffle
+    getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const = 0;
   virtual unsigned getGISelRematGlobalCost() const = 0;
   virtual unsigned getMinTripCountTailFoldingThreshold() const = 0;
   virtual bool enableScalableVectorization() const = 0;
@@ -2881,6 +2893,11 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
     return Impl.shouldExpandReduction(II);
   }
 
+  ReductionShuffle
+  getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const override {
+    return Impl.getPreferredExpandedReductionShuffle(II);
+  }
+
   unsigned getGISelRematGlobalCost() const override {
     return Impl.getGISelRematGlobalCost();
   }
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 9a57331d281db..ab83f7b88b6b1 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -927,6 +927,11 @@ class TargetTransformInfoImplBase {
 
   bool shouldExpandReduction(const IntrinsicInst *II) const { return true; }
 
+  TTI::ReductionShuffle
+  getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const {
+      return TTI::ReductionShuffle::SplitHalf;
+  }
+
   unsigned getGISelRematGlobalCost() const { return 1; }
 
   unsigned getMinTripCountTailFoldingThreshold() const { return 0; }
diff --git a/llvm/include/llvm/Transforms/Utils/LoopUtils.h b/llvm/include/llvm/Transforms/Utils/LoopUtils.h
index 345e09dce0b2b..9b26ad0b2fc8c 100644
--- a/llvm/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/llvm/include/llvm/Transforms/Utils/LoopUtils.h
@@ -15,6 +15,7 @@
 
 #include "llvm/Analysis/IVDescriptors.h"
 #include "llvm/Analysis/LoopAccessAnalysis.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/Transforms/Utils/ValueMapper.h"
 
 namespace llvm {
@@ -384,6 +385,7 @@ Value *getOrderedReduction(IRBuilderBase &Builder, Value *Acc, Value *Src,
 /// Generates a vector reduction using shufflevectors to reduce the value.
 /// Fast-math-flags are propagated using the IRBuilder's setting.
 Value *getShuffleReduction(IRBuilderBase &Builder, Value *Src, unsigned Op,
+                           TargetTransformInfo::ReductionShuffle RS,
                            RecurKind MinMaxKind = RecurKind::None);
 
 /// Create a target reduction of the given vector. The reduction operation
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 82b6d7e7c4833..7d37b222d9f00 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -1309,6 +1309,12 @@ bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
   return TTIImpl->shouldExpandReduction(II);
 }
 
+TargetTransformInfo::ReductionShuffle
+TargetTransformInfo::getPreferredExpandedReductionShuffle(
+    const IntrinsicInst *II) const {
+  return TTIImpl->getPreferredExpandedReductionShuffle(II);
+}
+
 unsigned TargetTransformInfo::getGISelRematGlobalCost() const {
   return TTIImpl->getGISelRematGlobalCost();
 }
diff --git a/llvm/lib/CodeGen/ExpandReductions.cpp b/llvm/lib/CodeGen/ExpandReductions.cpp
index 0b1504e51b1bb..cb7e73b04f812 100644
--- a/llvm/lib/CodeGen/ExpandReductions.cpp
+++ b/llvm/lib/CodeGen/ExpandReductions.cpp
@@ -59,6 +59,8 @@ bool expandReductions(Function &F, const TargetTransformInfo *TTI) {
         isa<FPMathOperator>(II) ? II->getFastMathFlags() : FastMathFlags{};
     Intrinsic::ID ID = II->getIntrinsicID();
     RecurKind RK = getMinMaxReductionRecurKind(ID);
+    TargetTransformInfo::ReductionShuffle RS =
+      TTI->getPreferredExpandedReductionShuffle(II);
 
     Value *Rdx = nullptr;
     IRBuilder<> Builder(II);
@@ -79,7 +81,7 @@ bool expandReductions(Function &F, const TargetTransformInfo *TTI) {
         if (!isPowerOf2_32(
                 cast<FixedVectorType>(Vec->getType())->getNumElements()))
           continue;
-        Rdx = getShuffleReduction(Builder, Vec, RdxOpcode, RK);
+        Rdx = getShuffleReduction(Builder, Vec, RdxOpcode, RS, RK);
         Rdx = Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, Acc, Rdx,
                                   "bin.rdx");
       }
@@ -112,7 +114,7 @@ bool expandReductions(Function &F, const TargetTransformInfo *TTI) {
         break;
       }
       unsigned RdxOpcode = getArithmeticReductionInstruction(ID);
-      Rdx = getShuffleReduction(Builder, Vec, RdxOpcode, RK);
+      Rdx = getShuffleReduction(Builder, Vec, RdxOpcode, RS, RK);
       break;
     }
     case Intrinsic::vector_reduce_add:
@@ -127,7 +129,7 @@ bool expandReductions(Function &F, const TargetTransformInfo *TTI) {
               cast<FixedVectorType>(Vec->getType())->getNumElements()))
         continue;
       unsigned RdxOpcode = getArithmeticReductionInstruction(ID);
-      Rdx = getShuffleReduction(Builder, Vec, RdxOpcode, RK);
+      Rdx = getShuffleReduction(Builder, Vec, RdxOpcode, RS, RK);
       break;
     }
     case Intrinsic::vector_reduce_fmax:
@@ -140,7 +142,7 @@ bool expandReductions(Function &F, const TargetTransformInfo *TTI) {
           !FMF.noNaNs())
         continue;
       unsigned RdxOpcode = getArithmeticReductionInstruction(ID);
-      Rdx = getShuffleReduction(Builder, Vec, RdxOpcode, RK);
+      Rdx = getShuffleReduction(Builder, Vec, RdxOpcode, RS, RK);
       break;
     }
     }
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
index 9a434d9b1db54..a286dfc9d4b62 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
@@ -94,6 +94,19 @@ WebAssemblyTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
   return Cost;
 }
 
+TTI::ReductionShuffle WebAssemblyTTIImpl::getPreferredExpandedReductionShuffle(
+    const IntrinsicInst *II) const {
+
+  switch (II->getIntrinsicID()) {
+  default:
+    break;
+  case Intrinsic::vector_reduce_add:
+  case Intrinsic::vector_reduce_fadd:
+    return TTI::ReductionShuffle::Pairwise;
+  }
+  return TTI::ReductionShuffle::SplitHalf;
+}
+
 bool WebAssemblyTTIImpl::areInlineCompatible(const Function *Caller,
                                              const Function *Callee) const {
   // Allow inlining only when the Callee has a subset of the Caller's
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
index 801f905d377ed..32e9af00f35ce 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
@@ -70,6 +70,8 @@ class WebAssemblyTTIImpl final : public BasicTTIImplBase<WebAssemblyTTIImpl> {
                                      TTI::TargetCostKind CostKind,
                                      unsigned Index, Value *Op0, Value *Op1);
 
+  TTI::ReductionShuffle
+  getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const;
   /// @}
 
   bool areInlineCompatible(const Function *Caller,
diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp
index cc883a7dc2927..03e687679eb91 100644
--- a/llvm/lib/Transforms/Utils/LoopUtils.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp
@@ -1077,7 +1077,9 @@ Value *llvm::getOrderedReduction(IRBuilderBase &Builder, Value *Acc, Value *Src,
 
 // Helper to generate a log2 shuffle reduction.
 Value *llvm::getShuffleReduction(IRBuilderBase &Builder, Value *Src,
-                                 unsigned Op, RecurKind RdxKind) {
+                                 unsigned Op,
+                                 TargetTransformInfo::ReductionShuffle RS,
+                                 RecurKind RdxKind) {
   unsigned VF = cast<FixedVectorType>(Src->getType())->getNumElements();
   // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles
   // and vector ops, reducing the set of values being computed by half each
@@ -1091,18 +1093,9 @@ Value *llvm::getShuffleReduction(IRBuilderBase &Builder, Value *Src,
   // will never be relevant here.  Note that it would be generally unsound to
   // propagate these from an intrinsic call to the expansion anyways as we/
   // change the order of operations.
-  Value *TmpVec = Src;
-  SmallVector<int, 32> ShuffleMask(VF);
-  for (unsigned i = VF; i != 1; i >>= 1) {
-    // Move the upper half of the vector to the lower half.
-    for (unsigned j = 0; j != i / 2; ++j)
-      ShuffleMask[j] = i / 2 + j;
-
-    // Fill the rest of the mask with undef.
-    std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), -1);
-
+  auto BuildShuffledOp = [&Builder, &Op, &RdxKind](
+      SmallVectorImpl<int> &ShuffleMask, Value*& TmpVec) -> void {
     Value *Shuf = Builder.CreateShuffleVector(TmpVec, ShuffleMask, "rdx.shuf");
-
     if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
       TmpVec = Builder.CreateBinOp((Instruction::BinaryOps)Op, TmpVec, Shuf,
                                    "bin.rdx");
@@ -1111,6 +1104,30 @@ Value *llvm::getShuffleReduction(IRBuilderBase &Builder, Value *Src,
              "Invalid min/max");
       TmpVec = createMinMaxOp(Builder, RdxKind, TmpVec, Shuf);
     }
+  };
+
+  Value *TmpVec = Src;
+  if (TargetTransformInfo::ReductionShuffle::Pairwise == RS) {
+    SmallVector<int, 32> ShuffleMask(VF);
+    for (unsigned stride = 1; stride < VF; stride <<= 1) {
+      // Initialise the mask with undef.
+      std::fill(ShuffleMask.begin(), ShuffleMask.end(), -1);
+      for (unsigned j = 0; j < VF; j += stride << 1) {
+        ShuffleMask[j] = j + stride;
+      }
+      BuildShuffledOp(ShuffleMask, TmpVec);
+    }
+  } else {
+    SmallVector<int, 32> ShuffleMask(VF);
+    for (unsigned i = VF; i != 1; i >>= 1) {
+      // Move the upper half of the vector to the lower half.
+      for (unsigned j = 0; j != i / 2; ++j)
+        ShuffleMask[j] = i / 2 + j;
+
+      // Fill the rest of the mask with undef.
+      std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), -1);
+      BuildShuffledOp(ShuffleMask, TmpVec);
+    }
   }
   // The result is in the first element of the vector.
   return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
diff --git a/llvm/test/CodeGen/WebAssembly/vector-reduce.ll b/llvm/test/CodeGen/WebAssembly/vector-reduce.ll
new file mode 100644
index 0000000000000..620e11cda7792
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/vector-reduce.ll
@@ -0,0 +1,130 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=wasm32 -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+simd128 | FileCheck %s --check-prefix=SIMD128
+
+define i64 @pairwise_add_v2i64(<2 x i64> %arg) {
+; SIMD128-LABEL: pairwise_add_v2i64:
+; SIMD128:         .functype pairwise_add_v2i64 (v128) -> (i64)
+; SIMD128-NEXT:  # %bb.0:
+; SIMD128-NEXT:    i8x16.shuffle $push0=, $0, $0, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7
+; SIMD128-NEXT:    i64x2.add $push1=, $0, $pop0
+; SIMD128-NEXT:    i64x2.extract_lane $push2=, $pop1, 0
+; SIMD128-NEXT:    return $pop2
+  %res = tail call i64 @llvm.vector.reduce.add.i64.v4i64(<2 x i64> %arg)
+  ret i64 %res
+}
+
+define i32 @pairwise_add_v4i32(<4 x i32> %arg) {
+; SIMD128-LABEL: pairwise_add_v4i32:
+; SIMD128:         .functype pairwise_add_v4i32 (v128) -> (i32)
+; SIMD128-NEXT:  # %bb.0:
+; SIMD128-NEXT:    i8x16.shuffle $push0=, $0, $0, 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 0, 1, 2, 3
+; SIMD128-NEXT:    i32x4.add $push5=, $0, $pop0
+; SIMD128-NEXT:    local.tee $push4=, $0=, $pop5
+; SIMD128-NEXT:    i8x16.shuffle $push1=, $0, $0, 8, 9, 10, 11, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+; SIMD128-NEXT:    i32x4.add $push2=, $pop4, $pop1
+; SIMD128-NEXT:    i32x4.extract_lane $push3=, $pop2, 0
+; SIMD128-NEXT:    return $pop3
+  %res = tail call i32 @llvm.vector.reduce.add.i32.v4f32(<4 x i32> %arg)
+  ret i32 %res
+}
+
+define i16 @pairwise_add_v8i16(<8 x i16> %arg) {
+; SIMD128-LABEL: pairwise_add_v8i16:
+; SIMD128:         .functype pairwise_add_v8i16 (v128) -> (i32)
+; SIMD128-NEXT:  # %bb.0:
+; SIMD128-NEXT:    i8x16.shuffle $push0=, $0, $0, 2, 3, 0, 1, 6, 7, 0, 1, 10, 11, 0, 1, 14, 15, 0, 1
+; SIMD128-NEXT:    i16x8.add $push8=, $0, $pop0
+; SIMD128-NEXT:    local.tee $push7=, $0=, $pop8
+; SIMD128-NEXT:    i8x16.shuffle $push1=, $0, $0, 4, 5, 0, 1, 0, 1, 0, 1, 12, 13, 0, 1, 0, 1, 0, 1
+; SIMD128-NEXT:    i16x8.add $push6=, $pop7, $pop1
+; SIMD128-NEXT:    local.tee $push5=, $0=, $pop6
+; SIMD128-NEXT:    i8x16.shuffle $push2=, $0, $0, 8, 9, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
+; SIMD128-NEXT:    i16x8.add $push3=, $pop5, $pop2
+; SIMD128-NEXT:    i16x8.extract_lane_u $push4=, $pop3, 0
+; SIMD128-NEXT:    return $pop4
+  %res = tail call i16 @llvm.vector.reduce.add.i16.v8i16(<8 x i16> %arg)
+  ret i16 %res
+}
+
+define i8 @pairwise_add_v16i8(<16 x i8> %arg) {
+; SIMD128-LABEL: pairwise_add_v16i8:
+; SIMD128:         .functype pairwise_add_v16i8 (v128) -> (i32)
+; SIMD128-NEXT:  # %bb.0:
+; SIMD128-NEXT:    i8x16.shuffle $push0=, $0, $0, 1, 0, 3, 0, 5, 0, 7, 0, 9, 0, 11, 0, 13, 0, 15, 0
+; SIMD128-NEXT:    i8x16.add $push11=, $0, $pop0
+; SIMD128-NEXT:    local.tee $push10=, $0=, $pop11
+; SIMD128-NEXT:    i8x16.shuffle $push1=, $0, $0, 2, 0, 0, 0, 6, 0, 0, 0, 10, 0, 0, 0, 14, 0, 0, 0
+; SIMD128-NEXT:    i8x16.add $push9=, $pop10, $pop1
+; SIMD128-NEXT:    local.tee $push8=, $0=, $pop9
+; SIMD128-NEXT:    i8x16.shuffle $push2=, $0, $0, 4, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0
+; SIMD128-NEXT:    i8x16.add $push7=, $pop8, $pop2
+; SIMD128-NEXT:    local.tee $push6=, $0=, $pop7
+; SIMD128-NEXT:    i8x16.shuffle $push3=, $0, $0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; SIMD128-NEXT:    i8x16.add $push4=, $pop6, $pop3
+; SIMD128-NEXT:    i8x16.extract_lane_u $push5=, $pop4, 0
+; SIMD128-NEXT:    return $pop5
+  %res = tail call i8 @llvm.vector.reduce.add.i8.v16i8(<16 x i8> %arg)
+  ret i8 %res
+}
+
+define double @pairwise_add_v2f64(<2 x double> %arg) {
+; SIMD128-LABEL: pairwise_add_v2f64:
+; SIMD128:         .functype pairwise_add_v2f64 (v128) -> (f64)
+; SIMD128-NEXT:  # %bb.0:
+; SIMD128-NEXT:    f64x2.extract_lane $push1=, $0, 0
+; SIMD128-NEXT:    f64x2.extract_lane $push0=, $0, 1
+; SIMD128-NEXT:    f64.add $push2=, $pop1, $pop0
+; SIMD128-NEXT:    return $pop2
+  %res = tail call double @llvm.vector.reduce.fadd.f64.v2f64(double -0.0, <2 x double> %arg)
+  ret double%res
+}
+
+define double @pairwise_add_v2f64_fast(<2 x double> %arg) {
+; SIMD128-LABEL: pairwise_add_v2f64_fast:
+; SIMD128:         .functype pairwise_add_v2f64_fast (v128) -> (f64)
+; SIMD128-NEXT:  # %bb.0:
+; SIMD128-NEXT:    i8x16.shuffle $push0=, $0, $0, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7
+; SIMD128-NEXT:    f64x2.add $push1=, $0, $pop0
+; SIMD128-NEXT:    f64x2.extract_lane $push2=, $pop1, 0
+; SIMD128-NEXT:    return $pop2
+  %res = tail call fast double @llvm.vector.reduce.fadd.f64.v2f64(double -0.0, <2 x double> %arg)
+  ret double%res
+}
+
+define float @pairwise_add_v4f32(<4 x float> %arg) {
+; SIMD128-LABEL: pairwise_add_v4f32:
+; SIMD128:         .functype pairwise_add_v4f32 (v128) -> (f32)
+; SIMD128-NEXT:  # %bb.0:
+; SIMD128-NEXT:    f32x4.extract_lane $push1=, $0, 0
+; SIMD128-NEXT:    f32x4.extract_lane $push0=, $0, 1
+; SIMD128-NEXT:    f32.add $push2=, $pop1, $pop0
+; SIMD128-NEXT:    f32x4.extract_lane $push3=, $0, 2
+; SIMD128-NEXT:    f32.add $push4=, $pop2, $pop3
+; SIMD128-NEXT:    f32x4.extract_lane $push5=, $0, 3
+; SIMD128-NEXT:    f32.add $push6=, $pop4, $pop5
+; SIMD128-NEXT:    return $pop6
+  %res = tail call float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %arg)
+  ret float %res
+}
+
+define float @pairwise_add_v4f32_fast(<4 x float> %arg) {
+; SIMD128-LABEL: pairwise_add_v4f32_fast:
+; SIMD128:         .functype pairwise_add_v4f32_fast (v128) -> (f32)
+; SIMD128-NEXT:  # %bb.0:
+; SIMD128-NEXT:    i8x16.shuffle $push0=, $0, $0, 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 0, 1, 2, 3
+; SIMD128-NEXT:    f32x4.add $push5=, $0, $pop0
+; SIMD128-NEXT:    local.tee $push4=, $0=, $pop5
+; SIMD128-NEXT:    i8x16.shuffle $push1=, $0, $0, 8, 9, 10, 11, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+; SIMD128-NEXT:    f32x4.add $push2=, $pop4, $pop1
+; SIMD128-NEXT:    f32x4.extract_lane $push3=, $pop2, 0
+; SIMD128-NEXT:    return $pop3
+  %res = tail call fast float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %arg)
+  ret float %res
+}
+
+declare i64 @llvm.vector.reduce.add.i64.v4i64(<2 x i64>)
+declare i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32>)
+declare i16 @llvm.vector.reduce.add.i16.v8i16(<8 x i16>)
+declare i8 @llvm.vector.reduce.add.i8.v16i8(<16 x i8>)
+declare double @llvm.vector.reduce.fadd.f64.v2f64(double, <2 x double>)
+declare float @llvm.vector.reduce.fadd.f32.v4f32(float, <4 x float>)

``````````

</details>


https://github.com/llvm/llvm-project/pull/93948


More information about the llvm-commits mailing list