[llvm] e5f4019 - [AArch64] Add extending reduction costs for addlv and dot

David Green via llvm-commits llvm-commits at lists.llvm.org
Sat Feb 15 12:33:07 PST 2025


Author: David Green
Date: 2025-02-15T20:33:03Z
New Revision: e5f4019f69948f55b77fcb5f63ae8c296418432c

URL: https://github.com/llvm/llvm-project/commit/e5f4019f69948f55b77fcb5f63ae8c296418432c
DIFF: https://github.com/llvm/llvm-project/commit/e5f4019f69948f55b77fcb5f63ae8c296418432c.diff

LOG: [AArch64] Add extending reduction costs for addlv and dot

This adds basic getExtendedReductionCost and getMulAccReductionCost
implementations to account for add reductions (uaddlv/saddlv) and mla
reductions with dotprod.

Added: 
    llvm/test/Transforms/SLPVectorizer/AArch64/vecreduceadd.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
    llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
    llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index a8cf4aba6186d..9e286a91cae3b 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -4635,6 +4635,54 @@ AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
   return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
 }
 
+InstructionCost AArch64TTIImpl::getExtendedReductionCost(
+    unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *VecTy,
+    FastMathFlags FMF, TTI::TargetCostKind CostKind) {
+  EVT VecVT = TLI->getValueType(DL, VecTy);
+  EVT ResVT = TLI->getValueType(DL, ResTy);
+
+  if (Opcode == Instruction::Add && VecVT.isSimple() && ResVT.isSimple() &&
+      VecVT.getSizeInBits() >= 64) {
+    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VecTy);
+
+    // The legal cases are:
+    //   UADDLV 8/16/32->32
+    //   UADDLP 32->64
+    unsigned RevVTSize = ResVT.getSizeInBits();
+    if (((LT.second == MVT::v8i8 || LT.second == MVT::v16i8) &&
+         RevVTSize <= 32) ||
+        ((LT.second == MVT::v4i16 || LT.second == MVT::v8i16) &&
+         RevVTSize <= 32) ||
+        ((LT.second == MVT::v2i32 || LT.second == MVT::v4i32) &&
+         RevVTSize <= 64))
+      return (LT.first - 1) * 2 + 2;
+  }
+
+  return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, VecTy, FMF,
+                                         CostKind);
+}
+
+InstructionCost
+AArch64TTIImpl::getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
+                                       VectorType *VecTy,
+                                       TTI::TargetCostKind CostKind) {
+  EVT VecVT = TLI->getValueType(DL, VecTy);
+  EVT ResVT = TLI->getValueType(DL, ResTy);
+
+  if (ST->hasDotProd() && VecVT.isSimple() && ResVT.isSimple()) {
+    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VecTy);
+
+    // The legal cases with dotprod are
+    //   UDOT 8->32
+    // Which requires an additional uaddv to sum the i32 values.
+    if ((LT.second == MVT::v8i8 || LT.second == MVT::v16i8) &&
+         ResVT == MVT::i32)
+      return LT.first + 2;
+  }
+
+  return BaseT::getMulAccReductionCost(IsUnsigned, ResTy, VecTy, CostKind);
+}
+
 InstructionCost AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index) {
   static const CostTblEntry ShuffleTbl[] = {
       { TTI::SK_Splice, MVT::nxv16i8,  1 },

diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index 481cb5511a331..c7f8450213ae5 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -425,6 +425,15 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
                                              std::optional<FastMathFlags> FMF,
                                              TTI::TargetCostKind CostKind);
 
+  InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
+                                           Type *ResTy, VectorType *ValTy,
+                                           FastMathFlags FMF,
+                                           TTI::TargetCostKind CostKind);
+
+  InstructionCost getMulAccReductionCost(
+      bool IsUnsigned, Type *ResTy, VectorType *Ty,
+      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput);
+
   InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                  ArrayRef<int> Mask,
                                  TTI::TargetCostKind CostKind, int Index,

diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll
index 7c5f9847db1f4..8dc0181425625 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll
@@ -228,7 +228,7 @@ for.end:                                          ; preds = %for.end.loopexit, %
 ; YAML-NEXT: Function:        test_unrolled_select
 ; YAML-NEXT: Args:
 ; YAML-NEXT:   - String:          'Vectorized horizontal reduction with cost '
-; YAML-NEXT:   - Cost:            '-41'
+; YAML-NEXT:   - Cost:            '-44'
 ; YAML-NEXT:   - String:          ' and with tree size '
 ; YAML-NEXT:   - TreeSize:        '10'
 

diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/vecreduceadd.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/vecreduceadd.ll
new file mode 100644
index 0000000000000..36826eb6681c8
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/vecreduceadd.ll
@@ -0,0 +1,1151 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=slp-vectorizer -slp-threshold=-2 -S -pass-remarks-output=%t < %s | FileCheck %s
+; RUN: cat %t | FileCheck -check-prefix=COST %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
+target triple = "aarch64"
+
+; COST-LABEL: Function:  sext_v4i8_i16
+; COST: Cost:            '-2'
+define i16 @sext_v4i8_i16(ptr %x) {
+; CHECK-LABEL: @sext_v4i8_i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr [[X:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i8> [[TMP0]] to <4 x i16>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[TMP1]])
+; CHECK-NEXT:    ret i16 [[TMP2]]
+;
+entry:
+  %0 = load i8, ptr %x
+  %conv = sext i8 %0 to i16
+  %arrayidx.1 = getelementptr inbounds nuw i8, ptr %x, i64 1
+  %1 = load i8, ptr %arrayidx.1
+  %conv.1 = sext i8 %1 to i16
+  %add.1 = add nsw i16 %conv, %conv.1
+  %arrayidx.2 = getelementptr inbounds nuw i8, ptr %x, i64 2
+  %2 = load i8, ptr %arrayidx.2
+  %conv.2 = sext i8 %2 to i16
+  %add.2 = add nsw i16 %add.1, %conv.2
+  %arrayidx.3 = getelementptr inbounds nuw i8, ptr %x, i64 3
+  %3 = load i8, ptr %arrayidx.3
+  %conv.3 = sext i8 %3 to i16
+  %add.3 = add nsw i16 %add.2, %conv.3
+  ret i16 %add.3
+}
+
+; COST-LABEL: Function:  sext_v8i8_i16
+; COST: Cost:            '-12'
+define i16 @sext_v8i8_i16(ptr %x) {
+; CHECK-LABEL: @sext_v8i8_i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[X:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <8 x i8> [[TMP0]] to <8 x i16>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[TMP1]])
+; CHECK-NEXT:    ret i16 [[TMP2]]
+;
+entry:
+  %0 = load i8, ptr %x
+  %conv = sext i8 %0 to i16
+  %arrayidx.1 = getelementptr inbounds nuw i8, ptr %x, i64 1
+  %1 = load i8, ptr %arrayidx.1
+  %conv.1 = sext i8 %1 to i16
+  %add.1 = add nsw i16 %conv, %conv.1
+  %arrayidx.2 = getelementptr inbounds nuw i8, ptr %x, i64 2
+  %2 = load i8, ptr %arrayidx.2
+  %conv.2 = sext i8 %2 to i16
+  %add.2 = add nsw i16 %add.1, %conv.2
+  %arrayidx.3 = getelementptr inbounds nuw i8, ptr %x, i64 3
+  %3 = load i8, ptr %arrayidx.3
+  %conv.3 = sext i8 %3 to i16
+  %add.3 = add nsw i16 %add.2, %conv.3
+  %arrayidx.4 = getelementptr inbounds nuw i8, ptr %x, i64 4
+  %4 = load i8, ptr %arrayidx.4
+  %conv.4 = sext i8 %4 to i16
+  %add.4 = add nsw i16 %add.3, %conv.4
+  %arrayidx.5 = getelementptr inbounds nuw i8, ptr %x, i64 5
+  %5 = load i8, ptr %arrayidx.5
+  %conv.5 = sext i8 %5 to i16
+  %add.5 = add nsw i16 %add.4, %conv.5
+  %arrayidx.6 = getelementptr inbounds nuw i8, ptr %x, i64 6
+  %6 = load i8, ptr %arrayidx.6
+  %conv.6 = sext i8 %6 to i16
+  %add.6 = add nsw i16 %add.5, %conv.6
+  %arrayidx.7 = getelementptr inbounds nuw i8, ptr %x, i64 7
+  %7 = load i8, ptr %arrayidx.7
+  %conv.7 = sext i8 %7 to i16
+  %add.7 = add nsw i16 %add.6, %conv.7
+  ret i16 %add.7
+}
+
+; COST-LABEL: Function:  sext_v16i8_i16
+; COST: Cost:            '-28'
+define i16 @sext_v16i8_i16(ptr %x) {
+; CHECK-LABEL: @sext_v16i8_i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[X:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <16 x i8> [[TMP0]] to <16 x i16>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> [[TMP1]])
+; CHECK-NEXT:    ret i16 [[TMP2]]
+;
+entry:
+  %0 = load i8, ptr %x
+  %conv = sext i8 %0 to i16
+  %arrayidx.1 = getelementptr inbounds nuw i8, ptr %x, i64 1
+  %1 = load i8, ptr %arrayidx.1
+  %conv.1 = sext i8 %1 to i16
+  %add.1 = add nsw i16 %conv, %conv.1
+  %arrayidx.2 = getelementptr inbounds nuw i8, ptr %x, i64 2
+  %2 = load i8, ptr %arrayidx.2
+  %conv.2 = sext i8 %2 to i16
+  %add.2 = add nsw i16 %add.1, %conv.2
+  %arrayidx.3 = getelementptr inbounds nuw i8, ptr %x, i64 3
+  %3 = load i8, ptr %arrayidx.3
+  %conv.3 = sext i8 %3 to i16
+  %add.3 = add nsw i16 %add.2, %conv.3
+  %arrayidx.4 = getelementptr inbounds nuw i8, ptr %x, i64 4
+  %4 = load i8, ptr %arrayidx.4
+  %conv.4 = sext i8 %4 to i16
+  %add.4 = add nsw i16 %add.3, %conv.4
+  %arrayidx.5 = getelementptr inbounds nuw i8, ptr %x, i64 5
+  %5 = load i8, ptr %arrayidx.5
+  %conv.5 = sext i8 %5 to i16
+  %add.5 = add nsw i16 %add.4, %conv.5
+  %arrayidx.6 = getelementptr inbounds nuw i8, ptr %x, i64 6
+  %6 = load i8, ptr %arrayidx.6
+  %conv.6 = sext i8 %6 to i16
+  %add.6 = add nsw i16 %add.5, %conv.6
+  %arrayidx.7 = getelementptr inbounds nuw i8, ptr %x, i64 7
+  %7 = load i8, ptr %arrayidx.7
+  %conv.7 = sext i8 %7 to i16
+  %add.7 = add nsw i16 %add.6, %conv.7
+  %arrayidx.8 = getelementptr inbounds nuw i8, ptr %x, i64 8
+  %8 = load i8, ptr %arrayidx.8
+  %conv.8 = sext i8 %8 to i16
+  %add.8 = add nsw i16 %add.7, %conv.8
+  %arrayidx.9 = getelementptr inbounds nuw i8, ptr %x, i64 9
+  %9 = load i8, ptr %arrayidx.9
+  %conv.9 = sext i8 %9 to i16
+  %add.9 = add nsw i16 %add.8, %conv.9
+  %arrayidx.10 = getelementptr inbounds nuw i8, ptr %x, i64 10
+  %10 = load i8, ptr %arrayidx.10
+  %conv.10 = sext i8 %10 to i16
+  %add.10 = add nsw i16 %add.9, %conv.10
+  %arrayidx.11 = getelementptr inbounds nuw i8, ptr %x, i64 11
+  %11 = load i8, ptr %arrayidx.11
+  %conv.11 = sext i8 %11 to i16
+  %add.11 = add nsw i16 %add.10, %conv.11
+  %arrayidx.12 = getelementptr inbounds nuw i8, ptr %x, i64 12
+  %12 = load i8, ptr %arrayidx.12
+  %conv.12 = sext i8 %12 to i16
+  %add.12 = add nsw i16 %add.11, %conv.12
+  %arrayidx.13 = getelementptr inbounds nuw i8, ptr %x, i64 13
+  %13 = load i8, ptr %arrayidx.13
+  %conv.13 = sext i8 %13 to i16
+  %add.13 = add nsw i16 %add.12, %conv.13
+  %arrayidx.14 = getelementptr inbounds nuw i8, ptr %x, i64 14
+  %14 = load i8, ptr %arrayidx.14
+  %conv.14 = sext i8 %14 to i16
+  %add.14 = add nsw i16 %add.13, %conv.14
+  %arrayidx.15 = getelementptr inbounds nuw i8, ptr %x, i64 15
+  %15 = load i8, ptr %arrayidx.15
+  %conv.15 = sext i8 %15 to i16
+  %add.15 = add nsw i16 %add.14, %conv.15
+  ret i16 %add.15
+}
+
+; COST-LABEL: Function:  sext_v32i8_i16
+; COST: Cost:            '-57'
+define i16 @sext_v32i8_i16(ptr %x) {
+; CHECK-LABEL: @sext_v32i8_i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <32 x i8>, ptr [[X:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <32 x i8> [[TMP0]] to <32 x i16>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.vector.reduce.add.v32i16(<32 x i16> [[TMP1]])
+; CHECK-NEXT:    ret i16 [[TMP2]]
+;
+entry:
+  %0 = load i8, ptr %x
+  %conv = sext i8 %0 to i16
+  %arrayidx.1 = getelementptr inbounds nuw i8, ptr %x, i64 1
+  %1 = load i8, ptr %arrayidx.1
+  %conv.1 = sext i8 %1 to i16
+  %add.1 = add nsw i16 %conv, %conv.1
+  %arrayidx.2 = getelementptr inbounds nuw i8, ptr %x, i64 2
+  %2 = load i8, ptr %arrayidx.2
+  %conv.2 = sext i8 %2 to i16
+  %add.2 = add nsw i16 %add.1, %conv.2
+  %arrayidx.3 = getelementptr inbounds nuw i8, ptr %x, i64 3
+  %3 = load i8, ptr %arrayidx.3
+  %conv.3 = sext i8 %3 to i16
+  %add.3 = add nsw i16 %add.2, %conv.3
+  %arrayidx.4 = getelementptr inbounds nuw i8, ptr %x, i64 4
+  %4 = load i8, ptr %arrayidx.4
+  %conv.4 = sext i8 %4 to i16
+  %add.4 = add nsw i16 %add.3, %conv.4
+  %arrayidx.5 = getelementptr inbounds nuw i8, ptr %x, i64 5
+  %5 = load i8, ptr %arrayidx.5
+  %conv.5 = sext i8 %5 to i16
+  %add.5 = add nsw i16 %add.4, %conv.5
+  %arrayidx.6 = getelementptr inbounds nuw i8, ptr %x, i64 6
+  %6 = load i8, ptr %arrayidx.6
+  %conv.6 = sext i8 %6 to i16
+  %add.6 = add nsw i16 %add.5, %conv.6
+  %arrayidx.7 = getelementptr inbounds nuw i8, ptr %x, i64 7
+  %7 = load i8, ptr %arrayidx.7
+  %conv.7 = sext i8 %7 to i16
+  %add.7 = add nsw i16 %add.6, %conv.7
+  %arrayidx.8 = getelementptr inbounds nuw i8, ptr %x, i64 8
+  %8 = load i8, ptr %arrayidx.8
+  %conv.8 = sext i8 %8 to i16
+  %add.8 = add nsw i16 %add.7, %conv.8
+  %arrayidx.9 = getelementptr inbounds nuw i8, ptr %x, i64 9
+  %9 = load i8, ptr %arrayidx.9
+  %conv.9 = sext i8 %9 to i16
+  %add.9 = add nsw i16 %add.8, %conv.9
+  %arrayidx.10 = getelementptr inbounds nuw i8, ptr %x, i64 10
+  %10 = load i8, ptr %arrayidx.10
+  %conv.10 = sext i8 %10 to i16
+  %add.10 = add nsw i16 %add.9, %conv.10
+  %arrayidx.11 = getelementptr inbounds nuw i8, ptr %x, i64 11
+  %11 = load i8, ptr %arrayidx.11
+  %conv.11 = sext i8 %11 to i16
+  %add.11 = add nsw i16 %add.10, %conv.11
+  %arrayidx.12 = getelementptr inbounds nuw i8, ptr %x, i64 12
+  %12 = load i8, ptr %arrayidx.12
+  %conv.12 = sext i8 %12 to i16
+  %add.12 = add nsw i16 %add.11, %conv.12
+  %arrayidx.13 = getelementptr inbounds nuw i8, ptr %x, i64 13
+  %13 = load i8, ptr %arrayidx.13
+  %conv.13 = sext i8 %13 to i16
+  %add.13 = add nsw i16 %add.12, %conv.13
+  %arrayidx.14 = getelementptr inbounds nuw i8, ptr %x, i64 14
+  %14 = load i8, ptr %arrayidx.14
+  %conv.14 = sext i8 %14 to i16
+  %add.14 = add nsw i16 %add.13, %conv.14
+  %arrayidx.15 = getelementptr inbounds nuw i8, ptr %x, i64 15
+  %15 = load i8, ptr %arrayidx.15
+  %conv.15 = sext i8 %15 to i16
+  %add.15 = add nsw i16 %add.14, %conv.15
+  %arrayidx.16 = getelementptr inbounds nuw i8, ptr %x, i64 16
+  %16 = load i8, ptr %arrayidx.16
+  %conv.16 = sext i8 %16 to i16
+  %add.16 = add nsw i16 %add.15, %conv.16
+  %arrayidx.17 = getelementptr inbounds nuw i8, ptr %x, i64 17
+  %17 = load i8, ptr %arrayidx.17
+  %conv.17 = sext i8 %17 to i16
+  %add.17 = add nsw i16 %add.16, %conv.17
+  %arrayidx.18 = getelementptr inbounds nuw i8, ptr %x, i64 18
+  %18 = load i8, ptr %arrayidx.18
+  %conv.18 = sext i8 %18 to i16
+  %add.18 = add nsw i16 %add.17, %conv.18
+  %arrayidx.19 = getelementptr inbounds nuw i8, ptr %x, i64 19
+  %19 = load i8, ptr %arrayidx.19
+  %conv.19 = sext i8 %19 to i16
+  %add.19 = add nsw i16 %add.18, %conv.19
+  %arrayidx.20 = getelementptr inbounds nuw i8, ptr %x, i64 20
+  %20 = load i8, ptr %arrayidx.20
+  %conv.20 = sext i8 %20 to i16
+  %add.20 = add nsw i16 %add.19, %conv.20
+  %arrayidx.21 = getelementptr inbounds nuw i8, ptr %x, i64 21
+  %21 = load i8, ptr %arrayidx.21
+  %conv.21 = sext i8 %21 to i16
+  %add.21 = add nsw i16 %add.20, %conv.21
+  %arrayidx.22 = getelementptr inbounds nuw i8, ptr %x, i64 22
+  %22 = load i8, ptr %arrayidx.22
+  %conv.22 = sext i8 %22 to i16
+  %add.22 = add nsw i16 %add.21, %conv.22
+  %arrayidx.23 = getelementptr inbounds nuw i8, ptr %x, i64 23
+  %23 = load i8, ptr %arrayidx.23
+  %conv.23 = sext i8 %23 to i16
+  %add.23 = add nsw i16 %add.22, %conv.23
+  %arrayidx.24 = getelementptr inbounds nuw i8, ptr %x, i64 24
+  %24 = load i8, ptr %arrayidx.24
+  %conv.24 = sext i8 %24 to i16
+  %add.24 = add nsw i16 %add.23, %conv.24
+  %arrayidx.25 = getelementptr inbounds nuw i8, ptr %x, i64 25
+  %25 = load i8, ptr %arrayidx.25
+  %conv.25 = sext i8 %25 to i16
+  %add.25 = add nsw i16 %add.24, %conv.25
+  %arrayidx.26 = getelementptr inbounds nuw i8, ptr %x, i64 26
+  %26 = load i8, ptr %arrayidx.26
+  %conv.26 = sext i8 %26 to i16
+  %add.26 = add nsw i16 %add.25, %conv.26
+  %arrayidx.27 = getelementptr inbounds nuw i8, ptr %x, i64 27
+  %27 = load i8, ptr %arrayidx.27
+  %conv.27 = sext i8 %27 to i16
+  %add.27 = add nsw i16 %add.26, %conv.27
+  %arrayidx.28 = getelementptr inbounds nuw i8, ptr %x, i64 28
+  %28 = load i8, ptr %arrayidx.28
+  %conv.28 = sext i8 %28 to i16
+  %add.28 = add nsw i16 %add.27, %conv.28
+  %arrayidx.29 = getelementptr inbounds nuw i8, ptr %x, i64 29
+  %29 = load i8, ptr %arrayidx.29
+  %conv.29 = sext i8 %29 to i16
+  %add.29 = add nsw i16 %add.28, %conv.29
+  %arrayidx.30 = getelementptr inbounds nuw i8, ptr %x, i64 30
+  %30 = load i8, ptr %arrayidx.30
+  %conv.30 = sext i8 %30 to i16
+  %add.30 = add nsw i16 %add.29, %conv.30
+  %arrayidx.31 = getelementptr inbounds nuw i8, ptr %x, i64 31
+  %31 = load i8, ptr %arrayidx.31
+  %conv.31 = sext i8 %31 to i16
+  %add.31 = add nsw i16 %add.30, %conv.31
+  ret i16 %add.31
+}
+
+
+
+
+
+
+; COST-LABEL: Function:  sext_v4i16_i32
+; COST: Cost:            '-4'
+define i32 @sext_v4i16_i32(ptr %x) {
+; CHECK-LABEL: @sext_v4i16_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[X:%.*]], align 2
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i16> [[TMP0]] to <4 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP1]])
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+entry:
+  %0 = load i16, ptr %x
+  %conv = sext i16 %0 to i32
+  %arrayidx.1 = getelementptr inbounds nuw i8, ptr %x, i64 2
+  %1 = load i16, ptr %arrayidx.1
+  %conv.1 = sext i16 %1 to i32
+  %add.1 = add nsw i32 %conv, %conv.1
+  %arrayidx.2 = getelementptr inbounds nuw i8, ptr %x, i64 4
+  %2 = load i16, ptr %arrayidx.2
+  %conv.2 = sext i16 %2 to i32
+  %add.2 = add nsw i32 %add.1, %conv.2
+  %arrayidx.3 = getelementptr inbounds nuw i8, ptr %x, i64 6
+  %3 = load i16, ptr %arrayidx.3
+  %conv.3 = sext i16 %3 to i32
+  %add.3 = add nsw i32 %add.2, %conv.3
+  ret i32 %add.3
+}
+
+; COST-LABEL: Function:  sext_v8i16_i32
+; COST: Cost:            '-12'
+define i32 @sext_v8i16_i32(ptr %x) {
+; CHECK-LABEL: @sext_v8i16_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i16>, ptr [[X:%.*]], align 2
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <8 x i16> [[TMP0]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP1]])
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+entry:
+  %0 = load i16, ptr %x
+  %conv = sext i16 %0 to i32
+  %arrayidx.1 = getelementptr inbounds nuw i8, ptr %x, i64 2
+  %1 = load i16, ptr %arrayidx.1
+  %conv.1 = sext i16 %1 to i32
+  %add.1 = add nsw i32 %conv, %conv.1
+  %arrayidx.2 = getelementptr inbounds nuw i8, ptr %x, i64 4
+  %2 = load i16, ptr %arrayidx.2
+  %conv.2 = sext i16 %2 to i32
+  %add.2 = add nsw i32 %add.1, %conv.2
+  %arrayidx.3 = getelementptr inbounds nuw i8, ptr %x, i64 6
+  %3 = load i16, ptr %arrayidx.3
+  %conv.3 = sext i16 %3 to i32
+  %add.3 = add nsw i32 %add.2, %conv.3
+  %arrayidx.4 = getelementptr inbounds nuw i8, ptr %x, i64 8
+  %4 = load i16, ptr %arrayidx.4
+  %conv.4 = sext i16 %4 to i32
+  %add.4 = add nsw i32 %add.3, %conv.4
+  %arrayidx.5 = getelementptr inbounds nuw i8, ptr %x, i64 10
+  %5 = load i16, ptr %arrayidx.5
+  %conv.5 = sext i16 %5 to i32
+  %add.5 = add nsw i32 %add.4, %conv.5
+  %arrayidx.6 = getelementptr inbounds nuw i8, ptr %x, i64 12
+  %6 = load i16, ptr %arrayidx.6
+  %conv.6 = sext i16 %6 to i32
+  %add.6 = add nsw i32 %add.5, %conv.6
+  %arrayidx.7 = getelementptr inbounds nuw i8, ptr %x, i64 14
+  %7 = load i16, ptr %arrayidx.7
+  %conv.7 = sext i16 %7 to i32
+  %add.7 = add nsw i32 %add.6, %conv.7
+  ret i32 %add.7
+}
+
+; COST-LABEL: Function:  sext_v16i16_i32
+; COST: Cost:            '-25'
+define i32 @sext_v16i16_i32(ptr %x) {
+; CHECK-LABEL: @sext_v16i16_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i16>, ptr [[X:%.*]], align 2
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <16 x i16> [[TMP0]] to <16 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP1]])
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+entry:
+  %0 = load i16, ptr %x
+  %conv = sext i16 %0 to i32
+  %arrayidx.1 = getelementptr inbounds nuw i8, ptr %x, i64 2
+  %1 = load i16, ptr %arrayidx.1
+  %conv.1 = sext i16 %1 to i32
+  %add.1 = add nsw i32 %conv, %conv.1
+  %arrayidx.2 = getelementptr inbounds nuw i8, ptr %x, i64 4
+  %2 = load i16, ptr %arrayidx.2
+  %conv.2 = sext i16 %2 to i32
+  %add.2 = add nsw i32 %add.1, %conv.2
+  %arrayidx.3 = getelementptr inbounds nuw i8, ptr %x, i64 6
+  %3 = load i16, ptr %arrayidx.3
+  %conv.3 = sext i16 %3 to i32
+  %add.3 = add nsw i32 %add.2, %conv.3
+  %arrayidx.4 = getelementptr inbounds nuw i8, ptr %x, i64 8
+  %4 = load i16, ptr %arrayidx.4
+  %conv.4 = sext i16 %4 to i32
+  %add.4 = add nsw i32 %add.3, %conv.4
+  %arrayidx.5 = getelementptr inbounds nuw i8, ptr %x, i64 10
+  %5 = load i16, ptr %arrayidx.5
+  %conv.5 = sext i16 %5 to i32
+  %add.5 = add nsw i32 %add.4, %conv.5
+  %arrayidx.6 = getelementptr inbounds nuw i8, ptr %x, i64 12
+  %6 = load i16, ptr %arrayidx.6
+  %conv.6 = sext i16 %6 to i32
+  %add.6 = add nsw i32 %add.5, %conv.6
+  %arrayidx.7 = getelementptr inbounds nuw i8, ptr %x, i64 14
+  %7 = load i16, ptr %arrayidx.7
+  %conv.7 = sext i16 %7 to i32
+  %add.7 = add nsw i32 %add.6, %conv.7
+  %arrayidx.8 = getelementptr inbounds nuw i8, ptr %x, i64 16
+  %8 = load i16, ptr %arrayidx.8
+  %conv.8 = sext i16 %8 to i32
+  %add.8 = add nsw i32 %add.7, %conv.8
+  %arrayidx.9 = getelementptr inbounds nuw i8, ptr %x, i64 18
+  %9 = load i16, ptr %arrayidx.9
+  %conv.9 = sext i16 %9 to i32
+  %add.9 = add nsw i32 %add.8, %conv.9
+  %arrayidx.10 = getelementptr inbounds nuw i8, ptr %x, i64 20
+  %10 = load i16, ptr %arrayidx.10
+  %conv.10 = sext i16 %10 to i32
+  %add.10 = add nsw i32 %add.9, %conv.10
+  %arrayidx.11 = getelementptr inbounds nuw i8, ptr %x, i64 22
+  %11 = load i16, ptr %arrayidx.11
+  %conv.11 = sext i16 %11 to i32
+  %add.11 = add nsw i32 %add.10, %conv.11
+  %arrayidx.12 = getelementptr inbounds nuw i8, ptr %x, i64 24
+  %12 = load i16, ptr %arrayidx.12
+  %conv.12 = sext i16 %12 to i32
+  %add.12 = add nsw i32 %add.11, %conv.12
+  %arrayidx.13 = getelementptr inbounds nuw i8, ptr %x, i64 26
+  %13 = load i16, ptr %arrayidx.13
+  %conv.13 = sext i16 %13 to i32
+  %add.13 = add nsw i32 %add.12, %conv.13
+  %arrayidx.14 = getelementptr inbounds nuw i8, ptr %x, i64 28
+  %14 = load i16, ptr %arrayidx.14
+  %conv.14 = sext i16 %14 to i32
+  %add.14 = add nsw i32 %add.13, %conv.14
+  %arrayidx.15 = getelementptr inbounds nuw i8, ptr %x, i64 30
+  %15 = load i16, ptr %arrayidx.15
+  %conv.15 = sext i16 %15 to i32
+  %add.15 = add nsw i32 %add.14, %conv.15
+  ret i32 %add.15
+}
+
+; COST-LABEL: Function:  sext_v32i16_i32
+; COST: Cost:            '-51'
+define i32 @sext_v32i16_i32(ptr %x) {
+; CHECK-LABEL: @sext_v32i16_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <32 x i16>, ptr [[X:%.*]], align 2
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <32 x i16> [[TMP0]] to <32 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> [[TMP1]])
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+entry:
+  %0 = load i16, ptr %x
+  %conv = sext i16 %0 to i32
+  %arrayidx.1 = getelementptr inbounds nuw i8, ptr %x, i64 2
+  %1 = load i16, ptr %arrayidx.1
+  %conv.1 = sext i16 %1 to i32
+  %add.1 = add nsw i32 %conv, %conv.1
+  %arrayidx.2 = getelementptr inbounds nuw i8, ptr %x, i64 4
+  %2 = load i16, ptr %arrayidx.2
+  %conv.2 = sext i16 %2 to i32
+  %add.2 = add nsw i32 %add.1, %conv.2
+  %arrayidx.3 = getelementptr inbounds nuw i8, ptr %x, i64 6
+  %3 = load i16, ptr %arrayidx.3
+  %conv.3 = sext i16 %3 to i32
+  %add.3 = add nsw i32 %add.2, %conv.3
+  %arrayidx.4 = getelementptr inbounds nuw i8, ptr %x, i64 8
+  %4 = load i16, ptr %arrayidx.4
+  %conv.4 = sext i16 %4 to i32
+  %add.4 = add nsw i32 %add.3, %conv.4
+  %arrayidx.5 = getelementptr inbounds nuw i8, ptr %x, i64 10
+  %5 = load i16, ptr %arrayidx.5
+  %conv.5 = sext i16 %5 to i32
+  %add.5 = add nsw i32 %add.4, %conv.5
+  %arrayidx.6 = getelementptr inbounds nuw i8, ptr %x, i64 12
+  %6 = load i16, ptr %arrayidx.6
+  %conv.6 = sext i16 %6 to i32
+  %add.6 = add nsw i32 %add.5, %conv.6
+  %arrayidx.7 = getelementptr inbounds nuw i8, ptr %x, i64 14
+  %7 = load i16, ptr %arrayidx.7
+  %conv.7 = sext i16 %7 to i32
+  %add.7 = add nsw i32 %add.6, %conv.7
+  %arrayidx.8 = getelementptr inbounds nuw i8, ptr %x, i64 16
+  %8 = load i16, ptr %arrayidx.8
+  %conv.8 = sext i16 %8 to i32
+  %add.8 = add nsw i32 %add.7, %conv.8
+  %arrayidx.9 = getelementptr inbounds nuw i8, ptr %x, i64 18
+  %9 = load i16, ptr %arrayidx.9
+  %conv.9 = sext i16 %9 to i32
+  %add.9 = add nsw i32 %add.8, %conv.9
+  %arrayidx.10 = getelementptr inbounds nuw i8, ptr %x, i64 20
+  %10 = load i16, ptr %arrayidx.10
+  %conv.10 = sext i16 %10 to i32
+  %add.10 = add nsw i32 %add.9, %conv.10
+  %arrayidx.11 = getelementptr inbounds nuw i8, ptr %x, i64 22
+  %11 = load i16, ptr %arrayidx.11
+  %conv.11 = sext i16 %11 to i32
+  %add.11 = add nsw i32 %add.10, %conv.11
+  %arrayidx.12 = getelementptr inbounds nuw i8, ptr %x, i64 24
+  %12 = load i16, ptr %arrayidx.12
+  %conv.12 = sext i16 %12 to i32
+  %add.12 = add nsw i32 %add.11, %conv.12
+  %arrayidx.13 = getelementptr inbounds nuw i8, ptr %x, i64 26
+  %13 = load i16, ptr %arrayidx.13
+  %conv.13 = sext i16 %13 to i32
+  %add.13 = add nsw i32 %add.12, %conv.13
+  %arrayidx.14 = getelementptr inbounds nuw i8, ptr %x, i64 28
+  %14 = load i16, ptr %arrayidx.14
+  %conv.14 = sext i16 %14 to i32
+  %add.14 = add nsw i32 %add.13, %conv.14
+  %arrayidx.15 = getelementptr inbounds nuw i8, ptr %x, i64 30
+  %15 = load i16, ptr %arrayidx.15
+  %conv.15 = sext i16 %15 to i32
+  %add.15 = add nsw i32 %add.14, %conv.15
+  %arrayidx.16 = getelementptr inbounds nuw i8, ptr %x, i64 32
+  %16 = load i16, ptr %arrayidx.16
+  %conv.16 = sext i16 %16 to i32
+  %add.16 = add nsw i32 %add.15, %conv.16
+  %arrayidx.17 = getelementptr inbounds nuw i8, ptr %x, i64 34
+  %17 = load i16, ptr %arrayidx.17
+  %conv.17 = sext i16 %17 to i32
+  %add.17 = add nsw i32 %add.16, %conv.17
+  %arrayidx.18 = getelementptr inbounds nuw i8, ptr %x, i64 36
+  %18 = load i16, ptr %arrayidx.18
+  %conv.18 = sext i16 %18 to i32
+  %add.18 = add nsw i32 %add.17, %conv.18
+  %arrayidx.19 = getelementptr inbounds nuw i8, ptr %x, i64 38
+  %19 = load i16, ptr %arrayidx.19
+  %conv.19 = sext i16 %19 to i32
+  %add.19 = add nsw i32 %add.18, %conv.19
+  %arrayidx.20 = getelementptr inbounds nuw i8, ptr %x, i64 40
+  %20 = load i16, ptr %arrayidx.20
+  %conv.20 = sext i16 %20 to i32
+  %add.20 = add nsw i32 %add.19, %conv.20
+  %arrayidx.21 = getelementptr inbounds nuw i8, ptr %x, i64 42
+  %21 = load i16, ptr %arrayidx.21
+  %conv.21 = sext i16 %21 to i32
+  %add.21 = add nsw i32 %add.20, %conv.21
+  %arrayidx.22 = getelementptr inbounds nuw i8, ptr %x, i64 44
+  %22 = load i16, ptr %arrayidx.22
+  %conv.22 = sext i16 %22 to i32
+  %add.22 = add nsw i32 %add.21, %conv.22
+  %arrayidx.23 = getelementptr inbounds nuw i8, ptr %x, i64 46
+  %23 = load i16, ptr %arrayidx.23
+  %conv.23 = sext i16 %23 to i32
+  %add.23 = add nsw i32 %add.22, %conv.23
+  %arrayidx.24 = getelementptr inbounds nuw i8, ptr %x, i64 48
+  %24 = load i16, ptr %arrayidx.24
+  %conv.24 = sext i16 %24 to i32
+  %add.24 = add nsw i32 %add.23, %conv.24
+  %arrayidx.25 = getelementptr inbounds nuw i8, ptr %x, i64 50
+  %25 = load i16, ptr %arrayidx.25
+  %conv.25 = sext i16 %25 to i32
+  %add.25 = add nsw i32 %add.24, %conv.25
+  %arrayidx.26 = getelementptr inbounds nuw i8, ptr %x, i64 52
+  %26 = load i16, ptr %arrayidx.26
+  %conv.26 = sext i16 %26 to i32
+  %add.26 = add nsw i32 %add.25, %conv.26
+  %arrayidx.27 = getelementptr inbounds nuw i8, ptr %x, i64 54
+  %27 = load i16, ptr %arrayidx.27
+  %conv.27 = sext i16 %27 to i32
+  %add.27 = add nsw i32 %add.26, %conv.27
+  %arrayidx.28 = getelementptr inbounds nuw i8, ptr %x, i64 56
+  %28 = load i16, ptr %arrayidx.28
+  %conv.28 = sext i16 %28 to i32
+  %add.28 = add nsw i32 %add.27, %conv.28
+  %arrayidx.29 = getelementptr inbounds nuw i8, ptr %x, i64 58
+  %29 = load i16, ptr %arrayidx.29
+  %conv.29 = sext i16 %29 to i32
+  %add.29 = add nsw i32 %add.28, %conv.29
+  %arrayidx.30 = getelementptr inbounds nuw i8, ptr %x, i64 60
+  %30 = load i16, ptr %arrayidx.30
+  %conv.30 = sext i16 %30 to i32
+  %add.30 = add nsw i32 %add.29, %conv.30
+  %arrayidx.31 = getelementptr inbounds nuw i8, ptr %x, i64 62
+  %31 = load i16, ptr %arrayidx.31
+  %conv.31 = sext i16 %31 to i32
+  %add.31 = add nsw i32 %add.30, %conv.31
+  ret i32 %add.31
+}
+
+
+
+
+
+
+
+; COST-LABEL: Function:  sext_v4i32_i64
+; COST: Cost:            '-4'
+define i64 @sext_v4i32_i64(ptr %x) {
+; CHECK-LABEL: @sext_v4i32_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[X:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i32> [[TMP0]] to <4 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
+; CHECK-NEXT:    ret i64 [[TMP2]]
+;
+entry:
+  %0 = load i32, ptr %x
+  %conv = sext i32 %0 to i64
+  %arrayidx.1 = getelementptr inbounds nuw i32, ptr %x, i64 1
+  %1 = load i32, ptr %arrayidx.1
+  %conv.1 = sext i32 %1 to i64
+  %add.1 = add nsw i64 %conv, %conv.1
+  %arrayidx.2 = getelementptr inbounds nuw i32, ptr %x, i64 2
+  %2 = load i32, ptr %arrayidx.2
+  %conv.2 = sext i32 %2 to i64
+  %add.2 = add nsw i64 %add.1, %conv.2
+  %arrayidx.3 = getelementptr inbounds nuw i32, ptr %x, i64 3
+  %3 = load i32, ptr %arrayidx.3
+  %conv.3 = sext i32 %3 to i64
+  %add.3 = add nsw i64 %add.2, %conv.3
+  ret i64 %add.3
+}
+
+; COST-LABEL: Function:  sext_v8i32_i64
+; COST: Cost:            '-9'
+define i64 @sext_v8i32_i64(ptr %x) {
+; CHECK-LABEL: @sext_v8i32_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i32>, ptr [[X:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <8 x i32> [[TMP0]] to <8 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP1]])
+; CHECK-NEXT:    ret i64 [[TMP2]]
+;
+; Scalar chain of 8 x (load i32, sext to i64, add); SLP should collapse it into
+; one <8 x i32> load, a sext to <8 x i64> and a vector.reduce.add, per the
+; CHECK lines above (the extending-reduction cost makes this profitable).
+entry:
+  %0 = load i32, ptr %x
+  %conv = sext i32 %0 to i64
+  %arrayidx.1 = getelementptr inbounds nuw i32, ptr %x, i64 1
+  %1 = load i32, ptr %arrayidx.1
+  %conv.1 = sext i32 %1 to i64
+  %add.1 = add nsw i64 %conv, %conv.1
+  %arrayidx.2 = getelementptr inbounds nuw i32, ptr %x, i64 2
+  %2 = load i32, ptr %arrayidx.2
+  %conv.2 = sext i32 %2 to i64
+  %add.2 = add nsw i64 %add.1, %conv.2
+  %arrayidx.3 = getelementptr inbounds nuw i32, ptr %x, i64 3
+  %3 = load i32, ptr %arrayidx.3
+  %conv.3 = sext i32 %3 to i64
+  %add.3 = add nsw i64 %add.2, %conv.3
+  %arrayidx.4 = getelementptr inbounds nuw i32, ptr %x, i64 4
+  %4 = load i32, ptr %arrayidx.4
+  %conv.4 = sext i32 %4 to i64
+  %add.4 = add nsw i64 %add.3, %conv.4
+  %arrayidx.5 = getelementptr inbounds nuw i32, ptr %x, i64 5
+  %5 = load i32, ptr %arrayidx.5
+  %conv.5 = sext i32 %5 to i64
+  %add.5 = add nsw i64 %add.4, %conv.5
+  %arrayidx.6 = getelementptr inbounds nuw i32, ptr %x, i64 6
+  %6 = load i32, ptr %arrayidx.6
+  %conv.6 = sext i32 %6 to i64
+  %add.6 = add nsw i64 %add.5, %conv.6
+  %arrayidx.7 = getelementptr inbounds nuw i32, ptr %x, i64 7
+  %7 = load i32, ptr %arrayidx.7
+  %conv.7 = sext i32 %7 to i64
+  %add.7 = add nsw i64 %add.6, %conv.7
+  ret i64 %add.7
+}
+
+; COST-LABEL: Function:  sext_v16i32_i64
+; COST: Cost:            '-19'
+define i64 @sext_v16i32_i64(ptr %x) {
+; CHECK-LABEL: @sext_v16i32_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i32>, ptr [[X:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <16 x i32> [[TMP0]] to <16 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> [[TMP1]])
+; CHECK-NEXT:    ret i64 [[TMP2]]
+;
+; As above but with 16 elements: 16 x (load i32, sext to i64, add) should
+; become a single <16 x i32> load + sext + vector.reduce.add.
+entry:
+  %0 = load i32, ptr %x
+  %conv = sext i32 %0 to i64
+  %arrayidx.1 = getelementptr inbounds nuw i32, ptr %x, i64 1
+  %1 = load i32, ptr %arrayidx.1
+  %conv.1 = sext i32 %1 to i64
+  %add.1 = add nsw i64 %conv, %conv.1
+  %arrayidx.2 = getelementptr inbounds nuw i32, ptr %x, i64 2
+  %2 = load i32, ptr %arrayidx.2
+  %conv.2 = sext i32 %2 to i64
+  %add.2 = add nsw i64 %add.1, %conv.2
+  %arrayidx.3 = getelementptr inbounds nuw i32, ptr %x, i64 3
+  %3 = load i32, ptr %arrayidx.3
+  %conv.3 = sext i32 %3 to i64
+  %add.3 = add nsw i64 %add.2, %conv.3
+  %arrayidx.4 = getelementptr inbounds nuw i32, ptr %x, i64 4
+  %4 = load i32, ptr %arrayidx.4
+  %conv.4 = sext i32 %4 to i64
+  %add.4 = add nsw i64 %add.3, %conv.4
+  %arrayidx.5 = getelementptr inbounds nuw i32, ptr %x, i64 5
+  %5 = load i32, ptr %arrayidx.5
+  %conv.5 = sext i32 %5 to i64
+  %add.5 = add nsw i64 %add.4, %conv.5
+  %arrayidx.6 = getelementptr inbounds nuw i32, ptr %x, i64 6
+  %6 = load i32, ptr %arrayidx.6
+  %conv.6 = sext i32 %6 to i64
+  %add.6 = add nsw i64 %add.5, %conv.6
+  %arrayidx.7 = getelementptr inbounds nuw i32, ptr %x, i64 7
+  %7 = load i32, ptr %arrayidx.7
+  %conv.7 = sext i32 %7 to i64
+  %add.7 = add nsw i64 %add.6, %conv.7
+  %arrayidx.8 = getelementptr inbounds nuw i32, ptr %x, i64 8
+  %8 = load i32, ptr %arrayidx.8
+  %conv.8 = sext i32 %8 to i64
+  %add.8 = add nsw i64 %add.7, %conv.8
+  %arrayidx.9 = getelementptr inbounds nuw i32, ptr %x, i64 9
+  %9 = load i32, ptr %arrayidx.9
+  %conv.9 = sext i32 %9 to i64
+  %add.9 = add nsw i64 %add.8, %conv.9
+  %arrayidx.10 = getelementptr inbounds nuw i32, ptr %x, i64 10
+  %10 = load i32, ptr %arrayidx.10
+  %conv.10 = sext i32 %10 to i64
+  %add.10 = add nsw i64 %add.9, %conv.10
+  %arrayidx.11 = getelementptr inbounds nuw i32, ptr %x, i64 11
+  %11 = load i32, ptr %arrayidx.11
+  %conv.11 = sext i32 %11 to i64
+  %add.11 = add nsw i64 %add.10, %conv.11
+  %arrayidx.12 = getelementptr inbounds nuw i32, ptr %x, i64 12
+  %12 = load i32, ptr %arrayidx.12
+  %conv.12 = sext i32 %12 to i64
+  %add.12 = add nsw i64 %add.11, %conv.12
+  %arrayidx.13 = getelementptr inbounds nuw i32, ptr %x, i64 13
+  %13 = load i32, ptr %arrayidx.13
+  %conv.13 = sext i32 %13 to i64
+  %add.13 = add nsw i64 %add.12, %conv.13
+  %arrayidx.14 = getelementptr inbounds nuw i32, ptr %x, i64 14
+  %14 = load i32, ptr %arrayidx.14
+  %conv.14 = sext i32 %14 to i64
+  %add.14 = add nsw i64 %add.13, %conv.14
+  %arrayidx.15 = getelementptr inbounds nuw i32, ptr %x, i64 15
+  %15 = load i32, ptr %arrayidx.15
+  %conv.15 = sext i32 %15 to i64
+  %add.15 = add nsw i64 %add.14, %conv.15
+  ret i64 %add.15
+}
+
+; COST-LABEL: Function:  sext_v32i32_i64
+; COST: Cost:            '-39'
+define i64 @sext_v32i32_i64(ptr %x) {
+; CHECK-LABEL: @sext_v32i32_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <32 x i32>, ptr [[X:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <32 x i32> [[TMP0]] to <32 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v32i64(<32 x i64> [[TMP1]])
+; CHECK-NEXT:    ret i64 [[TMP2]]
+;
+; Largest sext case: 32 x (load i32, sext to i64, add) should become a single
+; <32 x i32> load + sext + vector.reduce.add, exercising type legalization of
+; the wide (multi-register) vector in the reduction cost model.
+entry:
+  %0 = load i32, ptr %x
+  %conv = sext i32 %0 to i64
+  %arrayidx.1 = getelementptr inbounds nuw i32, ptr %x, i64 1
+  %1 = load i32, ptr %arrayidx.1
+  %conv.1 = sext i32 %1 to i64
+  %add.1 = add nsw i64 %conv, %conv.1
+  %arrayidx.2 = getelementptr inbounds nuw i32, ptr %x, i64 2
+  %2 = load i32, ptr %arrayidx.2
+  %conv.2 = sext i32 %2 to i64
+  %add.2 = add nsw i64 %add.1, %conv.2
+  %arrayidx.3 = getelementptr inbounds nuw i32, ptr %x, i64 3
+  %3 = load i32, ptr %arrayidx.3
+  %conv.3 = sext i32 %3 to i64
+  %add.3 = add nsw i64 %add.2, %conv.3
+  %arrayidx.4 = getelementptr inbounds nuw i32, ptr %x, i64 4
+  %4 = load i32, ptr %arrayidx.4
+  %conv.4 = sext i32 %4 to i64
+  %add.4 = add nsw i64 %add.3, %conv.4
+  %arrayidx.5 = getelementptr inbounds nuw i32, ptr %x, i64 5
+  %5 = load i32, ptr %arrayidx.5
+  %conv.5 = sext i32 %5 to i64
+  %add.5 = add nsw i64 %add.4, %conv.5
+  %arrayidx.6 = getelementptr inbounds nuw i32, ptr %x, i64 6
+  %6 = load i32, ptr %arrayidx.6
+  %conv.6 = sext i32 %6 to i64
+  %add.6 = add nsw i64 %add.5, %conv.6
+  %arrayidx.7 = getelementptr inbounds nuw i32, ptr %x, i64 7
+  %7 = load i32, ptr %arrayidx.7
+  %conv.7 = sext i32 %7 to i64
+  %add.7 = add nsw i64 %add.6, %conv.7
+  %arrayidx.8 = getelementptr inbounds nuw i32, ptr %x, i64 8
+  %8 = load i32, ptr %arrayidx.8
+  %conv.8 = sext i32 %8 to i64
+  %add.8 = add nsw i64 %add.7, %conv.8
+  %arrayidx.9 = getelementptr inbounds nuw i32, ptr %x, i64 9
+  %9 = load i32, ptr %arrayidx.9
+  %conv.9 = sext i32 %9 to i64
+  %add.9 = add nsw i64 %add.8, %conv.9
+  %arrayidx.10 = getelementptr inbounds nuw i32, ptr %x, i64 10
+  %10 = load i32, ptr %arrayidx.10
+  %conv.10 = sext i32 %10 to i64
+  %add.10 = add nsw i64 %add.9, %conv.10
+  %arrayidx.11 = getelementptr inbounds nuw i32, ptr %x, i64 11
+  %11 = load i32, ptr %arrayidx.11
+  %conv.11 = sext i32 %11 to i64
+  %add.11 = add nsw i64 %add.10, %conv.11
+  %arrayidx.12 = getelementptr inbounds nuw i32, ptr %x, i64 12
+  %12 = load i32, ptr %arrayidx.12
+  %conv.12 = sext i32 %12 to i64
+  %add.12 = add nsw i64 %add.11, %conv.12
+  %arrayidx.13 = getelementptr inbounds nuw i32, ptr %x, i64 13
+  %13 = load i32, ptr %arrayidx.13
+  %conv.13 = sext i32 %13 to i64
+  %add.13 = add nsw i64 %add.12, %conv.13
+  %arrayidx.14 = getelementptr inbounds nuw i32, ptr %x, i64 14
+  %14 = load i32, ptr %arrayidx.14
+  %conv.14 = sext i32 %14 to i64
+  %add.14 = add nsw i64 %add.13, %conv.14
+  %arrayidx.15 = getelementptr inbounds nuw i32, ptr %x, i64 15
+  %15 = load i32, ptr %arrayidx.15
+  %conv.15 = sext i32 %15 to i64
+  %add.15 = add nsw i64 %add.14, %conv.15
+  %arrayidx.16 = getelementptr inbounds nuw i32, ptr %x, i64 16
+  %16 = load i32, ptr %arrayidx.16
+  %conv.16 = sext i32 %16 to i64
+  %add.16 = add nsw i64 %add.15, %conv.16
+  %arrayidx.17 = getelementptr inbounds nuw i32, ptr %x, i64 17
+  %17 = load i32, ptr %arrayidx.17
+  %conv.17 = sext i32 %17 to i64
+  %add.17 = add nsw i64 %add.16, %conv.17
+  %arrayidx.18 = getelementptr inbounds nuw i32, ptr %x, i64 18
+  %18 = load i32, ptr %arrayidx.18
+  %conv.18 = sext i32 %18 to i64
+  %add.18 = add nsw i64 %add.17, %conv.18
+  %arrayidx.19 = getelementptr inbounds nuw i32, ptr %x, i64 19
+  %19 = load i32, ptr %arrayidx.19
+  %conv.19 = sext i32 %19 to i64
+  %add.19 = add nsw i64 %add.18, %conv.19
+  %arrayidx.20 = getelementptr inbounds nuw i32, ptr %x, i64 20
+  %20 = load i32, ptr %arrayidx.20
+  %conv.20 = sext i32 %20 to i64
+  %add.20 = add nsw i64 %add.19, %conv.20
+  %arrayidx.21 = getelementptr inbounds nuw i32, ptr %x, i64 21
+  %21 = load i32, ptr %arrayidx.21
+  %conv.21 = sext i32 %21 to i64
+  %add.21 = add nsw i64 %add.20, %conv.21
+  %arrayidx.22 = getelementptr inbounds nuw i32, ptr %x, i64 22
+  %22 = load i32, ptr %arrayidx.22
+  %conv.22 = sext i32 %22 to i64
+  %add.22 = add nsw i64 %add.21, %conv.22
+  %arrayidx.23 = getelementptr inbounds nuw i32, ptr %x, i64 23
+  %23 = load i32, ptr %arrayidx.23
+  %conv.23 = sext i32 %23 to i64
+  %add.23 = add nsw i64 %add.22, %conv.23
+  %arrayidx.24 = getelementptr inbounds nuw i32, ptr %x, i64 24
+  %24 = load i32, ptr %arrayidx.24
+  %conv.24 = sext i32 %24 to i64
+  %add.24 = add nsw i64 %add.23, %conv.24
+  %arrayidx.25 = getelementptr inbounds nuw i32, ptr %x, i64 25
+  %25 = load i32, ptr %arrayidx.25
+  %conv.25 = sext i32 %25 to i64
+  %add.25 = add nsw i64 %add.24, %conv.25
+  %arrayidx.26 = getelementptr inbounds nuw i32, ptr %x, i64 26
+  %26 = load i32, ptr %arrayidx.26
+  %conv.26 = sext i32 %26 to i64
+  %add.26 = add nsw i64 %add.25, %conv.26
+  %arrayidx.27 = getelementptr inbounds nuw i32, ptr %x, i64 27
+  %27 = load i32, ptr %arrayidx.27
+  %conv.27 = sext i32 %27 to i64
+  %add.27 = add nsw i64 %add.26, %conv.27
+  %arrayidx.28 = getelementptr inbounds nuw i32, ptr %x, i64 28
+  %28 = load i32, ptr %arrayidx.28
+  %conv.28 = sext i32 %28 to i64
+  %add.28 = add nsw i64 %add.27, %conv.28
+  %arrayidx.29 = getelementptr inbounds nuw i32, ptr %x, i64 29
+  %29 = load i32, ptr %arrayidx.29
+  %conv.29 = sext i32 %29 to i64
+  %add.29 = add nsw i64 %add.28, %conv.29
+  %arrayidx.30 = getelementptr inbounds nuw i32, ptr %x, i64 30
+  %30 = load i32, ptr %arrayidx.30
+  %conv.30 = sext i32 %30 to i64
+  %add.30 = add nsw i64 %add.29, %conv.30
+  %arrayidx.31 = getelementptr inbounds nuw i32, ptr %x, i64 31
+  %31 = load i32, ptr %arrayidx.31
+  %conv.31 = sext i32 %31 to i64
+  %add.31 = add nsw i64 %add.30, %conv.31
+  ret i64 %add.31
+}
+
+
+
+; COST-LABEL: Function:  mla_v4i8_i32
+; COST: Cost:            '-6'
<no message>+define i32 @mla_v4i8_i32(ptr %x, ptr %y) "target-features"="+dotprod" {
+; CHECK-LABEL: @mla_v4i8_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr [[X:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i8> [[TMP0]] to <4 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr [[Y:%.*]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = sext <4 x i8> [[TMP2]] to <4 x i32>
+; CHECK-NEXT:    [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], [[TMP1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
+; CHECK-NEXT:    ret i32 [[TMP5]]
+;
+; Mul-accumulate pattern: 4 x (load i8 from %x and %y, sext to i32, mul, add),
+; compiled with +dotprod so the mla-reduction cost applies; SLP should emit
+; two <4 x i8> loads, sexts, a vector mul and a vector.reduce.add.
+entry:
+  %0 = load i8, ptr %x
+  %conv = sext i8 %0 to i32
+  %1 = load i8, ptr %y
+  %conv3 = sext i8 %1 to i32
+  %mul = mul nsw i32 %conv3, %conv
+  %arrayidx.1 = getelementptr inbounds nuw i8, ptr %x, i64 1
+  %2 = load i8, ptr %arrayidx.1
+  %conv.1 = sext i8 %2 to i32
+  %arrayidx2.1 = getelementptr inbounds nuw i8, ptr %y, i64 1
+  %3 = load i8, ptr %arrayidx2.1
+  %conv3.1 = sext i8 %3 to i32
+  %mul.1 = mul nsw i32 %conv3.1, %conv.1
+  %add.1 = add nsw i32 %mul.1, %mul
+  %arrayidx.2 = getelementptr inbounds nuw i8, ptr %x, i64 2
+  %4 = load i8, ptr %arrayidx.2
+  %conv.2 = sext i8 %4 to i32
+  %arrayidx2.2 = getelementptr inbounds nuw i8, ptr %y, i64 2
+  %5 = load i8, ptr %arrayidx2.2
+  %conv3.2 = sext i8 %5 to i32
+  %mul.2 = mul nsw i32 %conv3.2, %conv.2
+  %add.2 = add nsw i32 %mul.2, %add.1
+  %arrayidx.3 = getelementptr inbounds nuw i8, ptr %x, i64 3
+  %6 = load i8, ptr %arrayidx.3
+  %conv.3 = sext i8 %6 to i32
+  %arrayidx2.3 = getelementptr inbounds nuw i8, ptr %y, i64 3
+  %7 = load i8, ptr %arrayidx2.3
+  %conv3.3 = sext i8 %7 to i32
+  %mul.3 = mul nsw i32 %conv3.3, %conv.3
+  %add.3 = add nsw i32 %mul.3, %add.2
+  ret i32 %add.3
+}
+
+
+; COST-LABEL: Function:  mla_v8i8_i32
+; COST: Cost:            '-18'
+define i32 @mla_v8i8_i32(ptr %x, ptr %y) "target-features"="+dotprod" {
+; CHECK-LABEL: @mla_v8i8_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[X:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <8 x i8> [[TMP0]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr [[Y:%.*]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = sext <8 x i8> [[TMP2]] to <8 x i32>
+; CHECK-NEXT:    [[TMP4:%.*]] = mul nsw <8 x i32> [[TMP3]], [[TMP1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP4]])
+; CHECK-NEXT:    ret i32 [[TMP5]]
+;
+; As mla_v4i8_i32 but with 8 lanes: the scalar sext+mul+add chain over %x/%y
+; should vectorize to <8 x i8> loads, sexts, a vector mul and a reduce.add
+; under +dotprod.
+entry:
+  %0 = load i8, ptr %x
+  %conv = sext i8 %0 to i32
+  %1 = load i8, ptr %y
+  %conv3 = sext i8 %1 to i32
+  %mul = mul nsw i32 %conv3, %conv
+  %arrayidx.1 = getelementptr inbounds nuw i8, ptr %x, i64 1
+  %2 = load i8, ptr %arrayidx.1
+  %conv.1 = sext i8 %2 to i32
+  %arrayidx2.1 = getelementptr inbounds nuw i8, ptr %y, i64 1
+  %3 = load i8, ptr %arrayidx2.1
+  %conv3.1 = sext i8 %3 to i32
+  %mul.1 = mul nsw i32 %conv3.1, %conv.1
+  %add.1 = add nsw i32 %mul.1, %mul
+  %arrayidx.2 = getelementptr inbounds nuw i8, ptr %x, i64 2
+  %4 = load i8, ptr %arrayidx.2
+  %conv.2 = sext i8 %4 to i32
+  %arrayidx2.2 = getelementptr inbounds nuw i8, ptr %y, i64 2
+  %5 = load i8, ptr %arrayidx2.2
+  %conv3.2 = sext i8 %5 to i32
+  %mul.2 = mul nsw i32 %conv3.2, %conv.2
+  %add.2 = add nsw i32 %mul.2, %add.1
+  %arrayidx.3 = getelementptr inbounds nuw i8, ptr %x, i64 3
+  %6 = load i8, ptr %arrayidx.3
+  %conv.3 = sext i8 %6 to i32
+  %arrayidx2.3 = getelementptr inbounds nuw i8, ptr %y, i64 3
+  %7 = load i8, ptr %arrayidx2.3
+  %conv3.3 = sext i8 %7 to i32
+  %mul.3 = mul nsw i32 %conv3.3, %conv.3
+  %add.3 = add nsw i32 %mul.3, %add.2
+  %arrayidx.4 = getelementptr inbounds nuw i8, ptr %x, i64 4
+  %8 = load i8, ptr %arrayidx.4
+  %conv.4 = sext i8 %8 to i32
+  %arrayidx2.4 = getelementptr inbounds nuw i8, ptr %y, i64 4
+  %9 = load i8, ptr %arrayidx2.4
+  %conv3.4 = sext i8 %9 to i32
+  %mul.4 = mul nsw i32 %conv3.4, %conv.4
+  %add.4 = add nsw i32 %mul.4, %add.3
+  %arrayidx.5 = getelementptr inbounds nuw i8, ptr %x, i64 5
+  %10 = load i8, ptr %arrayidx.5
+  %conv.5 = sext i8 %10 to i32
+  %arrayidx2.5 = getelementptr inbounds nuw i8, ptr %y, i64 5
+  %11 = load i8, ptr %arrayidx2.5
+  %conv3.5 = sext i8 %11 to i32
+  %mul.5 = mul nsw i32 %conv3.5, %conv.5
+  %add.5 = add nsw i32 %mul.5, %add.4
+  %arrayidx.6 = getelementptr inbounds nuw i8, ptr %x, i64 6
+  %12 = load i8, ptr %arrayidx.6
+  %conv.6 = sext i8 %12 to i32
+  %arrayidx2.6 = getelementptr inbounds nuw i8, ptr %y, i64 6
+  %13 = load i8, ptr %arrayidx2.6
+  %conv3.6 = sext i8 %13 to i32
+  %mul.6 = mul nsw i32 %conv3.6, %conv.6
+  %add.6 = add nsw i32 %mul.6, %add.5
+  %arrayidx.7 = getelementptr inbounds nuw i8, ptr %x, i64 7
+  %14 = load i8, ptr %arrayidx.7
+  %conv.7 = sext i8 %14 to i32
+  %arrayidx2.7 = getelementptr inbounds nuw i8, ptr %y, i64 7
+  %15 = load i8, ptr %arrayidx2.7
+  %conv3.7 = sext i8 %15 to i32
+  %mul.7 = mul nsw i32 %conv3.7, %conv.7
+  %add.7 = add nsw i32 %mul.7, %add.6
+  ret i32 %add.7
+}
+
+
+; COST-LABEL: Function:  mla_v16i8_i32
+; COST: Cost:            '-40'
+define i32 @mla_v16i8_i32(ptr %x, ptr %y) "target-features"="+dotprod" {
+; CHECK-LABEL: @mla_v16i8_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[X:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = sext <16 x i8> [[TMP0]] to <16 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr [[Y:%.*]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = sext <16 x i8> [[TMP2]] to <16 x i32>
+; CHECK-NEXT:    [[TMP4:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP4]])
+; CHECK-NEXT:    ret i32 [[TMP5]]
+;
+; Widest mla case (16 lanes, full 128-bit i8 vector): under +dotprod the
+; 16 x (sext i8 -> i32, mul, add) chain should vectorize to <16 x i8> loads,
+; sexts, a vector mul and a single vector.reduce.add.
+entry:
+  %0 = load i8, ptr %x
+  %conv = sext i8 %0 to i32
+  %1 = load i8, ptr %y
+  %conv3 = sext i8 %1 to i32
+  %mul = mul nsw i32 %conv3, %conv
+  %arrayidx.1 = getelementptr inbounds nuw i8, ptr %x, i64 1
+  %2 = load i8, ptr %arrayidx.1
+  %conv.1 = sext i8 %2 to i32
+  %arrayidx2.1 = getelementptr inbounds nuw i8, ptr %y, i64 1
+  %3 = load i8, ptr %arrayidx2.1
+  %conv3.1 = sext i8 %3 to i32
+  %mul.1 = mul nsw i32 %conv3.1, %conv.1
+  %add.1 = add nsw i32 %mul.1, %mul
+  %arrayidx.2 = getelementptr inbounds nuw i8, ptr %x, i64 2
+  %4 = load i8, ptr %arrayidx.2
+  %conv.2 = sext i8 %4 to i32
+  %arrayidx2.2 = getelementptr inbounds nuw i8, ptr %y, i64 2
+  %5 = load i8, ptr %arrayidx2.2
+  %conv3.2 = sext i8 %5 to i32
+  %mul.2 = mul nsw i32 %conv3.2, %conv.2
+  %add.2 = add nsw i32 %mul.2, %add.1
+  %arrayidx.3 = getelementptr inbounds nuw i8, ptr %x, i64 3
+  %6 = load i8, ptr %arrayidx.3
+  %conv.3 = sext i8 %6 to i32
+  %arrayidx2.3 = getelementptr inbounds nuw i8, ptr %y, i64 3
+  %7 = load i8, ptr %arrayidx2.3
+  %conv3.3 = sext i8 %7 to i32
+  %mul.3 = mul nsw i32 %conv3.3, %conv.3
+  %add.3 = add nsw i32 %mul.3, %add.2
+  %arrayidx.4 = getelementptr inbounds nuw i8, ptr %x, i64 4
+  %8 = load i8, ptr %arrayidx.4
+  %conv.4 = sext i8 %8 to i32
+  %arrayidx2.4 = getelementptr inbounds nuw i8, ptr %y, i64 4
+  %9 = load i8, ptr %arrayidx2.4
+  %conv3.4 = sext i8 %9 to i32
+  %mul.4 = mul nsw i32 %conv3.4, %conv.4
+  %add.4 = add nsw i32 %mul.4, %add.3
+  %arrayidx.5 = getelementptr inbounds nuw i8, ptr %x, i64 5
+  %10 = load i8, ptr %arrayidx.5
+  %conv.5 = sext i8 %10 to i32
+  %arrayidx2.5 = getelementptr inbounds nuw i8, ptr %y, i64 5
+  %11 = load i8, ptr %arrayidx2.5
+  %conv3.5 = sext i8 %11 to i32
+  %mul.5 = mul nsw i32 %conv3.5, %conv.5
+  %add.5 = add nsw i32 %mul.5, %add.4
+  %arrayidx.6 = getelementptr inbounds nuw i8, ptr %x, i64 6
+  %12 = load i8, ptr %arrayidx.6
+  %conv.6 = sext i8 %12 to i32
+  %arrayidx2.6 = getelementptr inbounds nuw i8, ptr %y, i64 6
+  %13 = load i8, ptr %arrayidx2.6
+  %conv3.6 = sext i8 %13 to i32
+  %mul.6 = mul nsw i32 %conv3.6, %conv.6
+  %add.6 = add nsw i32 %mul.6, %add.5
+  %arrayidx.7 = getelementptr inbounds nuw i8, ptr %x, i64 7
+  %14 = load i8, ptr %arrayidx.7
+  %conv.7 = sext i8 %14 to i32
+  %arrayidx2.7 = getelementptr inbounds nuw i8, ptr %y, i64 7
+  %15 = load i8, ptr %arrayidx2.7
+  %conv3.7 = sext i8 %15 to i32
+  %mul.7 = mul nsw i32 %conv3.7, %conv.7
+  %add.7 = add nsw i32 %mul.7, %add.6
+  %arrayidx.8 = getelementptr inbounds nuw i8, ptr %x, i64 8
+  %16 = load i8, ptr %arrayidx.8
+  %conv.8 = sext i8 %16 to i32
+  %arrayidx2.8 = getelementptr inbounds nuw i8, ptr %y, i64 8
+  %17 = load i8, ptr %arrayidx2.8
+  %conv3.8 = sext i8 %17 to i32
+  %mul.8 = mul nsw i32 %conv3.8, %conv.8
+  %add.8 = add nsw i32 %mul.8, %add.7
+  %arrayidx.9 = getelementptr inbounds nuw i8, ptr %x, i64 9
+  %18 = load i8, ptr %arrayidx.9
+  %conv.9 = sext i8 %18 to i32
+  %arrayidx2.9 = getelementptr inbounds nuw i8, ptr %y, i64 9
+  %19 = load i8, ptr %arrayidx2.9
+  %conv3.9 = sext i8 %19 to i32
+  %mul.9 = mul nsw i32 %conv3.9, %conv.9
+  %add.9 = add nsw i32 %mul.9, %add.8
+  %arrayidx.10 = getelementptr inbounds nuw i8, ptr %x, i64 10
+  %20 = load i8, ptr %arrayidx.10
+  %conv.10 = sext i8 %20 to i32
+  %arrayidx2.10 = getelementptr inbounds nuw i8, ptr %y, i64 10
+  %21 = load i8, ptr %arrayidx2.10
+  %conv3.10 = sext i8 %21 to i32
+  %mul.10 = mul nsw i32 %conv3.10, %conv.10
+  %add.10 = add nsw i32 %mul.10, %add.9
+  %arrayidx.11 = getelementptr inbounds nuw i8, ptr %x, i64 11
+  %22 = load i8, ptr %arrayidx.11
+  %conv.11 = sext i8 %22 to i32
+  %arrayidx2.11 = getelementptr inbounds nuw i8, ptr %y, i64 11
+  %23 = load i8, ptr %arrayidx2.11
+  %conv3.11 = sext i8 %23 to i32
+  %mul.11 = mul nsw i32 %conv3.11, %conv.11
+  %add.11 = add nsw i32 %mul.11, %add.10
+  %arrayidx.12 = getelementptr inbounds nuw i8, ptr %x, i64 12
+  %24 = load i8, ptr %arrayidx.12
+  %conv.12 = sext i8 %24 to i32
+  %arrayidx2.12 = getelementptr inbounds nuw i8, ptr %y, i64 12
+  %25 = load i8, ptr %arrayidx2.12
+  %conv3.12 = sext i8 %25 to i32
+  %mul.12 = mul nsw i32 %conv3.12, %conv.12
+  %add.12 = add nsw i32 %mul.12, %add.11
+  %arrayidx.13 = getelementptr inbounds nuw i8, ptr %x, i64 13
+  %26 = load i8, ptr %arrayidx.13
+  %conv.13 = sext i8 %26 to i32
+  %arrayidx2.13 = getelementptr inbounds nuw i8, ptr %y, i64 13
+  %27 = load i8, ptr %arrayidx2.13
+  %conv3.13 = sext i8 %27 to i32
+  %mul.13 = mul nsw i32 %conv3.13, %conv.13
+  %add.13 = add nsw i32 %mul.13, %add.12
+  %arrayidx.14 = getelementptr inbounds nuw i8, ptr %x, i64 14
+  %28 = load i8, ptr %arrayidx.14
+  %conv.14 = sext i8 %28 to i32
+  %arrayidx2.14 = getelementptr inbounds nuw i8, ptr %y, i64 14
+  %29 = load i8, ptr %arrayidx2.14
+  %conv3.14 = sext i8 %29 to i32
+  %mul.14 = mul nsw i32 %conv3.14, %conv.14
+  %add.14 = add nsw i32 %mul.14, %add.13
+  %arrayidx.15 = getelementptr inbounds nuw i8, ptr %x, i64 15
+  %30 = load i8, ptr %arrayidx.15
+  %conv.15 = sext i8 %30 to i32
+  %arrayidx2.15 = getelementptr inbounds nuw i8, ptr %y, i64 15
+  %31 = load i8, ptr %arrayidx2.15
+  %conv3.15 = sext i8 %31 to i32
+  %mul.15 = mul nsw i32 %conv3.15, %conv.15
+  %add.15 = add nsw i32 %mul.15, %add.14
+  ret i32 %add.15
+}


        


More information about the llvm-commits mailing list