[llvm] Reduced neon non-const strided load cost (PR #169731)

via llvm-commits llvm-commits at lists.llvm.org
Sat Dec 13 00:32:00 PST 2025


https://github.com/mwlon updated https://github.com/llvm/llvm-project/pull/169731

>From 25bd77bcbaab664cf74a6befec34dc1d8f74ec61 Mon Sep 17 00:00:00 2001
From: mwlon <m.w.loncaric at gmail.com>
Date: Wed, 26 Nov 2025 16:24:16 -0500
Subject: [PATCH 1/2] reduced neon non-const strided load cost

Signed-off-by: mwlon <m.w.loncaric at gmail.com>
---
 llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index edd61a2db705e..d4df4a1461a84 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -50,7 +50,7 @@ static cl::opt<unsigned> SVETailFoldInsnThreshold("sve-tail-folding-insn-thresho
                                                   cl::init(15), cl::Hidden);
 
 static cl::opt<unsigned>
-    NeonNonConstStrideOverhead("neon-nonconst-stride-overhead", cl::init(10),
+    NeonNonConstStrideOverhead("neon-nonconst-stride-overhead", cl::init(2),
                                cl::Hidden);
 
 static cl::opt<unsigned> CallPenaltyChangeSM(
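
To experiment with this change locally: neon-nonconst-stride-overhead is a
hidden cl::opt, so it can be passed straight to opt when the AArch64 target is
built in. A minimal sketch follows; input.ll and the output file names are
placeholders rather than part of this patch, and the triple is borrowed from
the new test added below.

  # Old default: non-constant-stride address computation is costed at 10, so a
  # mostly vectorizable loop with one such load tends to stay scalar.
  opt -passes=loop-vectorize -mtriple=arm64-apple-macosx15.0.0 \
      -neon-nonconst-stride-overhead=10 -S input.ll -o before.ll

  # New default: the same accesses are costed at 2, so such loops are now
  # considered profitable to vectorize.
  opt -passes=loop-vectorize -mtriple=arm64-apple-macosx15.0.0 \
      -neon-nonconst-stride-overhead=2 -S input.ll -o after.ll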

>From 526085cf145ab4b598a98bb3da0e39d50b9b5000 Mon Sep 17 00:00:00 2001
From: mwlon <m.w.loncaric at gmail.com>
Date: Sat, 13 Dec 2025 21:28:57 +1300
Subject: [PATCH 2/2] replaced gather-cost test based on timing

Signed-off-by: mwlon <m.w.loncaric at gmail.com>
---
 .../LoopVectorize/AArch64/gather-cost.ll      |  85 -----
 .../AArch64/reduction-recurrence-costs-sve.ll | 321 +++++++++++++-----
 .../AArch64/replicating-load-store-costs.ll   | 147 ++++++--
 .../AArch64/vector-despite-neon-gather.ll     | 117 +++++++
 4 files changed, 478 insertions(+), 192 deletions(-)
 delete mode 100644 llvm/test/Transforms/LoopVectorize/AArch64/gather-cost.ll
 create mode 100644 llvm/test/Transforms/LoopVectorize/AArch64/vector-despite-neon-gather.ll

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/gather-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/gather-cost.ll
deleted file mode 100644
index 8079c1e11ef54..0000000000000
--- a/llvm/test/Transforms/LoopVectorize/AArch64/gather-cost.ll
+++ /dev/null
@@ -1,85 +0,0 @@
-; RUN: opt -passes=loop-vectorize -mtriple=arm64-apple-ios -S -mcpu=cyclone -enable-interleaved-mem-accesses=false < %s | FileCheck %s
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
-
-@kernel = global [512 x float] zeroinitializer, align 16
-@kernel2 = global [512 x float] zeroinitializer, align 16
-@kernel3 = global [512 x float] zeroinitializer, align 16
-@kernel4 = global [512 x float] zeroinitializer, align 16
-@src_data = global [1536 x float] zeroinitializer, align 16
-@r_ = global i8 0, align 1
-@g_ = global i8 0, align 1
-@b_ = global i8 0, align 1
-
-; We don't want to vectorize most loops containing gathers because they are
-; expensive.
-; Make sure we don't vectorize it.
-; CHECK-NOT: x float>
-
-define void @_Z4testmm(i64 %size, i64 %offset) {
-entry:
-  %cmp53 = icmp eq i64 %size, 0
-  br i1 %cmp53, label %for.end, label %for.body.lr.ph
-
-for.body.lr.ph:
-  br label %for.body
-
-for.body:
-  %r.057 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add10, %for.body ]
-  %g.056 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add20, %for.body ]
-  %v.055 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
-  %b.054 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add30, %for.body ]
-  %add = add i64 %v.055, %offset
-  %mul = mul i64 %add, 3
-  %arrayidx = getelementptr inbounds [1536 x float], ptr @src_data, i64 0, i64 %mul
-  %0 = load float, ptr %arrayidx, align 4
-  %arrayidx2 = getelementptr inbounds [512 x float], ptr @kernel, i64 0, i64 %v.055
-  %1 = load float, ptr %arrayidx2, align 4
-  %mul3 = fmul fast float %0, %1
-  %arrayidx4 = getelementptr inbounds [512 x float], ptr @kernel2, i64 0, i64 %v.055
-  %2 = load float, ptr %arrayidx4, align 4
-  %mul5 = fmul fast float %mul3, %2
-  %arrayidx6 = getelementptr inbounds [512 x float], ptr @kernel3, i64 0, i64 %v.055
-  %3 = load float, ptr %arrayidx6, align 4
-  %mul7 = fmul fast float %mul5, %3
-  %arrayidx8 = getelementptr inbounds [512 x float], ptr @kernel4, i64 0, i64 %v.055
-  %4 = load float, ptr %arrayidx8, align 4
-  %mul9 = fmul fast float %mul7, %4
-  %add10 = fadd fast float %r.057, %mul9
-  %arrayidx.sum = add i64 %mul, 1
-  %arrayidx11 = getelementptr inbounds [1536 x float], ptr @src_data, i64 0, i64 %arrayidx.sum
-  %5 = load float, ptr %arrayidx11, align 4
-  %mul13 = fmul fast float %1, %5
-  %mul15 = fmul fast float %2, %mul13
-  %mul17 = fmul fast float %3, %mul15
-  %mul19 = fmul fast float %4, %mul17
-  %add20 = fadd fast float %g.056, %mul19
-  %arrayidx.sum52 = add i64 %mul, 2
-  %arrayidx21 = getelementptr inbounds [1536 x float], ptr @src_data, i64 0, i64 %arrayidx.sum52
-  %6 = load float, ptr %arrayidx21, align 4
-  %mul23 = fmul fast float %1, %6
-  %mul25 = fmul fast float %2, %mul23
-  %mul27 = fmul fast float %3, %mul25
-  %mul29 = fmul fast float %4, %mul27
-  %add30 = fadd fast float %b.054, %mul29
-  %inc = add i64 %v.055, 1
-  %exitcond = icmp ne i64 %inc, %size
-  br i1 %exitcond, label %for.body, label %for.cond.for.end_crit_edge
-
-for.cond.for.end_crit_edge:
-  %add30.lcssa = phi float [ %add30, %for.body ]
-  %add20.lcssa = phi float [ %add20, %for.body ]
-  %add10.lcssa = phi float [ %add10, %for.body ]
-  %phitmp = fptoui float %add10.lcssa to i8
-  %phitmp60 = fptoui float %add20.lcssa to i8
-  %phitmp61 = fptoui float %add30.lcssa to i8
-  br label %for.end
-
-for.end:
-  %r.0.lcssa = phi i8 [ %phitmp, %for.cond.for.end_crit_edge ], [ 0, %entry ]
-  %g.0.lcssa = phi i8 [ %phitmp60, %for.cond.for.end_crit_edge ], [ 0, %entry ]
-  %b.0.lcssa = phi i8 [ %phitmp61, %for.cond.for.end_crit_edge ], [ 0, %entry ]
-  store i8 %r.0.lcssa, ptr @r_, align 1
-  store i8 %g.0.lcssa, ptr @g_, align 1
-  store i8 %b.0.lcssa, ptr @b_, align 1
-  ret void
-}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
index 44ae1757ce6e6..7ad8d3a0c9903 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
@@ -10,15 +10,136 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2)
 ; DEFAULT-LABEL: define i32 @chained_recurrences(
 ; DEFAULT-SAME: i32 [[X:%.*]], i64 [[Y:%.*]], ptr [[SRC_1:%.*]], i32 [[Z:%.*]], ptr [[SRC_2:%.*]]) #[[ATTR0:[0-9]+]] {
 ; DEFAULT-NEXT:  [[ENTRY:.*]]:
+; DEFAULT-NEXT:    [[TMP67:%.*]] = add i64 [[Y]], 1
+; DEFAULT-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP67]], 8
+; DEFAULT-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; DEFAULT:       [[VECTOR_PH]]:
+; DEFAULT-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP67]], 8
+; DEFAULT-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP67]], [[N_MOD_VF]]
+; DEFAULT-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[Z]], i64 0
+; DEFAULT-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; DEFAULT-NEXT:    [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i32> poison, i32 [[X]], i64 0
+; DEFAULT-NEXT:    [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT1]], <4 x i32> poison, <4 x i32> zeroinitializer
+; DEFAULT-NEXT:    [[TMP1:%.*]] = add i64 [[Y]], 1
+; DEFAULT-NEXT:    [[GEP_1:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP1]]
+; DEFAULT-NEXT:    [[TMP3:%.*]] = lshr <4 x i32> [[BROADCAST_SPLAT2]], splat (i32 1)
+; DEFAULT-NEXT:    [[TMP4:%.*]] = shl <4 x i32> [[BROADCAST_SPLAT2]], splat (i32 1)
+; DEFAULT-NEXT:    [[TMP5:%.*]] = or <4 x i32> [[TMP3]], [[TMP4]]
+; DEFAULT-NEXT:    [[TMP6:%.*]] = or <4 x i32> [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT2]]
+; DEFAULT-NEXT:    [[TMP7:%.*]] = and <4 x i32> [[TMP6]], splat (i32 1)
+; DEFAULT-NEXT:    [[TMP8:%.*]] = xor <4 x i32> [[TMP7]], splat (i32 1)
+; DEFAULT-NEXT:    [[TMP9:%.*]] = zext <4 x i32> [[TMP8]] to <4 x i64>
+; DEFAULT-NEXT:    [[TMP10:%.*]] = extractelement <4 x i64> [[TMP9]], i32 0
+; DEFAULT-NEXT:    [[TMP11:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP10]]
+; DEFAULT-NEXT:    [[TMP12:%.*]] = extractelement <4 x i64> [[TMP9]], i32 1
+; DEFAULT-NEXT:    [[TMP13:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP12]]
+; DEFAULT-NEXT:    [[TMP14:%.*]] = extractelement <4 x i64> [[TMP9]], i32 2
+; DEFAULT-NEXT:    [[TMP15:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP14]]
+; DEFAULT-NEXT:    [[TMP16:%.*]] = extractelement <4 x i64> [[TMP9]], i32 3
+; DEFAULT-NEXT:    [[TMP17:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP16]]
+; DEFAULT-NEXT:    br label %[[VECTOR_BODY:.*]]
+; DEFAULT:       [[VECTOR_BODY]]:
+; DEFAULT-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; DEFAULT-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ <i32 poison, i32 poison, i32 poison, i32 0>, %[[VECTOR_PH]] ], [ [[BROADCAST_SPLAT6:%.*]], %[[VECTOR_BODY]] ]
+; DEFAULT-NEXT:    [[VECTOR_RECUR3:%.*]] = phi <4 x i32> [ <i32 poison, i32 poison, i32 poison, i32 0>, %[[VECTOR_PH]] ], [ [[TMP20:%.*]], %[[VECTOR_BODY]] ]
+; DEFAULT-NEXT:    [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP87:%.*]], %[[VECTOR_BODY]] ]
+; DEFAULT-NEXT:    [[VEC_PHI4:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP88:%.*]], %[[VECTOR_BODY]] ]
+; DEFAULT-NEXT:    [[TMP68:%.*]] = load i32, ptr [[GEP_1]], align 4
+; DEFAULT-NEXT:    [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <4 x i32> poison, i32 [[TMP68]], i64 0
+; DEFAULT-NEXT:    [[BROADCAST_SPLAT6]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT5]], <4 x i32> poison, <4 x i32> zeroinitializer
+; DEFAULT-NEXT:    [[TMP19:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[BROADCAST_SPLAT6]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; DEFAULT-NEXT:    [[TMP20]] = shufflevector <4 x i32> [[BROADCAST_SPLAT6]], <4 x i32> [[BROADCAST_SPLAT6]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; DEFAULT-NEXT:    [[TMP21:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR3]], <4 x i32> [[TMP19]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; DEFAULT-NEXT:    [[TMP22:%.*]] = shufflevector <4 x i32> [[TMP19]], <4 x i32> [[TMP20]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; DEFAULT-NEXT:    [[TMP23:%.*]] = or <4 x i32> [[TMP21]], [[BROADCAST_SPLAT2]]
+; DEFAULT-NEXT:    [[TMP24:%.*]] = or <4 x i32> [[TMP22]], [[BROADCAST_SPLAT2]]
+; DEFAULT-NEXT:    [[TMP25:%.*]] = shl <4 x i32> [[TMP23]], splat (i32 1)
+; DEFAULT-NEXT:    [[TMP26:%.*]] = shl <4 x i32> [[TMP24]], splat (i32 1)
+; DEFAULT-NEXT:    [[TMP27:%.*]] = or <4 x i32> [[TMP25]], splat (i32 2)
+; DEFAULT-NEXT:    [[TMP28:%.*]] = or <4 x i32> [[TMP26]], splat (i32 2)
+; DEFAULT-NEXT:    [[TMP29:%.*]] = or <4 x i32> [[TMP5]], [[TMP27]]
+; DEFAULT-NEXT:    [[TMP30:%.*]] = or <4 x i32> [[TMP5]], [[TMP28]]
+; DEFAULT-NEXT:    [[TMP31:%.*]] = or <4 x i32> [[TMP29]], [[BROADCAST_SPLAT2]]
+; DEFAULT-NEXT:    [[TMP32:%.*]] = or <4 x i32> [[TMP30]], [[BROADCAST_SPLAT2]]
+; DEFAULT-NEXT:    [[TMP33:%.*]] = load i32, ptr [[TMP11]], align 4
+; DEFAULT-NEXT:    [[TMP34:%.*]] = load i32, ptr [[TMP13]], align 4
+; DEFAULT-NEXT:    [[TMP35:%.*]] = load i32, ptr [[TMP15]], align 4
+; DEFAULT-NEXT:    [[TMP36:%.*]] = load i32, ptr [[TMP17]], align 4
+; DEFAULT-NEXT:    [[TMP37:%.*]] = insertelement <4 x i32> poison, i32 [[TMP33]], i32 0
+; DEFAULT-NEXT:    [[TMP38:%.*]] = insertelement <4 x i32> [[TMP37]], i32 [[TMP34]], i32 1
+; DEFAULT-NEXT:    [[TMP39:%.*]] = insertelement <4 x i32> [[TMP38]], i32 [[TMP35]], i32 2
+; DEFAULT-NEXT:    [[TMP40:%.*]] = insertelement <4 x i32> [[TMP39]], i32 [[TMP36]], i32 3
+; DEFAULT-NEXT:    [[TMP41:%.*]] = load i32, ptr [[TMP11]], align 4
+; DEFAULT-NEXT:    [[TMP42:%.*]] = load i32, ptr [[TMP13]], align 4
+; DEFAULT-NEXT:    [[TMP43:%.*]] = load i32, ptr [[TMP15]], align 4
+; DEFAULT-NEXT:    [[TMP44:%.*]] = load i32, ptr [[TMP17]], align 4
+; DEFAULT-NEXT:    [[TMP45:%.*]] = insertelement <4 x i32> poison, i32 [[TMP41]], i32 0
+; DEFAULT-NEXT:    [[TMP46:%.*]] = insertelement <4 x i32> [[TMP45]], i32 [[TMP42]], i32 1
+; DEFAULT-NEXT:    [[TMP47:%.*]] = insertelement <4 x i32> [[TMP46]], i32 [[TMP43]], i32 2
+; DEFAULT-NEXT:    [[TMP48:%.*]] = insertelement <4 x i32> [[TMP47]], i32 [[TMP44]], i32 3
+; DEFAULT-NEXT:    [[TMP49:%.*]] = lshr <4 x i32> [[TMP31]], splat (i32 1)
+; DEFAULT-NEXT:    [[TMP50:%.*]] = lshr <4 x i32> [[TMP32]], splat (i32 1)
+; DEFAULT-NEXT:    [[TMP51:%.*]] = zext <4 x i32> [[TMP49]] to <4 x i64>
+; DEFAULT-NEXT:    [[TMP52:%.*]] = extractelement <4 x i64> [[TMP51]], i32 0
+; DEFAULT-NEXT:    [[TMP53:%.*]] = extractelement <4 x i64> [[TMP51]], i32 1
+; DEFAULT-NEXT:    [[TMP54:%.*]] = extractelement <4 x i64> [[TMP51]], i32 2
+; DEFAULT-NEXT:    [[TMP55:%.*]] = extractelement <4 x i64> [[TMP51]], i32 3
+; DEFAULT-NEXT:    [[TMP56:%.*]] = zext <4 x i32> [[TMP50]] to <4 x i64>
+; DEFAULT-NEXT:    [[TMP57:%.*]] = extractelement <4 x i64> [[TMP56]], i32 0
+; DEFAULT-NEXT:    [[TMP58:%.*]] = extractelement <4 x i64> [[TMP56]], i32 1
+; DEFAULT-NEXT:    [[TMP59:%.*]] = extractelement <4 x i64> [[TMP56]], i32 2
+; DEFAULT-NEXT:    [[TMP60:%.*]] = extractelement <4 x i64> [[TMP56]], i32 3
+; DEFAULT-NEXT:    [[TMP61:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP52]]
+; DEFAULT-NEXT:    [[TMP62:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP53]]
+; DEFAULT-NEXT:    [[TMP63:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP54]]
+; DEFAULT-NEXT:    [[TMP64:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP55]]
+; DEFAULT-NEXT:    [[TMP65:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP57]]
+; DEFAULT-NEXT:    [[TMP66:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP58]]
+; DEFAULT-NEXT:    [[TMP92:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP59]]
+; DEFAULT-NEXT:    [[TMP95:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP60]]
+; DEFAULT-NEXT:    [[TMP96:%.*]] = load i32, ptr [[TMP61]], align 4
+; DEFAULT-NEXT:    [[TMP97:%.*]] = load i32, ptr [[TMP62]], align 4
+; DEFAULT-NEXT:    [[TMP98:%.*]] = load i32, ptr [[TMP63]], align 4
+; DEFAULT-NEXT:    [[TMP99:%.*]] = load i32, ptr [[TMP64]], align 4
+; DEFAULT-NEXT:    [[TMP100:%.*]] = insertelement <4 x i32> poison, i32 [[TMP96]], i32 0
+; DEFAULT-NEXT:    [[TMP101:%.*]] = insertelement <4 x i32> [[TMP100]], i32 [[TMP97]], i32 1
+; DEFAULT-NEXT:    [[TMP102:%.*]] = insertelement <4 x i32> [[TMP101]], i32 [[TMP98]], i32 2
+; DEFAULT-NEXT:    [[TMP76:%.*]] = insertelement <4 x i32> [[TMP102]], i32 [[TMP99]], i32 3
+; DEFAULT-NEXT:    [[TMP77:%.*]] = load i32, ptr [[TMP65]], align 4
+; DEFAULT-NEXT:    [[TMP78:%.*]] = load i32, ptr [[TMP66]], align 4
+; DEFAULT-NEXT:    [[TMP79:%.*]] = load i32, ptr [[TMP92]], align 4
+; DEFAULT-NEXT:    [[TMP80:%.*]] = load i32, ptr [[TMP95]], align 4
+; DEFAULT-NEXT:    [[TMP81:%.*]] = insertelement <4 x i32> poison, i32 [[TMP77]], i32 0
+; DEFAULT-NEXT:    [[TMP82:%.*]] = insertelement <4 x i32> [[TMP81]], i32 [[TMP78]], i32 1
+; DEFAULT-NEXT:    [[TMP83:%.*]] = insertelement <4 x i32> [[TMP82]], i32 [[TMP79]], i32 2
+; DEFAULT-NEXT:    [[TMP84:%.*]] = insertelement <4 x i32> [[TMP83]], i32 [[TMP80]], i32 3
+; DEFAULT-NEXT:    [[TMP85:%.*]] = or <4 x i32> [[TMP40]], [[VEC_PHI]]
+; DEFAULT-NEXT:    [[TMP86:%.*]] = or <4 x i32> [[TMP48]], [[VEC_PHI4]]
+; DEFAULT-NEXT:    [[TMP87]] = or <4 x i32> [[TMP85]], [[TMP76]]
+; DEFAULT-NEXT:    [[TMP88]] = or <4 x i32> [[TMP86]], [[TMP84]]
+; DEFAULT-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; DEFAULT-NEXT:    [[TMP89:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; DEFAULT-NEXT:    br i1 [[TMP89]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; DEFAULT:       [[MIDDLE_BLOCK]]:
+; DEFAULT-NEXT:    [[BIN_RDX:%.*]] = or <4 x i32> [[TMP88]], [[TMP87]]
+; DEFAULT-NEXT:    [[TMP90:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[BIN_RDX]])
+; DEFAULT-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[TMP20]], i32 3
+; DEFAULT-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP67]], [[N_VEC]]
+; DEFAULT-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; DEFAULT:       [[SCALAR_PH]]:
+; DEFAULT-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[TMP68]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; DEFAULT-NEXT:    [[SCALAR_RECUR_INIT7:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; DEFAULT-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; DEFAULT-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP90]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
 ; DEFAULT-NEXT:    br label %[[LOOP:.*]]
 ; DEFAULT:       [[LOOP]]:
-; DEFAULT-NEXT:    [[TMP0:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[TMP68:%.*]], %[[LOOP]] ]
-; DEFAULT-NEXT:    [[SCALAR_RECUR15:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[TMP0]], %[[LOOP]] ]
-; DEFAULT-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; DEFAULT-NEXT:    [[SUM_RED:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RED_2:%.*]], %[[LOOP]] ]
-; DEFAULT-NEXT:    [[TMP67:%.*]] = add i64 [[Y]], 1
-; DEFAULT-NEXT:    [[GEP_1:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP67]]
-; DEFAULT-NEXT:    [[TMP68]] = load i32, ptr [[GEP_1]], align 4
+; DEFAULT-NEXT:    [[TMP91:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP94:%.*]], %[[LOOP]] ]
+; DEFAULT-NEXT:    [[SCALAR_RECUR15:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT7]], %[[SCALAR_PH]] ], [ [[TMP91]], %[[LOOP]] ]
+; DEFAULT-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; DEFAULT-NEXT:    [[SUM_RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_2:%.*]], %[[LOOP]] ]
+; DEFAULT-NEXT:    [[TMP93:%.*]] = add i64 [[Y]], 1
+; DEFAULT-NEXT:    [[GEP_4:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP93]]
+; DEFAULT-NEXT:    [[TMP94]] = load i32, ptr [[GEP_4]], align 4
 ; DEFAULT-NEXT:    [[OR3:%.*]] = or i32 [[SCALAR_RECUR15]], [[X]]
 ; DEFAULT-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
 ; DEFAULT-NEXT:    [[SHR:%.*]] = lshr i32 [[X]], 1
@@ -41,105 +162,139 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2)
 ; DEFAULT-NEXT:    [[RED_1:%.*]] = or i32 [[TMP74]], [[SUM_RED]]
 ; DEFAULT-NEXT:    [[RED_2]] = or i32 [[RED_1]], [[TMP75]]
 ; DEFAULT-NEXT:    [[EC:%.*]] = icmp eq i64 [[IV]], [[Y]]
-; DEFAULT-NEXT:    br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; DEFAULT-NEXT:    br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
 ; DEFAULT:       [[EXIT]]:
-; DEFAULT-NEXT:    [[RED_2_LCSSA:%.*]] = phi i32 [ [[RED_2]], %[[LOOP]] ]
+; DEFAULT-NEXT:    [[RED_2_LCSSA:%.*]] = phi i32 [ [[RED_2]], %[[LOOP]] ], [ [[TMP90]], %[[MIDDLE_BLOCK]] ]
 ; DEFAULT-NEXT:    ret i32 [[RED_2_LCSSA]]
 ;
 ; VSCALEFORTUNING2-LABEL: define i32 @chained_recurrences(
 ; VSCALEFORTUNING2-SAME: i32 [[X:%.*]], i64 [[Y:%.*]], ptr [[SRC_1:%.*]], i32 [[Z:%.*]], ptr [[SRC_2:%.*]]) #[[ATTR0:[0-9]+]] {
 ; VSCALEFORTUNING2-NEXT:  [[ENTRY:.*]]:
 ; VSCALEFORTUNING2-NEXT:    [[TMP0:%.*]] = add i64 [[Y]], 1
-; VSCALEFORTUNING2-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; VSCALEFORTUNING2-NEXT:    [[TMP2:%.*]] = shl nuw i64 [[TMP1]], 3
-; VSCALEFORTUNING2-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
+; VSCALEFORTUNING2-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 8
 ; VSCALEFORTUNING2-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; VSCALEFORTUNING2:       [[VECTOR_PH]]:
-; VSCALEFORTUNING2-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; VSCALEFORTUNING2-NEXT:    [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
-; VSCALEFORTUNING2-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
+; VSCALEFORTUNING2-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 8
 ; VSCALEFORTUNING2-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
-; VSCALEFORTUNING2-NEXT:    [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[Z]], i64 0
-; VSCALEFORTUNING2-NEXT:    [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-; VSCALEFORTUNING2-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X]], i64 0
-; VSCALEFORTUNING2-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; VSCALEFORTUNING2-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[Z]], i64 0
+; VSCALEFORTUNING2-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; VSCALEFORTUNING2-NEXT:    [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i32> poison, i32 [[X]], i64 0
+; VSCALEFORTUNING2-NEXT:    [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT1]], <4 x i32> poison, <4 x i32> zeroinitializer
 ; VSCALEFORTUNING2-NEXT:    [[TMP7:%.*]] = add i64 [[Y]], 1
 ; VSCALEFORTUNING2-NEXT:    [[TMP8:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP7]]
-; VSCALEFORTUNING2-NEXT:    [[TMP9:%.*]] = lshr <vscale x 4 x i32> [[BROADCAST_SPLAT]], splat (i32 1)
-; VSCALEFORTUNING2-NEXT:    [[TMP10:%.*]] = shl <vscale x 4 x i32> [[BROADCAST_SPLAT]], splat (i32 1)
-; VSCALEFORTUNING2-NEXT:    [[TMP11:%.*]] = or <vscale x 4 x i32> [[TMP9]], [[TMP10]]
-; VSCALEFORTUNING2-NEXT:    [[TMP12:%.*]] = or <vscale x 4 x i32> [[BROADCAST_SPLAT2]], [[BROADCAST_SPLAT]]
-; VSCALEFORTUNING2-NEXT:    [[TMP13:%.*]] = and <vscale x 4 x i32> [[TMP12]], splat (i32 1)
-; VSCALEFORTUNING2-NEXT:    [[TMP14:%.*]] = xor <vscale x 4 x i32> [[TMP13]], splat (i32 1)
-; VSCALEFORTUNING2-NEXT:    [[TMP15:%.*]] = zext <vscale x 4 x i32> [[TMP14]] to <vscale x 4 x i64>
-; VSCALEFORTUNING2-NEXT:    [[DOTSPLAT:%.*]] = getelementptr i32, ptr [[SRC_2]], <vscale x 4 x i64> [[TMP15]]
-; VSCALEFORTUNING2-NEXT:    [[TMP18:%.*]] = call i32 @llvm.vscale.i32()
-; VSCALEFORTUNING2-NEXT:    [[TMP19:%.*]] = mul nuw i32 [[TMP18]], 4
-; VSCALEFORTUNING2-NEXT:    [[TMP20:%.*]] = sub i32 [[TMP19]], 1
-; VSCALEFORTUNING2-NEXT:    [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 0, i32 [[TMP20]]
-; VSCALEFORTUNING2-NEXT:    [[TMP21:%.*]] = call i32 @llvm.vscale.i32()
-; VSCALEFORTUNING2-NEXT:    [[TMP22:%.*]] = mul nuw i32 [[TMP21]], 4
-; VSCALEFORTUNING2-NEXT:    [[TMP23:%.*]] = sub i32 [[TMP22]], 1
-; VSCALEFORTUNING2-NEXT:    [[VECTOR_RECUR_INIT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 0, i32 [[TMP23]]
+; VSCALEFORTUNING2-NEXT:    [[TMP3:%.*]] = lshr <4 x i32> [[BROADCAST_SPLAT2]], splat (i32 1)
+; VSCALEFORTUNING2-NEXT:    [[TMP4:%.*]] = shl <4 x i32> [[BROADCAST_SPLAT2]], splat (i32 1)
+; VSCALEFORTUNING2-NEXT:    [[TMP5:%.*]] = or <4 x i32> [[TMP3]], [[TMP4]]
+; VSCALEFORTUNING2-NEXT:    [[TMP6:%.*]] = or <4 x i32> [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT2]]
+; VSCALEFORTUNING2-NEXT:    [[TMP18:%.*]] = and <4 x i32> [[TMP6]], splat (i32 1)
+; VSCALEFORTUNING2-NEXT:    [[TMP89:%.*]] = xor <4 x i32> [[TMP18]], splat (i32 1)
+; VSCALEFORTUNING2-NEXT:    [[TMP9:%.*]] = zext <4 x i32> [[TMP89]] to <4 x i64>
+; VSCALEFORTUNING2-NEXT:    [[TMP10:%.*]] = extractelement <4 x i64> [[TMP9]], i32 0
+; VSCALEFORTUNING2-NEXT:    [[TMP11:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP10]]
+; VSCALEFORTUNING2-NEXT:    [[TMP12:%.*]] = extractelement <4 x i64> [[TMP9]], i32 1
+; VSCALEFORTUNING2-NEXT:    [[TMP13:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP12]]
+; VSCALEFORTUNING2-NEXT:    [[TMP14:%.*]] = extractelement <4 x i64> [[TMP9]], i32 2
+; VSCALEFORTUNING2-NEXT:    [[TMP15:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP14]]
+; VSCALEFORTUNING2-NEXT:    [[TMP16:%.*]] = extractelement <4 x i64> [[TMP9]], i32 3
+; VSCALEFORTUNING2-NEXT:    [[TMP17:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP16]]
 ; VSCALEFORTUNING2-NEXT:    br label %[[VECTOR_BODY:.*]]
 ; VSCALEFORTUNING2:       [[VECTOR_BODY]]:
 ; VSCALEFORTUNING2-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; VSCALEFORTUNING2-NEXT:    [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[BROADCAST_SPLAT7:%.*]], %[[VECTOR_BODY]] ]
-; VSCALEFORTUNING2-NEXT:    [[VECTOR_RECUR4:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT3]], %[[VECTOR_PH]] ], [ [[TMP26:%.*]], %[[VECTOR_BODY]] ]
-; VSCALEFORTUNING2-NEXT:    [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP47:%.*]], %[[VECTOR_BODY]] ]
-; VSCALEFORTUNING2-NEXT:    [[VEC_PHI5:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP48:%.*]], %[[VECTOR_BODY]] ]
+; VSCALEFORTUNING2-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ <i32 poison, i32 poison, i32 poison, i32 0>, %[[VECTOR_PH]] ], [ [[BROADCAST_SPLAT6:%.*]], %[[VECTOR_BODY]] ]
+; VSCALEFORTUNING2-NEXT:    [[VECTOR_RECUR3:%.*]] = phi <4 x i32> [ <i32 poison, i32 poison, i32 poison, i32 0>, %[[VECTOR_PH]] ], [ [[TMP20:%.*]], %[[VECTOR_BODY]] ]
+; VSCALEFORTUNING2-NEXT:    [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP87:%.*]], %[[VECTOR_BODY]] ]
+; VSCALEFORTUNING2-NEXT:    [[VEC_PHI4:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP88:%.*]], %[[VECTOR_BODY]] ]
 ; VSCALEFORTUNING2-NEXT:    [[TMP24:%.*]] = load i32, ptr [[TMP8]], align 4
-; VSCALEFORTUNING2-NEXT:    [[BROADCAST_SPLATINSERT6:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP24]], i64 0
-; VSCALEFORTUNING2-NEXT:    [[BROADCAST_SPLAT7]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT6]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-; VSCALEFORTUNING2-NEXT:    [[TMP25:%.*]] = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> [[VECTOR_RECUR]], <vscale x 4 x i32> [[BROADCAST_SPLAT7]], i32 -1)
-; VSCALEFORTUNING2-NEXT:    [[TMP26]] = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> [[BROADCAST_SPLAT7]], <vscale x 4 x i32> [[BROADCAST_SPLAT7]], i32 -1)
-; VSCALEFORTUNING2-NEXT:    [[TMP27:%.*]] = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> [[VECTOR_RECUR4]], <vscale x 4 x i32> [[TMP25]], i32 -1)
-; VSCALEFORTUNING2-NEXT:    [[TMP28:%.*]] = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> [[TMP25]], <vscale x 4 x i32> [[TMP26]], i32 -1)
-; VSCALEFORTUNING2-NEXT:    [[TMP29:%.*]] = or <vscale x 4 x i32> [[TMP27]], [[BROADCAST_SPLAT]]
-; VSCALEFORTUNING2-NEXT:    [[TMP30:%.*]] = or <vscale x 4 x i32> [[TMP28]], [[BROADCAST_SPLAT]]
-; VSCALEFORTUNING2-NEXT:    [[TMP31:%.*]] = shl <vscale x 4 x i32> [[TMP29]], splat (i32 1)
-; VSCALEFORTUNING2-NEXT:    [[TMP32:%.*]] = shl <vscale x 4 x i32> [[TMP30]], splat (i32 1)
-; VSCALEFORTUNING2-NEXT:    [[TMP33:%.*]] = or <vscale x 4 x i32> [[TMP31]], splat (i32 2)
-; VSCALEFORTUNING2-NEXT:    [[TMP34:%.*]] = or <vscale x 4 x i32> [[TMP32]], splat (i32 2)
-; VSCALEFORTUNING2-NEXT:    [[TMP35:%.*]] = or <vscale x 4 x i32> [[TMP11]], [[TMP33]]
-; VSCALEFORTUNING2-NEXT:    [[TMP36:%.*]] = or <vscale x 4 x i32> [[TMP11]], [[TMP34]]
-; VSCALEFORTUNING2-NEXT:    [[TMP37:%.*]] = or <vscale x 4 x i32> [[TMP35]], [[BROADCAST_SPLAT]]
-; VSCALEFORTUNING2-NEXT:    [[TMP38:%.*]] = or <vscale x 4 x i32> [[TMP36]], [[BROADCAST_SPLAT]]
-; VSCALEFORTUNING2-NEXT:    [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[DOTSPLAT]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; VSCALEFORTUNING2-NEXT:    [[WIDE_MASKED_GATHER8:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[DOTSPLAT]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; VSCALEFORTUNING2-NEXT:    [[TMP39:%.*]] = lshr <vscale x 4 x i32> [[TMP37]], splat (i32 1)
-; VSCALEFORTUNING2-NEXT:    [[TMP40:%.*]] = lshr <vscale x 4 x i32> [[TMP38]], splat (i32 1)
-; VSCALEFORTUNING2-NEXT:    [[TMP41:%.*]] = zext <vscale x 4 x i32> [[TMP39]] to <vscale x 4 x i64>
-; VSCALEFORTUNING2-NEXT:    [[TMP42:%.*]] = zext <vscale x 4 x i32> [[TMP40]] to <vscale x 4 x i64>
-; VSCALEFORTUNING2-NEXT:    [[TMP43:%.*]] = getelementptr i32, ptr [[SRC_2]], <vscale x 4 x i64> [[TMP41]]
-; VSCALEFORTUNING2-NEXT:    [[TMP44:%.*]] = getelementptr i32, ptr [[SRC_2]], <vscale x 4 x i64> [[TMP42]]
-; VSCALEFORTUNING2-NEXT:    [[WIDE_MASKED_GATHER9:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP43]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; VSCALEFORTUNING2-NEXT:    [[WIDE_MASKED_GATHER10:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP44]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
-; VSCALEFORTUNING2-NEXT:    [[TMP45:%.*]] = or <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], [[VEC_PHI]]
-; VSCALEFORTUNING2-NEXT:    [[TMP46:%.*]] = or <vscale x 4 x i32> [[WIDE_MASKED_GATHER8]], [[VEC_PHI5]]
-; VSCALEFORTUNING2-NEXT:    [[TMP47]] = or <vscale x 4 x i32> [[TMP45]], [[WIDE_MASKED_GATHER9]]
-; VSCALEFORTUNING2-NEXT:    [[TMP48]] = or <vscale x 4 x i32> [[TMP46]], [[WIDE_MASKED_GATHER10]]
-; VSCALEFORTUNING2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
+; VSCALEFORTUNING2-NEXT:    [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <4 x i32> poison, i32 [[TMP24]], i64 0
+; VSCALEFORTUNING2-NEXT:    [[BROADCAST_SPLAT6]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT5]], <4 x i32> poison, <4 x i32> zeroinitializer
+; VSCALEFORTUNING2-NEXT:    [[TMP19:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[BROADCAST_SPLAT6]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; VSCALEFORTUNING2-NEXT:    [[TMP20]] = shufflevector <4 x i32> [[BROADCAST_SPLAT6]], <4 x i32> [[BROADCAST_SPLAT6]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; VSCALEFORTUNING2-NEXT:    [[TMP21:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR3]], <4 x i32> [[TMP19]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; VSCALEFORTUNING2-NEXT:    [[TMP22:%.*]] = shufflevector <4 x i32> [[TMP19]], <4 x i32> [[TMP20]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; VSCALEFORTUNING2-NEXT:    [[TMP23:%.*]] = or <4 x i32> [[TMP21]], [[BROADCAST_SPLAT2]]
+; VSCALEFORTUNING2-NEXT:    [[TMP92:%.*]] = or <4 x i32> [[TMP22]], [[BROADCAST_SPLAT2]]
+; VSCALEFORTUNING2-NEXT:    [[TMP25:%.*]] = shl <4 x i32> [[TMP23]], splat (i32 1)
+; VSCALEFORTUNING2-NEXT:    [[TMP26:%.*]] = shl <4 x i32> [[TMP92]], splat (i32 1)
+; VSCALEFORTUNING2-NEXT:    [[TMP27:%.*]] = or <4 x i32> [[TMP25]], splat (i32 2)
+; VSCALEFORTUNING2-NEXT:    [[TMP28:%.*]] = or <4 x i32> [[TMP26]], splat (i32 2)
+; VSCALEFORTUNING2-NEXT:    [[TMP29:%.*]] = or <4 x i32> [[TMP5]], [[TMP27]]
+; VSCALEFORTUNING2-NEXT:    [[TMP30:%.*]] = or <4 x i32> [[TMP5]], [[TMP28]]
+; VSCALEFORTUNING2-NEXT:    [[TMP31:%.*]] = or <4 x i32> [[TMP29]], [[BROADCAST_SPLAT2]]
+; VSCALEFORTUNING2-NEXT:    [[TMP32:%.*]] = or <4 x i32> [[TMP30]], [[BROADCAST_SPLAT2]]
+; VSCALEFORTUNING2-NEXT:    [[TMP33:%.*]] = load i32, ptr [[TMP11]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP34:%.*]] = load i32, ptr [[TMP13]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP35:%.*]] = load i32, ptr [[TMP15]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP36:%.*]] = load i32, ptr [[TMP17]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP37:%.*]] = insertelement <4 x i32> poison, i32 [[TMP33]], i32 0
+; VSCALEFORTUNING2-NEXT:    [[TMP38:%.*]] = insertelement <4 x i32> [[TMP37]], i32 [[TMP34]], i32 1
+; VSCALEFORTUNING2-NEXT:    [[TMP39:%.*]] = insertelement <4 x i32> [[TMP38]], i32 [[TMP35]], i32 2
+; VSCALEFORTUNING2-NEXT:    [[TMP40:%.*]] = insertelement <4 x i32> [[TMP39]], i32 [[TMP36]], i32 3
+; VSCALEFORTUNING2-NEXT:    [[TMP41:%.*]] = load i32, ptr [[TMP11]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP42:%.*]] = load i32, ptr [[TMP13]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP43:%.*]] = load i32, ptr [[TMP15]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP44:%.*]] = load i32, ptr [[TMP17]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP45:%.*]] = insertelement <4 x i32> poison, i32 [[TMP41]], i32 0
+; VSCALEFORTUNING2-NEXT:    [[TMP46:%.*]] = insertelement <4 x i32> [[TMP45]], i32 [[TMP42]], i32 1
+; VSCALEFORTUNING2-NEXT:    [[TMP47:%.*]] = insertelement <4 x i32> [[TMP46]], i32 [[TMP43]], i32 2
+; VSCALEFORTUNING2-NEXT:    [[TMP48:%.*]] = insertelement <4 x i32> [[TMP47]], i32 [[TMP44]], i32 3
+; VSCALEFORTUNING2-NEXT:    [[TMP93:%.*]] = lshr <4 x i32> [[TMP31]], splat (i32 1)
+; VSCALEFORTUNING2-NEXT:    [[TMP50:%.*]] = lshr <4 x i32> [[TMP32]], splat (i32 1)
+; VSCALEFORTUNING2-NEXT:    [[TMP51:%.*]] = zext <4 x i32> [[TMP93]] to <4 x i64>
+; VSCALEFORTUNING2-NEXT:    [[TMP52:%.*]] = extractelement <4 x i64> [[TMP51]], i32 0
+; VSCALEFORTUNING2-NEXT:    [[TMP53:%.*]] = extractelement <4 x i64> [[TMP51]], i32 1
+; VSCALEFORTUNING2-NEXT:    [[TMP54:%.*]] = extractelement <4 x i64> [[TMP51]], i32 2
+; VSCALEFORTUNING2-NEXT:    [[TMP94:%.*]] = extractelement <4 x i64> [[TMP51]], i32 3
+; VSCALEFORTUNING2-NEXT:    [[TMP95:%.*]] = zext <4 x i32> [[TMP50]] to <4 x i64>
+; VSCALEFORTUNING2-NEXT:    [[TMP96:%.*]] = extractelement <4 x i64> [[TMP95]], i32 0
+; VSCALEFORTUNING2-NEXT:    [[TMP97:%.*]] = extractelement <4 x i64> [[TMP95]], i32 1
+; VSCALEFORTUNING2-NEXT:    [[TMP98:%.*]] = extractelement <4 x i64> [[TMP95]], i32 2
+; VSCALEFORTUNING2-NEXT:    [[TMP99:%.*]] = extractelement <4 x i64> [[TMP95]], i32 3
+; VSCALEFORTUNING2-NEXT:    [[TMP100:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP52]]
+; VSCALEFORTUNING2-NEXT:    [[TMP101:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP53]]
+; VSCALEFORTUNING2-NEXT:    [[TMP102:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP54]]
+; VSCALEFORTUNING2-NEXT:    [[TMP103:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP94]]
+; VSCALEFORTUNING2-NEXT:    [[TMP65:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP96]]
+; VSCALEFORTUNING2-NEXT:    [[TMP66:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP97]]
+; VSCALEFORTUNING2-NEXT:    [[TMP67:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP98]]
+; VSCALEFORTUNING2-NEXT:    [[TMP68:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP99]]
+; VSCALEFORTUNING2-NEXT:    [[TMP69:%.*]] = load i32, ptr [[TMP100]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP70:%.*]] = load i32, ptr [[TMP101]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP71:%.*]] = load i32, ptr [[TMP102]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP72:%.*]] = load i32, ptr [[TMP103]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP73:%.*]] = insertelement <4 x i32> poison, i32 [[TMP69]], i32 0
+; VSCALEFORTUNING2-NEXT:    [[TMP74:%.*]] = insertelement <4 x i32> [[TMP73]], i32 [[TMP70]], i32 1
+; VSCALEFORTUNING2-NEXT:    [[TMP75:%.*]] = insertelement <4 x i32> [[TMP74]], i32 [[TMP71]], i32 2
+; VSCALEFORTUNING2-NEXT:    [[TMP76:%.*]] = insertelement <4 x i32> [[TMP75]], i32 [[TMP72]], i32 3
+; VSCALEFORTUNING2-NEXT:    [[TMP77:%.*]] = load i32, ptr [[TMP65]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP78:%.*]] = load i32, ptr [[TMP66]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP79:%.*]] = load i32, ptr [[TMP67]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP80:%.*]] = load i32, ptr [[TMP68]], align 4
+; VSCALEFORTUNING2-NEXT:    [[TMP81:%.*]] = insertelement <4 x i32> poison, i32 [[TMP77]], i32 0
+; VSCALEFORTUNING2-NEXT:    [[TMP82:%.*]] = insertelement <4 x i32> [[TMP81]], i32 [[TMP78]], i32 1
+; VSCALEFORTUNING2-NEXT:    [[TMP83:%.*]] = insertelement <4 x i32> [[TMP82]], i32 [[TMP79]], i32 2
+; VSCALEFORTUNING2-NEXT:    [[TMP84:%.*]] = insertelement <4 x i32> [[TMP83]], i32 [[TMP80]], i32 3
+; VSCALEFORTUNING2-NEXT:    [[TMP85:%.*]] = or <4 x i32> [[TMP40]], [[VEC_PHI]]
+; VSCALEFORTUNING2-NEXT:    [[TMP86:%.*]] = or <4 x i32> [[TMP48]], [[VEC_PHI4]]
+; VSCALEFORTUNING2-NEXT:    [[TMP87]] = or <4 x i32> [[TMP85]], [[TMP76]]
+; VSCALEFORTUNING2-NEXT:    [[TMP88]] = or <4 x i32> [[TMP86]], [[TMP84]]
+; VSCALEFORTUNING2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
 ; VSCALEFORTUNING2-NEXT:    [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; VSCALEFORTUNING2-NEXT:    br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; VSCALEFORTUNING2:       [[MIDDLE_BLOCK]]:
-; VSCALEFORTUNING2-NEXT:    [[BIN_RDX:%.*]] = or <vscale x 4 x i32> [[TMP48]], [[TMP47]]
-; VSCALEFORTUNING2-NEXT:    [[TMP50:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
-; VSCALEFORTUNING2-NEXT:    [[TMP51:%.*]] = call i32 @llvm.vscale.i32()
-; VSCALEFORTUNING2-NEXT:    [[TMP52:%.*]] = mul nuw i32 [[TMP51]], 4
-; VSCALEFORTUNING2-NEXT:    [[TMP53:%.*]] = sub i32 [[TMP52]], 1
-; VSCALEFORTUNING2-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 4 x i32> [[TMP26]], i32 [[TMP53]]
+; VSCALEFORTUNING2-NEXT:    [[BIN_RDX:%.*]] = or <4 x i32> [[TMP88]], [[TMP87]]
+; VSCALEFORTUNING2-NEXT:    [[TMP90:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[BIN_RDX]])
+; VSCALEFORTUNING2-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[TMP20]], i32 3
 ; VSCALEFORTUNING2-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
 ; VSCALEFORTUNING2-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
 ; VSCALEFORTUNING2:       [[SCALAR_PH]]:
 ; VSCALEFORTUNING2-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[TMP24]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; VSCALEFORTUNING2-NEXT:    [[SCALAR_RECUR_INIT11:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; VSCALEFORTUNING2-NEXT:    [[SCALAR_RECUR_INIT7:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
 ; VSCALEFORTUNING2-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; VSCALEFORTUNING2-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP50]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; VSCALEFORTUNING2-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP90]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
 ; VSCALEFORTUNING2-NEXT:    br label %[[LOOP:.*]]
 ; VSCALEFORTUNING2:       [[LOOP]]:
-; VSCALEFORTUNING2-NEXT:    [[TMP54:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP57:%.*]], %[[LOOP]] ]
-; VSCALEFORTUNING2-NEXT:    [[TMP55:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT11]], %[[SCALAR_PH]] ], [ [[TMP54]], %[[LOOP]] ]
+; VSCALEFORTUNING2-NEXT:    [[TMP91:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP57:%.*]], %[[LOOP]] ]
+; VSCALEFORTUNING2-NEXT:    [[TMP55:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT7]], %[[SCALAR_PH]] ], [ [[TMP91]], %[[LOOP]] ]
 ; VSCALEFORTUNING2-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
 ; VSCALEFORTUNING2-NEXT:    [[SUM_RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_2:%.*]], %[[LOOP]] ]
 ; VSCALEFORTUNING2-NEXT:    [[TMP56:%.*]] = add i64 [[Y]], 1
@@ -169,7 +324,7 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2)
 ; VSCALEFORTUNING2-NEXT:    [[EC:%.*]] = icmp eq i64 [[IV]], [[Y]]
 ; VSCALEFORTUNING2-NEXT:    br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
 ; VSCALEFORTUNING2:       [[EXIT]]:
-; VSCALEFORTUNING2-NEXT:    [[RED_2_LCSSA:%.*]] = phi i32 [ [[RED_2]], %[[LOOP]] ], [ [[TMP50]], %[[MIDDLE_BLOCK]] ]
+; VSCALEFORTUNING2-NEXT:    [[RED_2_LCSSA:%.*]] = phi i32 [ [[RED_2]], %[[LOOP]] ], [ [[TMP90]], %[[MIDDLE_BLOCK]] ]
 ; VSCALEFORTUNING2-NEXT:    ret i32 [[RED_2_LCSSA]]
 ;
 ; PRED-LABEL: define i32 @chained_recurrences(
@@ -316,7 +471,7 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 {
 ; DEFAULT-NEXT:    [[TMP22]] = or <vscale x 4 x i16> [[TMP20]], [[VEC_PHI1]]
 ; DEFAULT-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
 ; DEFAULT-NEXT:    [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; DEFAULT-NEXT:    br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; DEFAULT-NEXT:    br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
 ; DEFAULT:       [[MIDDLE_BLOCK]]:
 ; DEFAULT-NEXT:    [[BIN_RDX:%.*]] = or <vscale x 4 x i16> [[TMP22]], [[TMP21]]
 ; DEFAULT-NEXT:    [[TMP24:%.*]] = call i16 @llvm.vector.reduce.or.nxv4i16(<vscale x 4 x i16> [[BIN_RDX]])
@@ -335,7 +490,7 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 {
 ; DEFAULT-NEXT:    [[RED_NEXT]] = or i16 [[DIV]], [[RED]]
 ; DEFAULT-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
 ; DEFAULT-NEXT:    [[EC:%.*]] = icmp eq i64 [[IV]], [[N]]
-; DEFAULT-NEXT:    br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; DEFAULT-NEXT:    br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
 ; DEFAULT:       [[EXIT]]:
 ; DEFAULT-NEXT:    [[RED_NEXT_LCSSA:%.*]] = phi i16 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP24]], %[[MIDDLE_BLOCK]] ]
 ; DEFAULT-NEXT:    ret i16 [[RED_NEXT_LCSSA]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll
index fceab6f823d5a..8528bbd0e9812 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll
@@ -60,9 +60,17 @@ define void @replicating_load_used_as_store_addr_2(ptr noalias %invar.dst, ptr n
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TMP0]] to i64
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i128, ptr [[SRC]], i64 [[TMP1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = add i32 [[TMP3]], 123
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i32 0
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <4 x i32> [[TMP7]], i32 [[TMP12]], i32 1
+; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP13]], i32 2
+; CHECK-NEXT:    [[TMP10:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP6]], i32 3
+; CHECK-NEXT:    [[TMP11:%.*]] = add <4 x i32> [[TMP10]], splat (i32 123)
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[TMP11]], i32 3
 ; CHECK-NEXT:    store i32 [[TMP4]], ptr [[INVAR_DST]], align 8
-; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
 ; CHECK-NEXT:    br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
 ; CHECK:       [[MIDDLE_BLOCK]]:
@@ -103,20 +111,111 @@ define void @replicating_load_used_as_store_addr_3(ptr noalias %src, ptr noalias
 ; CHECK:       [[VECTOR_BODY]]:
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP19:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP5:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP52:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP68:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP85:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP9:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP10:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP11:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP14:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP15:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP16:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP17:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = load i8, ptr [[TMP2]], align 1
 ; CHECK-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
-; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP4]], 111
+; CHECK-NEXT:    [[TMP20:%.*]] = zext i8 [[TMP19]] to i32
+; CHECK-NEXT:    [[TMP21:%.*]] = zext i8 [[TMP5]] to i32
+; CHECK-NEXT:    [[TMP22:%.*]] = zext i8 [[TMP52]] to i32
+; CHECK-NEXT:    [[TMP23:%.*]] = zext i8 [[TMP68]] to i32
+; CHECK-NEXT:    [[TMP24:%.*]] = zext i8 [[TMP85]] to i32
+; CHECK-NEXT:    [[TMP25:%.*]] = zext i8 [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP26:%.*]] = zext i8 [[TMP10]] to i32
+; CHECK-NEXT:    [[TMP27:%.*]] = zext i8 [[TMP11]] to i32
+; CHECK-NEXT:    [[TMP28:%.*]] = zext i8 [[TMP12]] to i32
+; CHECK-NEXT:    [[TMP29:%.*]] = zext i8 [[TMP13]] to i32
+; CHECK-NEXT:    [[TMP30:%.*]] = zext i8 [[TMP14]] to i32
+; CHECK-NEXT:    [[TMP31:%.*]] = zext i8 [[TMP15]] to i32
+; CHECK-NEXT:    [[TMP32:%.*]] = zext i8 [[TMP16]] to i32
+; CHECK-NEXT:    [[TMP33:%.*]] = zext i8 [[TMP17]] to i32
+; CHECK-NEXT:    [[TMP34:%.*]] = zext i8 [[TMP18]] to i32
+; CHECK-NEXT:    [[TMP35:%.*]] = insertelement <16 x i32> poison, i32 [[TMP4]], i32 0
+; CHECK-NEXT:    [[TMP36:%.*]] = insertelement <16 x i32> [[TMP35]], i32 [[TMP20]], i32 1
+; CHECK-NEXT:    [[TMP37:%.*]] = insertelement <16 x i32> [[TMP36]], i32 [[TMP21]], i32 2
+; CHECK-NEXT:    [[TMP38:%.*]] = insertelement <16 x i32> [[TMP37]], i32 [[TMP22]], i32 3
+; CHECK-NEXT:    [[TMP39:%.*]] = insertelement <16 x i32> [[TMP38]], i32 [[TMP23]], i32 4
+; CHECK-NEXT:    [[TMP40:%.*]] = insertelement <16 x i32> [[TMP39]], i32 [[TMP24]], i32 5
+; CHECK-NEXT:    [[TMP41:%.*]] = insertelement <16 x i32> [[TMP40]], i32 [[TMP25]], i32 6
+; CHECK-NEXT:    [[TMP42:%.*]] = insertelement <16 x i32> [[TMP41]], i32 [[TMP26]], i32 7
+; CHECK-NEXT:    [[TMP43:%.*]] = insertelement <16 x i32> [[TMP42]], i32 [[TMP27]], i32 8
+; CHECK-NEXT:    [[TMP44:%.*]] = insertelement <16 x i32> [[TMP43]], i32 [[TMP28]], i32 9
+; CHECK-NEXT:    [[TMP45:%.*]] = insertelement <16 x i32> [[TMP44]], i32 [[TMP29]], i32 10
+; CHECK-NEXT:    [[TMP46:%.*]] = insertelement <16 x i32> [[TMP45]], i32 [[TMP30]], i32 11
+; CHECK-NEXT:    [[TMP47:%.*]] = insertelement <16 x i32> [[TMP46]], i32 [[TMP31]], i32 12
+; CHECK-NEXT:    [[TMP48:%.*]] = insertelement <16 x i32> [[TMP47]], i32 [[TMP32]], i32 13
+; CHECK-NEXT:    [[TMP49:%.*]] = insertelement <16 x i32> [[TMP48]], i32 [[TMP33]], i32 14
+; CHECK-NEXT:    [[TMP50:%.*]] = insertelement <16 x i32> [[TMP49]], i32 [[TMP34]], i32 15
+; CHECK-NEXT:    [[TMP51:%.*]] = xor <16 x i32> [[TMP50]], splat (i32 111)
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i32 [[TMP4]] to i64
+; CHECK-NEXT:    [[TMP53:%.*]] = zext i32 [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP54:%.*]] = zext i32 [[TMP21]] to i64
+; CHECK-NEXT:    [[TMP55:%.*]] = zext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP56:%.*]] = zext i32 [[TMP23]] to i64
+; CHECK-NEXT:    [[TMP57:%.*]] = zext i32 [[TMP24]] to i64
+; CHECK-NEXT:    [[TMP58:%.*]] = zext i32 [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP59:%.*]] = zext i32 [[TMP26]] to i64
+; CHECK-NEXT:    [[TMP60:%.*]] = zext i32 [[TMP27]] to i64
+; CHECK-NEXT:    [[TMP61:%.*]] = zext i32 [[TMP28]] to i64
+; CHECK-NEXT:    [[TMP62:%.*]] = zext i32 [[TMP29]] to i64
+; CHECK-NEXT:    [[TMP63:%.*]] = zext i32 [[TMP30]] to i64
+; CHECK-NEXT:    [[TMP64:%.*]] = zext i32 [[TMP31]] to i64
+; CHECK-NEXT:    [[TMP65:%.*]] = zext i32 [[TMP32]] to i64
+; CHECK-NEXT:    [[TMP66:%.*]] = zext i32 [[TMP33]] to i64
+; CHECK-NEXT:    [[TMP67:%.*]] = zext i32 [[TMP34]] to i64
 ; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP69:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP53]]
+; CHECK-NEXT:    [[TMP70:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP54]]
+; CHECK-NEXT:    [[TMP71:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP55]]
+; CHECK-NEXT:    [[TMP72:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP56]]
+; CHECK-NEXT:    [[TMP73:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP57]]
+; CHECK-NEXT:    [[TMP74:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP58]]
+; CHECK-NEXT:    [[TMP75:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP59]]
+; CHECK-NEXT:    [[TMP76:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP60]]
+; CHECK-NEXT:    [[TMP77:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP61]]
+; CHECK-NEXT:    [[TMP78:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP62]]
+; CHECK-NEXT:    [[TMP79:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP63]]
+; CHECK-NEXT:    [[TMP80:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP64]]
+; CHECK-NEXT:    [[TMP81:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP65]]
+; CHECK-NEXT:    [[TMP82:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP66]]
+; CHECK-NEXT:    [[TMP83:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP67]]
 ; CHECK-NEXT:    store i8 0, ptr [[TMP7]], align 1
-; CHECK-NEXT:    [[TMP8:%.*]] = trunc i32 [[TMP5]] to i8
+; CHECK-NEXT:    store i8 0, ptr [[TMP69]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP70]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP71]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP72]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP73]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP74]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP75]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP76]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP77]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP78]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP79]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP80]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP81]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP82]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP83]], align 1
+; CHECK-NEXT:    [[TMP84:%.*]] = trunc <16 x i32> [[TMP51]] to <16 x i8>
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <16 x i8> [[TMP84]], i32 15
 ; CHECK-NEXT:    store i8 [[TMP8]], ptr [[INVAR_DST]], align 1
-; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
-; CHECK-NEXT:    br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-NEXT:    [[TMP86:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96
+; CHECK-NEXT:    br i1 [[TMP86]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
 ; CHECK:       [[MIDDLE_BLOCK]]:
-; CHECK-NEXT:    br label %[[EXIT:.*]]
-; CHECK:       [[EXIT]]:
-; CHECK-NEXT:    ret void
+; CHECK-NEXT:    br label %[[SCALAR_PH:.*]]
+; CHECK:       [[SCALAR_PH]]:
 ;
 entry:
   br label %loop
@@ -171,7 +270,7 @@ define void @uniform_gep_for_replicating_gep(ptr %dst) {
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
 ; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <2 x i32> [[STEP_ADD]], splat (i32 2)
 ; CHECK-NEXT:    [[TMP24:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128
-; CHECK-NEXT:    br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT:    br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
 ; CHECK:       [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT:    br label %[[SCALAR_PH:.*]]
 ; CHECK:       [[SCALAR_PH]]:
@@ -239,7 +338,7 @@ define void @test_load_gep_widen_induction(ptr noalias %dst, ptr noalias %dst2)
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 8
 ; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD_3]], splat (i64 2)
 ; CHECK-NEXT:    [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96
-; CHECK-NEXT:    br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT:    br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
 ; CHECK:       [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT:    br label %[[SCALAR_PH:.*]]
 ; CHECK:       [[SCALAR_PH]]:
@@ -302,7 +401,7 @@ define ptr @replicating_store_in_conditional_latch(ptr %p, i32 %n) #0 {
 ; CHECK-NEXT:    store ptr null, ptr [[TMP15]], align 8
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
 ; CHECK-NEXT:    [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT:    br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT:    br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
 ; CHECK:       [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT:    br label %[[SCALAR_PH]]
 ; CHECK:       [[SCALAR_PH]]:
@@ -467,21 +566,21 @@ define void @test_prefer_vector_addressing(ptr %start, ptr %ms, ptr noalias %src
 ; CHECK-NEXT:    [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP11]]
 ; CHECK-NEXT:    [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP12]]
 ; CHECK-NEXT:    [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP13]]
-; CHECK-NEXT:    [[TMP14:%.*]] = load i64, ptr [[NEXT_GEP]], align 1, !tbaa [[LONG_LONG_TBAA12:![0-9]+]]
-; CHECK-NEXT:    [[TMP15:%.*]] = load i64, ptr [[NEXT_GEP3]], align 1, !tbaa [[LONG_LONG_TBAA12]]
-; CHECK-NEXT:    [[TMP16:%.*]] = load i64, ptr [[NEXT_GEP4]], align 1, !tbaa [[LONG_LONG_TBAA12]]
-; CHECK-NEXT:    [[TMP17:%.*]] = load i64, ptr [[NEXT_GEP5]], align 1, !tbaa [[LONG_LONG_TBAA12]]
+; CHECK-NEXT:    [[TMP14:%.*]] = load i64, ptr [[NEXT_GEP]], align 1, !tbaa [[LONG_LONG_TBAA13:![0-9]+]]
+; CHECK-NEXT:    [[TMP15:%.*]] = load i64, ptr [[NEXT_GEP3]], align 1, !tbaa [[LONG_LONG_TBAA13]]
+; CHECK-NEXT:    [[TMP16:%.*]] = load i64, ptr [[NEXT_GEP4]], align 1, !tbaa [[LONG_LONG_TBAA13]]
+; CHECK-NEXT:    [[TMP17:%.*]] = load i64, ptr [[NEXT_GEP5]], align 1, !tbaa [[LONG_LONG_TBAA13]]
 ; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP14]]
 ; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP15]]
 ; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP16]]
 ; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP17]]
-; CHECK-NEXT:    store i32 0, ptr [[TMP18]], align 4, !tbaa [[INT_TBAA17:![0-9]+]]
-; CHECK-NEXT:    store i32 0, ptr [[TMP19]], align 4, !tbaa [[INT_TBAA17]]
-; CHECK-NEXT:    store i32 0, ptr [[TMP20]], align 4, !tbaa [[INT_TBAA17]]
-; CHECK-NEXT:    store i32 0, ptr [[TMP21]], align 4, !tbaa [[INT_TBAA17]]
+; CHECK-NEXT:    store i32 0, ptr [[TMP18]], align 4, !tbaa [[INT_TBAA18:![0-9]+]]
+; CHECK-NEXT:    store i32 0, ptr [[TMP19]], align 4, !tbaa [[INT_TBAA18]]
+; CHECK-NEXT:    store i32 0, ptr [[TMP20]], align 4, !tbaa [[INT_TBAA18]]
+; CHECK-NEXT:    store i32 0, ptr [[TMP21]], align 4, !tbaa [[INT_TBAA18]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
 ; CHECK-NEXT:    [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT:    br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT:    br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
 ; CHECK:       [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP6]], [[N_VEC]]
 ; CHECK-NEXT:    br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
@@ -582,7 +681,7 @@ define double @test_scalarization_cost_for_load_of_address(ptr %src.0, ptr %src.
 ; CHECK-NEXT:    [[TMP20:%.*]] = fmul <2 x double> [[TMP9]], [[TMP19]]
 ; CHECK-NEXT:    [[TMP21]] = call double @llvm.vector.reduce.fadd.v2f64(double [[VEC_PHI]], <2 x double> [[TMP20]])
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; CHECK-NEXT:    br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-NEXT:    br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
 ; CHECK:       [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT:    br label %[[EXIT:.*]]
 ; CHECK:       [[EXIT]]:
@@ -743,7 +842,7 @@ define i32 @test_or_reduction_with_stride_2(i32 %scale, ptr %src) {
 ; CHECK-NEXT:    [[TMP66]] = or <16 x i32> [[TMP65]], [[VEC_PHI]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
 ; CHECK-NEXT:    [[TMP67:%.*]] = icmp eq i64 [[INDEX_NEXT]], 48
-; CHECK-NEXT:    br i1 [[TMP67]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT:    br i1 [[TMP67]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
 ; CHECK:       [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT:    [[TMP68:%.*]] = call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> [[TMP66]])
 ; CHECK-NEXT:    br label %[[SCALAR_PH:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vector-despite-neon-gather.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vector-despite-neon-gather.ll
new file mode 100644
index 0000000000000..47870487a0644
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/vector-despite-neon-gather.ll
@@ -0,0 +1,117 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
+; RUN: opt -p loop-vectorize -S %s | FileCheck %s
+
+; In this test, the hot loop is mostly vectorizable, but contains one gather
+; (a load from a data-dependent address) that has to be scalarized. It is
+; still desirable to vectorize the loop in this case.
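+;
+; A rough C equivalent of the loop is sketched below for orientation only;
+; the variable names are illustrative and do not come from the original
+; source:
+;
+;   #include <string.h>
+;   void decompress_offsets(long base, const unsigned char *src, long unused,
+;                           const int *bit_offsets, const int *bit_widths,
+;                           unsigned long *dst) {
+;     for (long i = 0; i < 256; ++i) {
+;       int pos = bit_offsets[i] + (int)base;
+;       unsigned long word;
+;       // Data-dependent, unaligned 8-byte load: the non-consecutive access
+;       // that becomes the scalarized "gather" in the vectorized loop.
+;       memcpy(&word, src + (pos >> 3), 8);
+;       unsigned long mask = ~(~0UL << (bit_widths[i] & 63));
+;       dst[i] += 0x80000000UL + ((word >> (pos & 7)) & mask);
+;     }
+;   }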
+
+target triple = "arm64-apple-macosx15.0.0"
+
+define void @decompress_offsets(i64 noundef %0, ptr noalias noundef nonnull readonly align 1 captures(none) %1, i64 noundef %2, ptr noalias noundef readonly align 4 captures(none) dereferenceable(1024) %3, ptr noalias noundef readonly align 4 captures(none) dereferenceable(1024) %4, ptr noalias noundef align 8 captures(none) dereferenceable(2048) %5) {
+; CHECK-LABEL: define void @decompress_offsets(
+; CHECK-SAME: i64 noundef [[TMP0:%.*]], ptr noalias noundef nonnull readonly align 1 captures(none) [[TMP1:%.*]], i64 noundef [[TMP2:%.*]], ptr noalias noundef readonly align 4 captures(none) dereferenceable(1024) [[TMP3:%.*]], ptr noalias noundef readonly align 4 captures(none) dereferenceable(1024) [[TMP4:%.*]], ptr noalias noundef align 8 captures(none) dereferenceable(2048) [[TMP5:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP6:%.*]] = trunc i64 [[TMP0]] to i32
+; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[TMP8:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP4]], i64 [[TMP7]]
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP3]], i64 [[TMP7]]
+; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP3]], i64 [[TMP8]]
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP3]], i64 [[TMP9]]
+; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP3]], i64 [[TMP10]]
+; CHECK-NEXT:    [[TMP19:%.*]] = load i32, ptr [[TMP15]], align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = load i32, ptr [[TMP13]], align 4
+; CHECK-NEXT:    [[TMP18:%.*]] = load i32, ptr [[TMP14]], align 4
+; CHECK-NEXT:    [[TMP28:%.*]] = load i32, ptr [[TMP20]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = add i32 [[TMP19]], [[TMP6]]
+; CHECK-NEXT:    [[TMP29:%.*]] = add i32 [[TMP17]], [[TMP6]]
+; CHECK-NEXT:    [[TMP30:%.*]] = add i32 [[TMP18]], [[TMP6]]
+; CHECK-NEXT:    [[TMP33:%.*]] = add i32 [[TMP28]], [[TMP6]]
+; CHECK-NEXT:    [[TMP24:%.*]] = insertelement <4 x i32> poison, i32 [[TMP23]], i32 0
+; CHECK-NEXT:    [[TMP25:%.*]] = insertelement <4 x i32> [[TMP24]], i32 [[TMP29]], i32 1
+; CHECK-NEXT:    [[TMP34:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP30]], i32 2
+; CHECK-NEXT:    [[TMP27:%.*]] = insertelement <4 x i32> [[TMP34]], i32 [[TMP33]], i32 3
+; CHECK-NEXT:    [[TMP31:%.*]] = lshr i32 [[TMP23]], 3
+; CHECK-NEXT:    [[TMP16:%.*]] = lshr i32 [[TMP29]], 3
+; CHECK-NEXT:    [[TMP21:%.*]] = lshr i32 [[TMP30]], 3
+; CHECK-NEXT:    [[TMP35:%.*]] = lshr i32 [[TMP33]], 3
+; CHECK-NEXT:    [[TMP32:%.*]] = and <4 x i32> [[TMP27]], splat (i32 7)
+; CHECK-NEXT:    [[TMP36:%.*]] = zext nneg i32 [[TMP31]] to i64
+; CHECK-NEXT:    [[TMP26:%.*]] = zext nneg i32 [[TMP16]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = zext nneg i32 [[TMP21]] to i64
+; CHECK-NEXT:    [[TMP60:%.*]] = zext nneg i32 [[TMP35]] to i64
+; CHECK-NEXT:    [[TMP37:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 [[TMP36]]
+; CHECK-NEXT:    [[TMP38:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 [[TMP26]]
+; CHECK-NEXT:    [[TMP39:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 [[TMP22]]
+; CHECK-NEXT:    [[TMP40:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 [[TMP60]]
+; CHECK-NEXT:    [[TMP41:%.*]] = load i64, ptr [[TMP37]], align 1
+; CHECK-NEXT:    [[TMP42:%.*]] = load i64, ptr [[TMP38]], align 1
+; CHECK-NEXT:    [[TMP43:%.*]] = load i64, ptr [[TMP39]], align 1
+; CHECK-NEXT:    [[TMP44:%.*]] = load i64, ptr [[TMP40]], align 1
+; CHECK-NEXT:    [[TMP45:%.*]] = insertelement <4 x i64> poison, i64 [[TMP41]], i32 0
+; CHECK-NEXT:    [[TMP46:%.*]] = insertelement <4 x i64> [[TMP45]], i64 [[TMP42]], i32 1
+; CHECK-NEXT:    [[TMP47:%.*]] = insertelement <4 x i64> [[TMP46]], i64 [[TMP43]], i32 2
+; CHECK-NEXT:    [[TMP48:%.*]] = insertelement <4 x i64> [[TMP47]], i64 [[TMP44]], i32 3
+; CHECK-NEXT:    [[TMP49:%.*]] = zext nneg <4 x i32> [[TMP32]] to <4 x i64>
+; CHECK-NEXT:    [[TMP50:%.*]] = lshr <4 x i64> [[TMP48]], [[TMP49]]
+; CHECK-NEXT:    [[TMP51:%.*]] = and <4 x i32> [[WIDE_LOAD]], splat (i32 63)
+; CHECK-NEXT:    [[TMP52:%.*]] = zext nneg <4 x i32> [[TMP51]] to <4 x i64>
+; CHECK-NEXT:    [[TMP53:%.*]] = shl nsw <4 x i64> splat (i64 -1), [[TMP52]]
+; CHECK-NEXT:    [[TMP54:%.*]] = xor <4 x i64> [[TMP53]], splat (i64 -1)
+; CHECK-NEXT:    [[TMP55:%.*]] = and <4 x i64> [[TMP50]], [[TMP54]]
+; CHECK-NEXT:    [[TMP56:%.*]] = getelementptr inbounds nuw i64, ptr [[TMP5]], i64 [[TMP7]]
+; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP56]], align 8
+; CHECK-NEXT:    [[TMP57:%.*]] = add <4 x i64> [[WIDE_LOAD1]], splat (i64 2147483648)
+; CHECK-NEXT:    [[TMP58:%.*]] = add <4 x i64> [[TMP57]], [[TMP55]]
+; CHECK-NEXT:    store <4 x i64> [[TMP58]], ptr [[TMP56]], align 8
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT:    [[TMP59:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
+; CHECK-NEXT:    br i1 [[TMP59]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+
+entry:
+  %7 = trunc i64 %0 to i32
+  br label %loop
+
+loop:
+  %10 = phi i64 [ 0, %entry ], [ %11, %loop ]
+  %11 = add nuw nsw i64 %10, 1
+  %12 = getelementptr inbounds nuw i32, ptr %4, i64 %10
+  %13 = load i32, ptr %12, align 4
+  %14 = getelementptr inbounds nuw i32, ptr %3, i64 %10
+  %15 = load i32, ptr %14, align 4
+  %16 = add i32 %15, %7
+  %17 = lshr i32 %16, 3
+  %18 = and i32 %16, 7
+  %19 = zext nneg i32 %17 to i64
+  %20 = getelementptr inbounds nuw i8, ptr %1, i64 %19
+  %21 = load i64, ptr %20, align 1
+  %22 = zext nneg i32 %18 to i64
+  %23 = lshr i64 %21, %22
+  %24 = and i32 %13, 63
+  %25 = zext nneg i32 %24 to i64
+  %26 = shl nsw i64 -1, %25
+  %27 = xor i64 %26, -1
+  %28 = and i64 %23, %27
+  %29 = getelementptr inbounds nuw i64, ptr %5, i64 %10
+  %30 = load i64, ptr %29, align 8
+  %31 = add i64 %30, 2147483648
+  %32 = add i64 %31, %28
+  store i64 %32, ptr %29, align 8
+  %33 = icmp eq i64 %11, 256
+  br i1 %33, label %exit, label %loop
+
+exit:
+  ret void
+}


