[llvm] [LV][EVL] More lit tests for interleaved access. nfc (PR #152959)
Mel Chen via llvm-commits
llvm-commits at lists.llvm.org
Sun Aug 10 23:47:03 PDT 2025
https://github.com/Mel-Chen updated https://github.com/llvm/llvm-project/pull/152959
>From db0b894a6fec96d26926f151c5677cccc839a789 Mon Sep 17 00:00:00 2001
From: Mel Chen <mel.chen at sifive.com>
Date: Sat, 9 Aug 2025 05:20:07 -0700
Subject: [PATCH] nfc, add test cases for interleaved access
---
.../RISCV/tail-folding-interleave.ll | 1012 +++++++++++++++++
1 file changed, 1012 insertions(+)
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
index f1ec586b65e03..4d2204e1603b4 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
@@ -123,5 +123,1017 @@ for.cond.cleanup:
ret void
}
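+; Interleaved load group of factor 4 with a gap: only members 0, 1 and 3 are
+; loaded and accumulated into the reduction; member 2 is never read.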
+define i32 @load_factor_4_with_gap(i64 %n, ptr noalias %a) {
+; IF-EVL-LABEL: @load_factor_4_with_gap(
+; IF-EVL-NEXT: entry:
+; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; IF-EVL: vector.ph:
+; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; IF-EVL-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; IF-EVL-NEXT: [[TMP3:%.*]] = mul <vscale x 4 x i64> [[TMP2]], splat (i64 1)
+; IF-EVL-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP3]]
+; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
+; IF-EVL: vector.body:
+; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP4:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; IF-EVL-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
+; IF-EVL-NEXT: [[TMP6:%.*]] = mul i64 1, [[TMP5]]
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP6]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP7:%.*]] = shl nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 2)
+; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[TMP7]]
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
+; IF-EVL-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], [[VEC_PHI]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = or disjoint <vscale x 4 x i64> [[TMP7]], splat (i64 1)
+; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP10]]
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
+; IF-EVL-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[TMP9]], [[WIDE_MASKED_GATHER1]]
+; IF-EVL-NEXT: [[TMP13:%.*]] = or disjoint <vscale x 4 x i64> [[TMP7]], splat (i64 3)
+; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP13]]
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
+; IF-EVL-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[TMP12]], [[WIDE_MASKED_GATHER2]]
+; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP15]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP4]])
+; IF-EVL-NEXT: [[TMP17:%.*]] = zext i32 [[TMP4]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP17]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]]
+; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; IF-EVL: middle.block:
+; IF-EVL-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]])
+; IF-EVL-NEXT: br label [[EXIT:%.*]]
+; IF-EVL: scalar.ph:
+; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
+; IF-EVL: for.body:
+; IF-EVL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RED_069:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ADD11:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP20:%.*]] = shl nsw i64 [[INDVARS_IV]], 2
+; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP20]]
+; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; IF-EVL-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP21]], [[RED_069]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = or disjoint i64 [[TMP20]], 1
+; IF-EVL-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP22]]
+; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
+; IF-EVL-NEXT: [[ADD6:%.*]] = add nsw i32 [[ADD1]], [[TMP23]]
+; IF-EVL-NEXT: [[TMP24:%.*]] = or disjoint i64 [[TMP20]], 3
+; IF-EVL-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP24]]
+; IF-EVL-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4
+; IF-EVL-NEXT: [[ADD11]] = add nsw i32 [[ADD6]], [[TMP25]]
+; IF-EVL-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; IF-EVL: exit:
+; IF-EVL-NEXT: [[ADD11_LCSSA:%.*]] = phi i32 [ [[ADD11]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
+; IF-EVL-NEXT: ret i32 [[ADD11_LCSSA]]
+;
+; NO-VP-LABEL: @load_factor_4_with_gap(
+; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul <vscale x 4 x i64> [[TMP4]], splat (i64 1)
+; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP5]]
+; NO-VP-NEXT: [[TMP6:%.*]] = mul i64 1, [[TMP3]]
+; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP6]], i64 0
+; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP7:%.*]] = shl nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 2)
+; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[TMP7]]
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP8]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10:%.*]] = or disjoint <vscale x 4 x i64> [[TMP7]], splat (i64 1)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP10]]
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP11]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[TMP9]], [[WIDE_MASKED_GATHER1]]
+; NO-VP-NEXT: [[TMP13:%.*]] = or disjoint <vscale x 4 x i64> [[TMP7]], splat (i64 3)
+; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP13]]
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP14]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP15]] = add <vscale x 4 x i32> [[TMP12]], [[WIDE_MASKED_GATHER2]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; NO-VP-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP15]])
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; NO-VP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP17]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VP: for.body:
+; NO-VP-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[RED_069:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD11:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[TMP18:%.*]] = shl nsw i64 [[INDVARS_IV]], 2
+; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP18]]
+; NO-VP-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; NO-VP-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP19]], [[RED_069]]
+; NO-VP-NEXT: [[TMP20:%.*]] = or disjoint i64 [[TMP18]], 1
+; NO-VP-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP20]]
+; NO-VP-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
+; NO-VP-NEXT: [[ADD6:%.*]] = add nsw i32 [[ADD1]], [[TMP21]]
+; NO-VP-NEXT: [[TMP22:%.*]] = or disjoint i64 [[TMP18]], 3
+; NO-VP-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP22]]
+; NO-VP-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4
+; NO-VP-NEXT: [[ADD11]] = add nsw i32 [[ADD6]], [[TMP23]]
+; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; NO-VP: exit:
+; NO-VP-NEXT: [[ADD11_LCSSA:%.*]] = phi i32 [ [[ADD11]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
+; NO-VP-NEXT: ret i32 [[ADD11_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %rdx = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+ %0 = shl nsw i64 %iv, 2
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %0
+ %1 = load i32, ptr %arrayidx, align 4
+ %add1 = add nsw i32 %1, %rdx
+ %2 = or disjoint i64 %0, 1
+ %arrayidx5 = getelementptr inbounds i32, ptr %a, i64 %2
+ %3 = load i32, ptr %arrayidx5, align 4
+ %add6 = add nsw i32 %add1, %3
+ %4 = or disjoint i64 %0, 3
+ %arrayidx10 = getelementptr inbounds i32, ptr %a, i64 %4
+ %5 = load i32, ptr %arrayidx10, align 4
+ %add11 = add nsw i32 %add6, %5
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret i32 %add11
+}
+
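+; Interleaved store group of factor 4 with a gap: only members 0, 1 and 3 are
+; written; member 2 is left untouched.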
+define void @store_factor_4_with_gap(i64 %n, ptr noalias %a) {
+; IF-EVL-LABEL: @store_factor_4_with_gap(
+; IF-EVL-NEXT: entry:
+; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; IF-EVL: vector.ph:
+; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; IF-EVL-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; IF-EVL-NEXT: [[TMP3:%.*]] = mul <vscale x 4 x i64> [[TMP2]], splat (i64 1)
+; IF-EVL-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP3]]
+; IF-EVL-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; IF-EVL-NEXT: [[TMP5:%.*]] = mul <vscale x 4 x i32> [[TMP4]], splat (i32 1)
+; IF-EVL-NEXT: [[INDUCTION1:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP5]]
+; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
+; IF-EVL: vector.body:
+; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_IND2:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION1]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT5:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; IF-EVL-NEXT: [[TMP7:%.*]] = mul i32 1, [[TMP6]]
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP6]] to i64
+; IF-EVL-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP8]]
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP10:%.*]] = shl nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 2)
+; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[TMP10]]
+; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND2]], <vscale x 4 x ptr> align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP12:%.*]] = or disjoint <vscale x 4 x i64> [[TMP10]], splat (i64 1)
+; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP12]]
+; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND2]], <vscale x 4 x ptr> align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = or disjoint <vscale x 4 x i64> [[TMP10]], splat (i64 3)
+; IF-EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP14]]
+; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND2]], <vscale x 4 x ptr> align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP6]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT4]]
+; IF-EVL-NEXT: [[VEC_IND_NEXT5]] = add <vscale x 4 x i32> [[VEC_IND2]], [[BROADCAST_SPLAT]]
+; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; IF-EVL: middle.block:
+; IF-EVL-NEXT: br label [[EXIT:%.*]]
+; IF-EVL: scalar.ph:
+; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
+; IF-EVL: for.body:
+; IF-EVL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP18:%.*]] = shl nsw i64 [[INDVARS_IV]], 2
+; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP18]]
+; IF-EVL-NEXT: [[TMP19:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; IF-EVL-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX]], align 4
+; IF-EVL-NEXT: [[TMP20:%.*]] = or disjoint i64 [[TMP18]], 1
+; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP20]]
+; IF-EVL-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX4]], align 4
+; IF-EVL-NEXT: [[TMP21:%.*]] = or disjoint i64 [[TMP18]], 3
+; IF-EVL-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP21]]
+; IF-EVL-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX8]], align 4
+; IF-EVL-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; IF-EVL: exit:
+; IF-EVL-NEXT: ret void
+;
+; NO-VP-LABEL: @store_factor_4_with_gap(
+; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul <vscale x 4 x i64> [[TMP4]], splat (i64 1)
+; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP5]]
+; NO-VP-NEXT: [[TMP6:%.*]] = mul i64 1, [[TMP3]]
+; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP6]], i64 0
+; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; NO-VP-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i32> [[TMP7]], splat (i32 1)
+; NO-VP-NEXT: [[INDUCTION1:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP8]]
+; NO-VP-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP3]] to i32
+; NO-VP-NEXT: [[TMP10:%.*]] = mul i32 1, [[TMP9]]
+; NO-VP-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP10]], i64 0
+; NO-VP-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT2]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[VEC_IND4:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION1]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT5:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP11:%.*]] = shl nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 2)
+; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[TMP11]]
+; NO-VP-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND4]], <vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true))
+; NO-VP-NEXT: [[TMP13:%.*]] = or disjoint <vscale x 4 x i64> [[TMP11]], splat (i64 1)
+; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP13]]
+; NO-VP-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND4]], <vscale x 4 x ptr> [[TMP14]], i32 4, <vscale x 4 x i1> splat (i1 true))
+; NO-VP-NEXT: [[TMP15:%.*]] = or disjoint <vscale x 4 x i64> [[TMP11]], splat (i64 3)
+; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP15]]
+; NO-VP-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND4]], <vscale x 4 x ptr> [[TMP16]], i32 4, <vscale x 4 x i1> splat (i1 true))
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; NO-VP-NEXT: [[VEC_IND_NEXT5]] = add <vscale x 4 x i32> [[VEC_IND4]], [[BROADCAST_SPLAT3]]
+; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VP: for.body:
+; NO-VP-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[TMP18:%.*]] = shl nsw i64 [[INDVARS_IV]], 2
+; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP18]]
+; NO-VP-NEXT: [[TMP19:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; NO-VP-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX]], align 4
+; NO-VP-NEXT: [[TMP20:%.*]] = or disjoint i64 [[TMP18]], 1
+; NO-VP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP20]]
+; NO-VP-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX4]], align 4
+; NO-VP-NEXT: [[TMP21:%.*]] = or disjoint i64 [[TMP18]], 3
+; NO-VP-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP21]]
+; NO-VP-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX8]], align 4
+; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; NO-VP: exit:
+; NO-VP-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %0 = shl nsw i64 %iv, 2
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %0
+ %1 = trunc i64 %iv to i32
+ store i32 %1, ptr %arrayidx, align 4
+ %2 = or disjoint i64 %0, 1
+ %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %2
+ store i32 %1, ptr %arrayidx4, align 4
+ %3 = or disjoint i64 %0, 3
+ %arrayidx8 = getelementptr inbounds i32, ptr %a, i64 %3
+ store i32 %1, ptr %arrayidx8, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
+
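+; Interleaved load group of factor 4 with a tail gap: members 0, 1 and 2 are
+; loaded and accumulated; the last member 3 is never read.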
+define i32 @load_factor_4_with_tail_gap(i64 %n, ptr noalias %a) {
+; IF-EVL-LABEL: @load_factor_4_with_tail_gap(
+; IF-EVL-NEXT: entry:
+; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; IF-EVL: vector.ph:
+; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; IF-EVL-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; IF-EVL-NEXT: [[TMP3:%.*]] = mul <vscale x 4 x i64> [[TMP2]], splat (i64 1)
+; IF-EVL-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP3]]
+; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
+; IF-EVL: vector.body:
+; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP4:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; IF-EVL-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
+; IF-EVL-NEXT: [[TMP6:%.*]] = mul i64 1, [[TMP5]]
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP6]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP7:%.*]] = shl nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 2)
+; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[TMP7]]
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
+; IF-EVL-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], [[VEC_PHI]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = or disjoint <vscale x 4 x i64> [[TMP7]], splat (i64 1)
+; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP10]]
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
+; IF-EVL-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[TMP9]], [[WIDE_MASKED_GATHER1]]
+; IF-EVL-NEXT: [[TMP13:%.*]] = or disjoint <vscale x 4 x i64> [[TMP7]], splat (i64 2)
+; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP13]]
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP4]])
+; IF-EVL-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[TMP12]], [[WIDE_MASKED_GATHER2]]
+; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP15]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP4]])
+; IF-EVL-NEXT: [[TMP17:%.*]] = zext i32 [[TMP4]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP17]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]]
+; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; IF-EVL: middle.block:
+; IF-EVL-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]])
+; IF-EVL-NEXT: br label [[EXIT:%.*]]
+; IF-EVL: scalar.ph:
+; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
+; IF-EVL: for.body:
+; IF-EVL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RED_069:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ADD11:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP20:%.*]] = shl nsw i64 [[INDVARS_IV]], 2
+; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP20]]
+; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; IF-EVL-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP21]], [[RED_069]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = or disjoint i64 [[TMP20]], 1
+; IF-EVL-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP22]]
+; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
+; IF-EVL-NEXT: [[ADD6:%.*]] = add nsw i32 [[ADD1]], [[TMP23]]
+; IF-EVL-NEXT: [[TMP24:%.*]] = or disjoint i64 [[TMP20]], 2
+; IF-EVL-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP24]]
+; IF-EVL-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4
+; IF-EVL-NEXT: [[ADD11]] = add nsw i32 [[ADD6]], [[TMP25]]
+; IF-EVL-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; IF-EVL: exit:
+; IF-EVL-NEXT: [[ADD11_LCSSA:%.*]] = phi i32 [ [[ADD11]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
+; IF-EVL-NEXT: ret i32 [[ADD11_LCSSA]]
+;
+; NO-VP-LABEL: @load_factor_4_with_tail_gap(
+; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[N:%.*]], [[TMP1]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; NO-VP-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; NO-VP-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[TMP5]]
+; NO-VP-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; NO-VP-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 1)
+; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP7]]
+; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP3]]
+; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0
+; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = shl nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 2)
+; NO-VP-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[TMP9]]
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP10]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP12:%.*]] = or disjoint <vscale x 4 x i64> [[TMP9]], splat (i64 1)
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP12]]
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP13]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER1]]
+; NO-VP-NEXT: [[TMP15:%.*]] = or disjoint <vscale x 4 x i64> [[TMP9]], splat (i64 2)
+; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP15]]
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP16]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP17]] = add <vscale x 4 x i32> [[TMP14]], [[WIDE_MASKED_GATHER2]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; NO-VP-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP17]])
+; NO-VP-NEXT: br label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; NO-VP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP19]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VP: for.body:
+; NO-VP-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[RED_069:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD11:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[TMP20:%.*]] = shl nsw i64 [[INDVARS_IV]], 2
+; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP20]]
+; NO-VP-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; NO-VP-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP21]], [[RED_069]]
+; NO-VP-NEXT: [[TMP22:%.*]] = or disjoint i64 [[TMP20]], 1
+; NO-VP-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP22]]
+; NO-VP-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
+; NO-VP-NEXT: [[ADD6:%.*]] = add nsw i32 [[ADD1]], [[TMP23]]
+; NO-VP-NEXT: [[TMP24:%.*]] = or disjoint i64 [[TMP20]], 2
+; NO-VP-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP24]]
+; NO-VP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4
+; NO-VP-NEXT: [[ADD11]] = add nsw i32 [[ADD6]], [[TMP25]]
+; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; NO-VP: exit:
+; NO-VP-NEXT: [[ADD11_LCSSA:%.*]] = phi i32 [ [[ADD11]], [[FOR_BODY]] ]
+; NO-VP-NEXT: ret i32 [[ADD11_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %rdx = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+ %0 = shl nsw i64 %iv, 2
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %0
+ %1 = load i32, ptr %arrayidx, align 4
+ %add1 = add nsw i32 %1, %rdx
+ %2 = or disjoint i64 %0, 1
+ %arrayidx5 = getelementptr inbounds i32, ptr %a, i64 %2
+ %3 = load i32, ptr %arrayidx5, align 4
+ %add6 = add nsw i32 %add1, %3
+ %4 = or disjoint i64 %0, 2
+ %arrayidx10 = getelementptr inbounds i32, ptr %a, i64 %4
+ %5 = load i32, ptr %arrayidx10, align 4
+ %add11 = add nsw i32 %add6, %5
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret i32 %add11
+}
+
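+; Interleaved store group of factor 4 with a tail gap: members 0, 1 and 2 are
+; written; the last member 3 is left untouched.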
+define void @store_factor_4_with_tail_gap(i64 %n, ptr noalias %a) {
+; IF-EVL-LABEL: @store_factor_4_with_tail_gap(
+; IF-EVL-NEXT: entry:
+; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; IF-EVL: vector.ph:
+; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; IF-EVL-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; IF-EVL-NEXT: [[TMP3:%.*]] = mul <vscale x 4 x i64> [[TMP2]], splat (i64 1)
+; IF-EVL-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP3]]
+; IF-EVL-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; IF-EVL-NEXT: [[TMP5:%.*]] = mul <vscale x 4 x i32> [[TMP4]], splat (i32 1)
+; IF-EVL-NEXT: [[INDUCTION1:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP5]]
+; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
+; IF-EVL: vector.body:
+; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_IND2:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION1]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT5:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; IF-EVL-NEXT: [[TMP7:%.*]] = mul i32 1, [[TMP6]]
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP6]] to i64
+; IF-EVL-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP8]]
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP10:%.*]] = shl nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 2)
+; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[TMP10]]
+; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND2]], <vscale x 4 x ptr> align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP12:%.*]] = or disjoint <vscale x 4 x i64> [[TMP10]], splat (i64 1)
+; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP12]]
+; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND2]], <vscale x 4 x ptr> align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = or disjoint <vscale x 4 x i64> [[TMP10]], splat (i64 2)
+; IF-EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP14]]
+; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND2]], <vscale x 4 x ptr> align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP6]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT4]]
+; IF-EVL-NEXT: [[VEC_IND_NEXT5]] = add <vscale x 4 x i32> [[VEC_IND2]], [[BROADCAST_SPLAT]]
+; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; IF-EVL: middle.block:
+; IF-EVL-NEXT: br label [[EXIT:%.*]]
+; IF-EVL: scalar.ph:
+; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
+; IF-EVL: for.body:
+; IF-EVL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP18:%.*]] = shl nsw i64 [[INDVARS_IV]], 2
+; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP18]]
+; IF-EVL-NEXT: [[TMP19:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; IF-EVL-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX]], align 4
+; IF-EVL-NEXT: [[TMP20:%.*]] = or disjoint i64 [[TMP18]], 1
+; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP20]]
+; IF-EVL-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX4]], align 4
+; IF-EVL-NEXT: [[TMP21:%.*]] = or disjoint i64 [[TMP18]], 2
+; IF-EVL-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP21]]
+; IF-EVL-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX8]], align 4
+; IF-EVL-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; IF-EVL: exit:
+; IF-EVL-NEXT: ret void
+;
+; NO-VP-LABEL: @store_factor_4_with_tail_gap(
+; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul <vscale x 4 x i64> [[TMP4]], splat (i64 1)
+; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP5]]
+; NO-VP-NEXT: [[TMP6:%.*]] = mul i64 1, [[TMP3]]
+; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP6]], i64 0
+; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; NO-VP-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i32> [[TMP7]], splat (i32 1)
+; NO-VP-NEXT: [[INDUCTION1:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP8]]
+; NO-VP-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP3]] to i32
+; NO-VP-NEXT: [[TMP10:%.*]] = mul i32 1, [[TMP9]]
+; NO-VP-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP10]], i64 0
+; NO-VP-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT2]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[VEC_IND4:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION1]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT5:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP11:%.*]] = shl nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 2)
+; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[TMP11]]
+; NO-VP-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND4]], <vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true))
+; NO-VP-NEXT: [[TMP13:%.*]] = or disjoint <vscale x 4 x i64> [[TMP11]], splat (i64 1)
+; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP13]]
+; NO-VP-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND4]], <vscale x 4 x ptr> [[TMP14]], i32 4, <vscale x 4 x i1> splat (i1 true))
+; NO-VP-NEXT: [[TMP15:%.*]] = or disjoint <vscale x 4 x i64> [[TMP11]], splat (i64 2)
+; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP15]]
+; NO-VP-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND4]], <vscale x 4 x ptr> [[TMP16]], i32 4, <vscale x 4 x i1> splat (i1 true))
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; NO-VP-NEXT: [[VEC_IND_NEXT5]] = add <vscale x 4 x i32> [[VEC_IND4]], [[BROADCAST_SPLAT3]]
+; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VP: for.body:
+; NO-VP-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[TMP18:%.*]] = shl nsw i64 [[INDVARS_IV]], 2
+; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP18]]
+; NO-VP-NEXT: [[TMP19:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; NO-VP-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX]], align 4
+; NO-VP-NEXT: [[TMP20:%.*]] = or disjoint i64 [[TMP18]], 1
+; NO-VP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP20]]
+; NO-VP-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX4]], align 4
+; NO-VP-NEXT: [[TMP21:%.*]] = or disjoint i64 [[TMP18]], 2
+; NO-VP-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP21]]
+; NO-VP-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX8]], align 4
+; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; NO-VP: exit:
+; NO-VP-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %0 = shl nsw i64 %iv, 2
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %0
+ %1 = trunc i64 %iv to i32
+ store i32 %1, ptr %arrayidx, align 4
+ %2 = or disjoint i64 %0, 1
+ %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %2
+ store i32 %1, ptr %arrayidx4, align 4
+ %3 = or disjoint i64 %0, 2
+ %arrayidx8 = getelementptr inbounds i32, ptr %a, i64 %3
+ store i32 %1, ptr %arrayidx8, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
+
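+; Reversed interleaved load group of factor 4: all four members are loaded
+; and accumulated while the induction variable counts down from n.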
+define i32 @load_factor_4_reverse(i64 %n, ptr noalias %a) {
+; IF-EVL-LABEL: @load_factor_4_reverse(
+; IF-EVL-NEXT: entry:
+; IF-EVL-NEXT: [[TMP0:%.*]] = add nsw i64 [[N:%.*]], -1
+; IF-EVL-NEXT: [[SMIN:%.*]] = call i64 @llvm.smin.i64(i64 [[TMP0]], i64 0)
+; IF-EVL-NEXT: [[TMP1:%.*]] = sub i64 [[N]], [[SMIN]]
+; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; IF-EVL: vector.ph:
+; IF-EVL-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; IF-EVL-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[N]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP5:%.*]] = mul <vscale x 4 x i64> [[TMP4]], splat (i64 -1)
+; IF-EVL-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP5]]
+; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
+; IF-EVL: vector.body:
+; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP1]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; IF-EVL-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
+; IF-EVL-NEXT: [[TMP8:%.*]] = mul i64 -1, [[TMP7]]
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP9:%.*]] = shl nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 2)
+; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[TMP9]]
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP10]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], [[VEC_PHI]]
+; IF-EVL-NEXT: [[TMP12:%.*]] = or disjoint <vscale x 4 x i64> [[TMP9]], splat (i64 1)
+; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP12]]
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[TMP11]], [[WIDE_MASKED_GATHER3]]
+; IF-EVL-NEXT: [[TMP15:%.*]] = or disjoint <vscale x 4 x i64> [[TMP9]], splat (i64 2)
+; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP15]]
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[TMP14]], [[WIDE_MASKED_GATHER4]]
+; IF-EVL-NEXT: [[TMP18:%.*]] = or disjoint <vscale x 4 x i64> [[TMP9]], splat (i64 3)
+; IF-EVL-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP18]]
+; IF-EVL-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[TMP17]], [[WIDE_MASKED_GATHER5]]
+; IF-EVL-NEXT: [[TMP21]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP20]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP6]])
+; IF-EVL-NEXT: [[TMP22:%.*]] = zext i32 [[TMP6]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP22]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
+; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; IF-EVL-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP1]]
+; IF-EVL-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; IF-EVL: middle.block:
+; IF-EVL-NEXT: [[TMP24:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP21]])
+; IF-EVL-NEXT: br label [[EXIT:%.*]]
+; IF-EVL: scalar.ph:
+; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
+; IF-EVL: for.body:
+; IF-EVL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[N]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RED_069:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ADD12:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP25:%.*]] = shl nsw i64 [[INDVARS_IV]], 2
+; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP25]]
+; IF-EVL-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; IF-EVL-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP26]], [[RED_069]]
+; IF-EVL-NEXT: [[TMP27:%.*]] = or disjoint i64 [[TMP25]], 1
+; IF-EVL-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP27]]
+; IF-EVL-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
+; IF-EVL-NEXT: [[ADD6:%.*]] = add nsw i32 [[ADD1]], [[TMP28]]
+; IF-EVL-NEXT: [[TMP29:%.*]] = or disjoint i64 [[TMP25]], 2
+; IF-EVL-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP29]]
+; IF-EVL-NEXT: [[TMP30:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4
+; IF-EVL-NEXT: [[ADD11:%.*]] = add nsw i32 [[ADD6]], [[TMP30]]
+; IF-EVL-NEXT: [[TMP31:%.*]] = or disjoint i64 [[TMP25]], 3
+; IF-EVL-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP31]]
+; IF-EVL-NEXT: [[TMP32:%.*]] = load i32, ptr [[ARRAYIDX7]], align 4
+; IF-EVL-NEXT: [[ADD12]] = add nsw i32 [[ADD11]], [[TMP32]]
+; IF-EVL-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
+; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[INDVARS_IV_NEXT]], 0
+; IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[EXIT]], !llvm.loop [[LOOP14:![0-9]+]]
+; IF-EVL: exit:
+; IF-EVL-NEXT: [[ADD12_LCSSA:%.*]] = phi i32 [ [[ADD12]], [[FOR_BODY]] ], [ [[TMP24]], [[MIDDLE_BLOCK]] ]
+; IF-EVL-NEXT: ret i32 [[ADD12_LCSSA]]
+;
+; NO-VP-LABEL: @load_factor_4_reverse(
+; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP0:%.*]] = add nsw i64 [[N:%.*]], -1
+; NO-VP-NEXT: [[SMIN:%.*]] = call i64 @llvm.smin.i64(i64 [[TMP0]], i64 0)
+; NO-VP-NEXT: [[TMP1:%.*]] = sub i64 [[N]], [[SMIN]]
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[N]], i64 0
+; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; NO-VP-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i64> [[TMP7]], splat (i64 -1)
+; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP8]]
+; NO-VP-NEXT: [[TMP9:%.*]] = mul i64 -1, [[TMP5]]
+; NO-VP-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; NO-VP-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP10:%.*]] = shl nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 2)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[TMP10]]
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP11]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP13:%.*]] = or disjoint <vscale x 4 x i64> [[TMP10]], splat (i64 1)
+; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP13]]
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP14]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[TMP12]], [[WIDE_MASKED_GATHER3]]
+; NO-VP-NEXT: [[TMP16:%.*]] = or disjoint <vscale x 4 x i64> [[TMP10]], splat (i64 2)
+; NO-VP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP16]]
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP17]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP18:%.*]] = add <vscale x 4 x i32> [[TMP15]], [[WIDE_MASKED_GATHER4]]
+; NO-VP-NEXT: [[TMP19:%.*]] = or disjoint <vscale x 4 x i64> [[TMP10]], splat (i64 3)
+; NO-VP-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP19]]
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP20]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP21]] = add <vscale x 4 x i32> [[TMP18]], [[WIDE_MASKED_GATHER5]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; NO-VP-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP21]])
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[N]], [[ENTRY:%.*]] ]
+; NO-VP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP23]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VP: for.body:
+; NO-VP-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[RED_069:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD12:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[TMP24:%.*]] = shl nsw i64 [[INDVARS_IV]], 2
+; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP24]]
+; NO-VP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; NO-VP-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP25]], [[RED_069]]
+; NO-VP-NEXT: [[TMP26:%.*]] = or disjoint i64 [[TMP24]], 1
+; NO-VP-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP26]]
+; NO-VP-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
+; NO-VP-NEXT: [[ADD6:%.*]] = add nsw i32 [[ADD1]], [[TMP27]]
+; NO-VP-NEXT: [[TMP28:%.*]] = or disjoint i64 [[TMP24]], 2
+; NO-VP-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP28]]
+; NO-VP-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4
+; NO-VP-NEXT: [[ADD11:%.*]] = add nsw i32 [[ADD6]], [[TMP29]]
+; NO-VP-NEXT: [[TMP30:%.*]] = or disjoint i64 [[TMP24]], 3
+; NO-VP-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP30]]
+; NO-VP-NEXT: [[TMP31:%.*]] = load i32, ptr [[ARRAYIDX7]], align 4
+; NO-VP-NEXT: [[ADD12]] = add nsw i32 [[ADD11]], [[TMP31]]
+; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
+; NO-VP-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[INDVARS_IV_NEXT]], 0
+; NO-VP-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[EXIT]], !llvm.loop [[LOOP13:![0-9]+]]
+; NO-VP: exit:
+; NO-VP-NEXT: [[ADD12_LCSSA:%.*]] = phi i32 [ [[ADD12]], [[FOR_BODY]] ], [ [[TMP23]], [[MIDDLE_BLOCK]] ]
+; NO-VP-NEXT: ret i32 [[ADD12_LCSSA]]
+;
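+; The scalar source below sums four consecutive i32 fields per iteration at
+; base offset 4*iv, with iv counting down from %n; the checks above show the
+; group is emitted as per-member gathers rather than a single wide access.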
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ %n, %entry ], [ %iv.next, %for.body ]
+ %rdx = phi i32 [ 0, %entry ], [ %add12, %for.body ]
+ %0 = shl nsw i64 %iv, 2
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %0
+ %1 = load i32, ptr %arrayidx, align 4
+ %add1 = add nsw i32 %1, %rdx
+ %2 = or disjoint i64 %0, 1
+ %arrayidx5 = getelementptr inbounds i32, ptr %a, i64 %2
+ %3 = load i32, ptr %arrayidx5, align 4
+ %add6 = add nsw i32 %add1, %3
+ %4 = or disjoint i64 %0, 2
+ %arrayidx10 = getelementptr inbounds i32, ptr %a, i64 %4
+ %5 = load i32, ptr %arrayidx10, align 4
+ %add11 = add nsw i32 %add6, %5
+ %6 = or disjoint i64 %0, 3
+ %arrayidx7 = getelementptr inbounds i32, ptr %a, i64 %6
+  %7 = load i32, ptr %arrayidx7, align 4
+  %add12 = add nsw i32 %add11, %7
+ %iv.next = add nsw i64 %iv, -1
+ %exitcond = icmp sgt i64 %iv.next, 0
+ br i1 %exitcond, label %for.body, label %exit
+
+exit:
+ ret i32 %add12
+}
+
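+; Factor-4 interleaved store with a reversed (down-counting) induction
+; variable. Each iteration stores trunc(iv) to all four members of the
+; group; per the checks, both IF-EVL and NO-VP currently fall back to
+; scatters instead of recognizing the interleaved group.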
+define void @store_factor_4_reverse(i64 %n, ptr noalias %a) {
+; IF-EVL-LABEL: @store_factor_4_reverse(
+; IF-EVL-NEXT: entry:
+; IF-EVL-NEXT: [[TMP0:%.*]] = add nsw i64 [[N:%.*]], -1
+; IF-EVL-NEXT: [[SMIN:%.*]] = call i64 @llvm.smin.i64(i64 [[TMP0]], i64 0)
+; IF-EVL-NEXT: [[TMP1:%.*]] = sub i64 [[N]], [[SMIN]]
+; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; IF-EVL: vector.ph:
+; IF-EVL-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; IF-EVL-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[N]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP5:%.*]] = mul <vscale x 4 x i64> [[TMP4]], splat (i64 -1)
+; IF-EVL-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP5]]
+; IF-EVL-NEXT: [[TMP6:%.*]] = trunc i64 [[N]] to i32
+; IF-EVL-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP6]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i32> [[TMP7]], splat (i32 -1)
+; IF-EVL-NEXT: [[INDUCTION3:%.*]] = add <vscale x 4 x i32> [[BROADCAST_SPLAT2]], [[TMP8]]
+; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
+; IF-EVL: vector.body:
+; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_IND4:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT9:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP1]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul i32 -1, [[TMP9]]
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP10]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT5]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
+; IF-EVL-NEXT: [[TMP12:%.*]] = mul i64 -1, [[TMP11]]
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP12]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT7]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP13:%.*]] = shl nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 2)
+; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[TMP13]]
+; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND4]], <vscale x 4 x ptr> align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; IF-EVL-NEXT: [[TMP15:%.*]] = or disjoint <vscale x 4 x i64> [[TMP13]], splat (i64 1)
+; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP15]]
+; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND4]], <vscale x 4 x ptr> align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; IF-EVL-NEXT: [[TMP17:%.*]] = or disjoint <vscale x 4 x i64> [[TMP13]], splat (i64 2)
+; IF-EVL-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP17]]
+; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND4]], <vscale x 4 x ptr> align 4 [[TMP18]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; IF-EVL-NEXT: [[TMP19:%.*]] = or disjoint <vscale x 4 x i64> [[TMP13]], splat (i64 3)
+; IF-EVL-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP19]]
+; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND4]], <vscale x 4 x ptr> align 4 [[TMP20]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP9]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
+; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT8]]
+; IF-EVL-NEXT: [[VEC_IND_NEXT9]] = add <vscale x 4 x i32> [[VEC_IND4]], [[BROADCAST_SPLAT6]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP1]]
+; IF-EVL-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; IF-EVL: middle.block:
+; IF-EVL-NEXT: br label [[EXIT:%.*]]
+; IF-EVL: scalar.ph:
+; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
+; IF-EVL: for.body:
+; IF-EVL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[N]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP23:%.*]] = shl nsw i64 [[INDVARS_IV]], 2
+; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP23]]
+; IF-EVL-NEXT: [[TMP24:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; IF-EVL-NEXT: store i32 [[TMP24]], ptr [[ARRAYIDX]], align 4
+; IF-EVL-NEXT: [[TMP25:%.*]] = or disjoint i64 [[TMP23]], 1
+; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP25]]
+; IF-EVL-NEXT: store i32 [[TMP24]], ptr [[ARRAYIDX4]], align 4
+; IF-EVL-NEXT: [[TMP26:%.*]] = or disjoint i64 [[TMP23]], 2
+; IF-EVL-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP26]]
+; IF-EVL-NEXT: store i32 [[TMP24]], ptr [[ARRAYIDX8]], align 4
+; IF-EVL-NEXT: [[TMP27:%.*]] = or disjoint i64 [[TMP23]], 3
+; IF-EVL-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP27]]
+; IF-EVL-NEXT: store i32 [[TMP24]], ptr [[ARRAYIDX9]], align 4
+; IF-EVL-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
+; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[INDVARS_IV_NEXT]], 0
+; IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[EXIT]], !llvm.loop [[LOOP16:![0-9]+]]
+; IF-EVL: exit:
+; IF-EVL-NEXT: ret void
+;
+; NO-VP-LABEL: @store_factor_4_reverse(
+; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP0:%.*]] = add nsw i64 [[N:%.*]], -1
+; NO-VP-NEXT: [[SMIN:%.*]] = call i64 @llvm.smin.i64(i64 [[TMP0]], i64 0)
+; NO-VP-NEXT: [[TMP1:%.*]] = sub i64 [[N]], [[SMIN]]
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP5]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[N]], i64 0
+; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; NO-VP-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i64> [[TMP7]], splat (i64 -1)
+; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP8]]
+; NO-VP-NEXT: [[TMP9:%.*]] = mul i64 -1, [[TMP5]]
+; NO-VP-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; NO-VP-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; NO-VP-NEXT: [[TMP10:%.*]] = trunc i64 [[N]] to i32
+; NO-VP-NEXT: [[TMP11:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; NO-VP-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP10]], i64 0
+; NO-VP-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; NO-VP-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], splat (i32 -1)
+; NO-VP-NEXT: [[INDUCTION5:%.*]] = add <vscale x 4 x i32> [[BROADCAST_SPLAT4]], [[TMP12]]
+; NO-VP-NEXT: [[TMP13:%.*]] = trunc i64 [[TMP5]] to i32
+; NO-VP-NEXT: [[TMP14:%.*]] = mul i32 -1, [[TMP13]]
+; NO-VP-NEXT: [[BROADCAST_SPLATINSERT6:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP14]], i64 0
+; NO-VP-NEXT: [[BROADCAST_SPLAT7:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT6]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[VEC_IND8:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT9:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP15:%.*]] = shl nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 2)
+; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], <vscale x 4 x i64> [[TMP15]]
+; NO-VP-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND8]], <vscale x 4 x ptr> [[TMP16]], i32 4, <vscale x 4 x i1> splat (i1 true))
+; NO-VP-NEXT: [[TMP17:%.*]] = or disjoint <vscale x 4 x i64> [[TMP15]], splat (i64 1)
+; NO-VP-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP17]]
+; NO-VP-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND8]], <vscale x 4 x ptr> [[TMP18]], i32 4, <vscale x 4 x i1> splat (i1 true))
+; NO-VP-NEXT: [[TMP19:%.*]] = or disjoint <vscale x 4 x i64> [[TMP15]], splat (i64 2)
+; NO-VP-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP19]]
+; NO-VP-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND8]], <vscale x 4 x ptr> [[TMP20]], i32 4, <vscale x 4 x i1> splat (i1 true))
+; NO-VP-NEXT: [[TMP21:%.*]] = or disjoint <vscale x 4 x i64> [[TMP15]], splat (i64 3)
+; NO-VP-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], <vscale x 4 x i64> [[TMP21]]
+; NO-VP-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[VEC_IND8]], <vscale x 4 x ptr> [[TMP22]], i32 4, <vscale x 4 x i1> splat (i1 true))
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; NO-VP-NEXT: [[VEC_IND_NEXT9]] = add <vscale x 4 x i32> [[VEC_IND8]], [[BROADCAST_SPLAT7]]
+; NO-VP-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[N]], [[ENTRY:%.*]] ]
+; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VP: for.body:
+; NO-VP-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[TMP24:%.*]] = shl nsw i64 [[INDVARS_IV]], 2
+; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP24]]
+; NO-VP-NEXT: [[TMP25:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; NO-VP-NEXT: store i32 [[TMP25]], ptr [[ARRAYIDX]], align 4
+; NO-VP-NEXT: [[TMP26:%.*]] = or disjoint i64 [[TMP24]], 1
+; NO-VP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP26]]
+; NO-VP-NEXT: store i32 [[TMP25]], ptr [[ARRAYIDX4]], align 4
+; NO-VP-NEXT: [[TMP27:%.*]] = or disjoint i64 [[TMP24]], 2
+; NO-VP-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP27]]
+; NO-VP-NEXT: store i32 [[TMP25]], ptr [[ARRAYIDX8]], align 4
+; NO-VP-NEXT: [[TMP28:%.*]] = or disjoint i64 [[TMP24]], 3
+; NO-VP-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP28]]
+; NO-VP-NEXT: store i32 [[TMP25]], ptr [[ARRAYIDX9]], align 4
+; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
+; NO-VP-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[INDVARS_IV_NEXT]], 0
+; NO-VP-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[EXIT]], !llvm.loop [[LOOP15:![0-9]+]]
+; NO-VP: exit:
+; NO-VP-NEXT: ret void
+;
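+; Scalar source: iv counts down from %n and writes the same truncated value
+; to offsets 4*iv .. 4*iv+3.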
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ %n, %entry ], [ %iv.next, %for.body ]
+ %0 = shl nsw i64 %iv, 2
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %0
+ %1 = trunc i64 %iv to i32
+ store i32 %1, ptr %arrayidx, align 4
+ %2 = or disjoint i64 %0, 1
+ %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %2
+ store i32 %1, ptr %arrayidx4, align 4
+ %3 = or disjoint i64 %0, 2
+ %arrayidx8 = getelementptr inbounds i32, ptr %a, i64 %3
+ store i32 %1, ptr %arrayidx8, align 4
+ %4 = or disjoint i64 %0, 3
+ %arrayidx9 = getelementptr inbounds i32, ptr %a, i64 %4
+ store i32 %1, ptr %arrayidx9, align 4
+ %iv.next = add nsw i64 %iv, -1
+ %exitcond = icmp sgt i64 %iv.next, 0
+ br i1 %exitcond, label %for.body, label %exit
+
+exit:
+ ret void
+}
+
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.vectorize.enable", i1 true}