[llvm] Slp basic test (PR #152355)

Mikhail Gudim via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 6 11:11:43 PDT 2025


https://github.com/mgudim created https://github.com/llvm/llvm-project/pull/152355

Add a basic test for the SLPVectorizer to make sure that upcoming
refactoring patches don't break anything. Also, record a test for a
missed vectorization opportunity.

From 3edeae584cd1ca6b659489be0d2c920c332ff38a Mon Sep 17 00:00:00 2001
From: Mikhail Gudim <mgudim at ventanamicro.com>
Date: Tue, 5 Aug 2025 11:08:41 -0700
Subject: [PATCH 1/2] Add basic test for strided loads.

---
 .../RISCV/basic-strided-loads.ll              | 830 ++++++++++++++++++
 1 file changed, 830 insertions(+)
 create mode 100644 llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll

diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll
new file mode 100644
index 0000000000000..8f3de5071d64b
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll
@@ -0,0 +1,830 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+
+; RUN: opt -mtriple=riscv64 -mattr=+m,+v -passes=slp-vectorizer -S < %s | FileCheck %s
+
+define void @const_stride_1_no_reordering(ptr %pl, ptr %ps) {
+; CHECK-LABEL: define void @const_stride_1_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT:    [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 16
+; CHECK-NEXT:    store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT:    ret void
+;
+  ; 16 scalar i8 loads from consecutive addresses pl+0 .. pl+15 (constant
+  ; stride 1), stored to ps+0 .. ps+15 in identical order. Per the CHECK
+  ; lines above, SLP collapses this into one <16 x i8> load/store pair.
+  ; NOTE(review): every scalar access claims `align 16`, which can only be
+  ; simultaneously true for the offset-0 element -- presumably deliberate so
+  ; the vectorized access inherits align 16; confirm this is intended.
+  %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+  %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1
+  %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2
+  %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3
+  %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 4
+  %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 5
+  %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 6
+  %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 7
+  %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 8
+  %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 9
+  %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 10
+  %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 11
+  %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 12
+  %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 13
+  %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 14
+  %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 15
+
+  %load0  = load i8, ptr %gep_l0 , align 16
+  %load1  = load i8, ptr %gep_l1 , align 16
+  %load2  = load i8, ptr %gep_l2 , align 16
+  %load3  = load i8, ptr %gep_l3 , align 16
+  %load4  = load i8, ptr %gep_l4 , align 16
+  %load5  = load i8, ptr %gep_l5 , align 16
+  %load6  = load i8, ptr %gep_l6 , align 16
+  %load7  = load i8, ptr %gep_l7 , align 16
+  %load8  = load i8, ptr %gep_l8 , align 16
+  %load9  = load i8, ptr %gep_l9 , align 16
+  %load10 = load i8, ptr %gep_l10, align 16
+  %load11 = load i8, ptr %gep_l11, align 16
+  %load12 = load i8, ptr %gep_l12, align 16
+  %load13 = load i8, ptr %gep_l13, align 16
+  %load14 = load i8, ptr %gep_l14, align 16
+  %load15 = load i8, ptr %gep_l15, align 16
+
+  %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+  %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+  %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+  %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+  %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+  %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+  %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+  %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+  %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+  %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+  %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+  %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+  %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+  %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+  %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+  %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+  store i8 %load0, ptr %gep_s0, align 16
+  store i8 %load1, ptr %gep_s1, align 16
+  store i8 %load2, ptr %gep_s2, align 16
+  store i8 %load3, ptr %gep_s3, align 16
+  store i8 %load4, ptr %gep_s4, align 16
+  store i8 %load5, ptr %gep_s5, align 16
+  store i8 %load6, ptr %gep_s6, align 16
+  store i8 %load7, ptr %gep_s7, align 16
+  store i8 %load8, ptr %gep_s8, align 16
+  store i8 %load9, ptr %gep_s9, align 16
+  store i8 %load10, ptr %gep_s10, align 16
+  store i8 %load11, ptr %gep_s11, align 16
+  store i8 %load12, ptr %gep_s12, align 16
+  store i8 %load13, ptr %gep_s13, align 16
+  store i8 %load14, ptr %gep_s14, align 16
+  store i8 %load15, ptr %gep_s15, align 16
+
+  ret void
+}
+
+define void @const_stride_1_with_reordering(ptr %pl, ptr %ps) {
+; CHECK-LABEL: define void @const_stride_1_with_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT:    [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT:    store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT:    ret void
+;
+  ; Same contiguous 16-byte copy as @const_stride_1_no_reordering, except the
+  ; stores swap elements 0 and 1 (%load1 goes to ps+0, %load0 to ps+1). Per
+  ; the CHECK lines, SLP still emits a single <16 x i8> load and resolves the
+  ; reordering with one shufflevector (mask <1, 0, 2, 3, ...>) before the store.
+  %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+  %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1
+  %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2
+  %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3
+  %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 4
+  %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 5
+  %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 6
+  %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 7
+  %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 8
+  %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 9
+  %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 10
+  %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 11
+  %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 12
+  %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 13
+  %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 14
+  %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 15
+
+  %load0  = load i8, ptr %gep_l0 , align 16
+  %load1  = load i8, ptr %gep_l1 , align 16
+  %load2  = load i8, ptr %gep_l2 , align 16
+  %load3  = load i8, ptr %gep_l3 , align 16
+  %load4  = load i8, ptr %gep_l4 , align 16
+  %load5  = load i8, ptr %gep_l5 , align 16
+  %load6  = load i8, ptr %gep_l6 , align 16
+  %load7  = load i8, ptr %gep_l7 , align 16
+  %load8  = load i8, ptr %gep_l8 , align 16
+  %load9  = load i8, ptr %gep_l9 , align 16
+  %load10 = load i8, ptr %gep_l10, align 16
+  %load11 = load i8, ptr %gep_l11, align 16
+  %load12 = load i8, ptr %gep_l12, align 16
+  %load13 = load i8, ptr %gep_l13, align 16
+  %load14 = load i8, ptr %gep_l14, align 16
+  %load15 = load i8, ptr %gep_l15, align 16
+
+  %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+  %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+  %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+  %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+  %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+  %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+  %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+  %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+  %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+  %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+  %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+  %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+  %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+  %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+  %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+  %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+  store i8 %load1, ptr %gep_s0, align 16
+  store i8 %load0, ptr %gep_s1, align 16
+  store i8 %load2, ptr %gep_s2, align 16
+  store i8 %load3, ptr %gep_s3, align 16
+  store i8 %load4, ptr %gep_s4, align 16
+  store i8 %load5, ptr %gep_s5, align 16
+  store i8 %load6, ptr %gep_s6, align 16
+  store i8 %load7, ptr %gep_s7, align 16
+  store i8 %load8, ptr %gep_s8, align 16
+  store i8 %load9, ptr %gep_s9, align 16
+  store i8 %load10, ptr %gep_s10, align 16
+  store i8 %load11, ptr %gep_s11, align 16
+  store i8 %load12, ptr %gep_s12, align 16
+  store i8 %load13, ptr %gep_s13, align 16
+  store i8 %load14, ptr %gep_s14, align 16
+  store i8 %load15, ptr %gep_s15, align 16
+
+  ret void
+}
+
+
+define void @const_stride_2_no_reordering(ptr %pl, ptr %ps) {
+; CHECK-LABEL: define void @const_stride_2_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT:    [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 16, <31 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <31 x i8> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <31 x i8> [[TMP2]], <31 x i8> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+; CHECK-NEXT:    store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT:    ret void
+;
+  ; 16 i8 loads at constant stride 2 (pl+0, pl+2, ..., pl+30) stored
+  ; contiguously to ps in order. Per the CHECK lines, SLP currently emits a
+  ; <31 x i8> masked load with an every-other-lane mask plus a deinterleaving
+  ; shuffle rather than a strided load. NOTE(review): presumably this (or the
+  ; rt_stride_2 case below) is the "missed opportunity" mentioned in the
+  ; commit message -- confirm which form is ultimately desired here.
+  %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+  %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 2
+  %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 4
+  %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 6
+  %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 8
+  %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 10
+  %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 12
+  %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 14
+  %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 16
+  %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 18
+  %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 20
+  %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 22
+  %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 24
+  %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 26
+  %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 28
+  %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 30
+
+  %load0  = load i8, ptr %gep_l0 , align 16
+  %load1  = load i8, ptr %gep_l1 , align 16
+  %load2  = load i8, ptr %gep_l2 , align 16
+  %load3  = load i8, ptr %gep_l3 , align 16
+  %load4  = load i8, ptr %gep_l4 , align 16
+  %load5  = load i8, ptr %gep_l5 , align 16
+  %load6  = load i8, ptr %gep_l6 , align 16
+  %load7  = load i8, ptr %gep_l7 , align 16
+  %load8  = load i8, ptr %gep_l8 , align 16
+  %load9  = load i8, ptr %gep_l9 , align 16
+  %load10 = load i8, ptr %gep_l10, align 16
+  %load11 = load i8, ptr %gep_l11, align 16
+  %load12 = load i8, ptr %gep_l12, align 16
+  %load13 = load i8, ptr %gep_l13, align 16
+  %load14 = load i8, ptr %gep_l14, align 16
+  %load15 = load i8, ptr %gep_l15, align 16
+
+  %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+  %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+  %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+  %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+  %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+  %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+  %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+  %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+  %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+  %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+  %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+  %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+  %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+  %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+  %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+  %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+  store i8 %load0, ptr %gep_s0, align 16
+  store i8 %load1, ptr %gep_s1, align 16
+  store i8 %load2, ptr %gep_s2, align 16
+  store i8 %load3, ptr %gep_s3, align 16
+  store i8 %load4, ptr %gep_s4, align 16
+  store i8 %load5, ptr %gep_s5, align 16
+  store i8 %load6, ptr %gep_s6, align 16
+  store i8 %load7, ptr %gep_s7, align 16
+  store i8 %load8, ptr %gep_s8, align 16
+  store i8 %load9, ptr %gep_s9, align 16
+  store i8 %load10, ptr %gep_s10, align 16
+  store i8 %load11, ptr %gep_s11, align 16
+  store i8 %load12, ptr %gep_s12, align 16
+  store i8 %load13, ptr %gep_s13, align 16
+  store i8 %load14, ptr %gep_s14, align 16
+  store i8 %load15, ptr %gep_s15, align 16
+
+  ret void
+}
+
+define void @const_stride_2_with_reordering(ptr %pl, ptr %ps) {
+; CHECK-LABEL: define void @const_stride_2_with_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT:    [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 16, <31 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <31 x i8> poison)
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <31 x i8> [[TMP1]], <31 x i8> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <31 x i8> [[TMP1]], <31 x i8> poison, <16 x i32> <i32 2, i32 0, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+; CHECK-NEXT:    store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT:    ret void
+;
+  ; Constant stride-2 loads (pl+0 .. pl+30) with the first two elements
+  ; swapped at the store side. Per the CHECK lines, SLP emits the same
+  ; <31 x i8> masked load as the no-reordering variant and folds the swap
+  ; into the deinterleave shuffle (mask starts <2, 0, ...>).
+  ; NOTE(review): the captured output also contains [[TMP3]], a shuffle whose
+  ; result is never used -- looks like dead code in the vectorizer's output;
+  ; worth confirming whether that is expected.
+  %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+  %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 2
+  %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 4
+  %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 6
+  %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 8
+  %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 10
+  %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 12
+  %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 14
+  %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 16
+  %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 18
+  %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 20
+  %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 22
+  %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 24
+  %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 26
+  %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 28
+  %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 30
+
+  %load0  = load i8, ptr %gep_l0 , align 16
+  %load1  = load i8, ptr %gep_l1 , align 16
+  %load2  = load i8, ptr %gep_l2 , align 16
+  %load3  = load i8, ptr %gep_l3 , align 16
+  %load4  = load i8, ptr %gep_l4 , align 16
+  %load5  = load i8, ptr %gep_l5 , align 16
+  %load6  = load i8, ptr %gep_l6 , align 16
+  %load7  = load i8, ptr %gep_l7 , align 16
+  %load8  = load i8, ptr %gep_l8 , align 16
+  %load9  = load i8, ptr %gep_l9 , align 16
+  %load10 = load i8, ptr %gep_l10, align 16
+  %load11 = load i8, ptr %gep_l11, align 16
+  %load12 = load i8, ptr %gep_l12, align 16
+  %load13 = load i8, ptr %gep_l13, align 16
+  %load14 = load i8, ptr %gep_l14, align 16
+  %load15 = load i8, ptr %gep_l15, align 16
+
+  %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+  %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+  %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+  %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+  %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+  %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+  %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+  %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+  %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+  %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+  %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+  %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+  %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+  %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+  %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+  %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+  store i8 %load1, ptr %gep_s0, align 16
+  store i8 %load0, ptr %gep_s1, align 16
+  store i8 %load2, ptr %gep_s2, align 16
+  store i8 %load3, ptr %gep_s3, align 16
+  store i8 %load4, ptr %gep_s4, align 16
+  store i8 %load5, ptr %gep_s5, align 16
+  store i8 %load6, ptr %gep_s6, align 16
+  store i8 %load7, ptr %gep_s7, align 16
+  store i8 %load8, ptr %gep_s8, align 16
+  store i8 %load9, ptr %gep_s9, align 16
+  store i8 %load10, ptr %gep_s10, align 16
+  store i8 %load11, ptr %gep_s11, align 16
+  store i8 %load12, ptr %gep_s12, align 16
+  store i8 %load13, ptr %gep_s13, align 16
+  store i8 %load14, ptr %gep_s14, align 16
+  store i8 %load15, ptr %gep_s15, align 16
+
+  ret void
+}
+
+define void @rt_stride_1_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @rt_stride_1_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[STRIDE0:%.*]] = mul nsw i64 [[STRIDE]], 0
+; CHECK-NEXT:    [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[STRIDE0]]
+; CHECK-NEXT:    [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[STRIDE]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 16 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16)
+; CHECK-NEXT:    store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT:    ret void
+;
+  ; 16 i8 loads at a runtime stride (offsets stride*0 .. stride*15), stored
+  ; contiguously to ps in order. Per the CHECK lines, SLP recognizes the
+  ; pattern and emits a single llvm.experimental.vp.strided.load with the
+  ; runtime stride as the byte step.
+  %stride0  = mul nsw i64 %stride, 0
+  %stride1  = mul nsw i64 %stride, 1
+  %stride2  = mul nsw i64 %stride, 2
+  %stride3  = mul nsw i64 %stride, 3
+  %stride4  = mul nsw i64 %stride, 4
+  %stride5  = mul nsw i64 %stride, 5
+  %stride6  = mul nsw i64 %stride, 6
+  %stride7  = mul nsw i64 %stride, 7
+  %stride8  = mul nsw i64 %stride, 8
+  %stride9  = mul nsw i64 %stride, 9
+  %stride10 = mul nsw i64 %stride, 10
+  %stride11 = mul nsw i64 %stride, 11
+  %stride12 = mul nsw i64 %stride, 12
+  %stride13 = mul nsw i64 %stride, 13
+  %stride14 = mul nsw i64 %stride, 14
+  %stride15 = mul nsw i64 %stride, 15
+
+  %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %stride0
+  %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 %stride1
+  %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 %stride2
+  %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 %stride3
+  %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 %stride4
+  %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 %stride5
+  %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 %stride6
+  %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 %stride7
+  %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 %stride8
+  %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 %stride9
+  %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 %stride10
+  %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 %stride11
+  %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 %stride12
+  %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 %stride13
+  %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %stride14
+  %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %stride15
+
+  %load0  = load i8, ptr %gep_l0 , align 16
+  %load1  = load i8, ptr %gep_l1 , align 16
+  %load2  = load i8, ptr %gep_l2 , align 16
+  %load3  = load i8, ptr %gep_l3 , align 16
+  %load4  = load i8, ptr %gep_l4 , align 16
+  %load5  = load i8, ptr %gep_l5 , align 16
+  %load6  = load i8, ptr %gep_l6 , align 16
+  %load7  = load i8, ptr %gep_l7 , align 16
+  %load8  = load i8, ptr %gep_l8 , align 16
+  %load9  = load i8, ptr %gep_l9 , align 16
+  %load10 = load i8, ptr %gep_l10, align 16
+  %load11 = load i8, ptr %gep_l11, align 16
+  %load12 = load i8, ptr %gep_l12, align 16
+  %load13 = load i8, ptr %gep_l13, align 16
+  %load14 = load i8, ptr %gep_l14, align 16
+  %load15 = load i8, ptr %gep_l15, align 16
+
+  %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+  %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+  %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+  %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+  %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+  %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+  %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+  %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+  %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+  %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+  %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+  %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+  %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+  %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+  %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+  %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+  store i8 %load0, ptr %gep_s0, align 16
+  store i8 %load1, ptr %gep_s1, align 16
+  store i8 %load2, ptr %gep_s2, align 16
+  store i8 %load3, ptr %gep_s3, align 16
+  store i8 %load4, ptr %gep_s4, align 16
+  store i8 %load5, ptr %gep_s5, align 16
+  store i8 %load6, ptr %gep_s6, align 16
+  store i8 %load7, ptr %gep_s7, align 16
+  store i8 %load8, ptr %gep_s8, align 16
+  store i8 %load9, ptr %gep_s9, align 16
+  store i8 %load10, ptr %gep_s10, align 16
+  store i8 %load11, ptr %gep_s11, align 16
+  store i8 %load12, ptr %gep_s12, align 16
+  store i8 %load13, ptr %gep_s13, align 16
+  store i8 %load14, ptr %gep_s14, align 16
+  store i8 %load15, ptr %gep_s15, align 16
+
+  ret void
+}
+
+define void @rt_stride_1_with_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @rt_stride_1_with_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[STRIDE0:%.*]] = mul nsw i64 [[STRIDE]], 0
+; CHECK-NEXT:    [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[STRIDE0]]
+; CHECK-NEXT:    [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[STRIDE]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 16 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16)
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT:    store <16 x i8> [[TMP3]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT:    ret void
+;
+  ; Same runtime-stride pattern as @rt_stride_1_no_reordering, but the stores
+  ; swap elements 0 and 1. Per the CHECK lines, SLP still emits a single
+  ; vp.strided.load and applies one shufflevector (mask <1, 0, 2, ...>) to
+  ; realize the reordering before the contiguous <16 x i8> store.
+  %stride0  = mul nsw i64 %stride, 0
+  %stride1  = mul nsw i64 %stride, 1
+  %stride2  = mul nsw i64 %stride, 2
+  %stride3  = mul nsw i64 %stride, 3
+  %stride4  = mul nsw i64 %stride, 4
+  %stride5  = mul nsw i64 %stride, 5
+  %stride6  = mul nsw i64 %stride, 6
+  %stride7  = mul nsw i64 %stride, 7
+  %stride8  = mul nsw i64 %stride, 8
+  %stride9  = mul nsw i64 %stride, 9
+  %stride10 = mul nsw i64 %stride, 10
+  %stride11 = mul nsw i64 %stride, 11
+  %stride12 = mul nsw i64 %stride, 12
+  %stride13 = mul nsw i64 %stride, 13
+  %stride14 = mul nsw i64 %stride, 14
+  %stride15 = mul nsw i64 %stride, 15
+
+  %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %stride0
+  %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 %stride1
+  %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 %stride2
+  %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 %stride3
+  %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 %stride4
+  %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 %stride5
+  %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 %stride6
+  %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 %stride7
+  %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 %stride8
+  %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 %stride9
+  %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 %stride10
+  %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 %stride11
+  %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 %stride12
+  %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 %stride13
+  %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %stride14
+  %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %stride15
+
+  %load0  = load i8, ptr %gep_l0 , align 16
+  %load1  = load i8, ptr %gep_l1 , align 16
+  %load2  = load i8, ptr %gep_l2 , align 16
+  %load3  = load i8, ptr %gep_l3 , align 16
+  %load4  = load i8, ptr %gep_l4 , align 16
+  %load5  = load i8, ptr %gep_l5 , align 16
+  %load6  = load i8, ptr %gep_l6 , align 16
+  %load7  = load i8, ptr %gep_l7 , align 16
+  %load8  = load i8, ptr %gep_l8 , align 16
+  %load9  = load i8, ptr %gep_l9 , align 16
+  %load10 = load i8, ptr %gep_l10, align 16
+  %load11 = load i8, ptr %gep_l11, align 16
+  %load12 = load i8, ptr %gep_l12, align 16
+  %load13 = load i8, ptr %gep_l13, align 16
+  %load14 = load i8, ptr %gep_l14, align 16
+  %load15 = load i8, ptr %gep_l15, align 16
+
+  %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+  %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+  %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+  %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+  %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+  %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+  %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+  %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+  %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+  %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+  %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+  %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+  %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+  %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+  %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+  %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+  store i8 %load1, ptr %gep_s0, align 16
+  store i8 %load0, ptr %gep_s1, align 16
+  store i8 %load2, ptr %gep_s2, align 16
+  store i8 %load3, ptr %gep_s3, align 16
+  store i8 %load4, ptr %gep_s4, align 16
+  store i8 %load5, ptr %gep_s5, align 16
+  store i8 %load6, ptr %gep_s6, align 16
+  store i8 %load7, ptr %gep_s7, align 16
+  store i8 %load8, ptr %gep_s8, align 16
+  store i8 %load9, ptr %gep_s9, align 16
+  store i8 %load10, ptr %gep_s10, align 16
+  store i8 %load11, ptr %gep_s11, align 16
+  store i8 %load12, ptr %gep_s12, align 16
+  store i8 %load13, ptr %gep_s13, align 16
+  store i8 %load14, ptr %gep_s14, align 16
+  store i8 %load15, ptr %gep_s15, align 16
+
+  ret void
+}
+
+define void @rt_stride_2_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @rt_stride_2_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x i64> poison, i64 [[STRIDE]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = mul nsw <8 x i64> [[TMP2]], <i64 0, i64 2, i64 4, i64 6, i64 8, i64 10, i64 12, i64 14>
+; CHECK-NEXT:    [[TMP4:%.*]] = mul nsw <8 x i64> [[TMP2]], <i64 16, i64 18, i64 20, i64 22, i64 24, i64 26, i64 28, i64 30>
+; CHECK-NEXT:    [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <8 x ptr> poison, ptr [[PL]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <8 x ptr> [[TMP5]], <8 x ptr> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr i8, <8 x ptr> [[TMP6]], <8 x i64> [[TMP3]]
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr i8, <8 x ptr> [[TMP6]], <8 x i64> [[TMP4]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> [[TMP7]], i32 16, <8 x i1> splat (i1 true), <8 x i8> poison)
+; CHECK-NEXT:    [[TMP10:%.*]] = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> [[TMP8]], i32 16, <8 x i1> splat (i1 true), <8 x i8> poison)
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <8 x i8> [[TMP9]], <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <8 x i8> [[TMP10]], <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <8 x i8> [[TMP9]], <8 x i8> [[TMP10]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT:    store <16 x i8> [[TMP12]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT:    ret void
+;
+  ; 16 i8 loads at a runtime stride doubled per element (offsets stride*0,
+  ; stride*2, ..., stride*30), stored contiguously to ps in order. Per the
+  ; CHECK lines, SLP currently vectorizes this as two <8 x i8> masked gathers
+  ; plus shuffles instead of a single strided load with step 2*stride.
+  ; NOTE(review): presumably this is the "missed opportunity" the commit
+  ; message records; the two single-use shuffles [[TMP11]]/[[TMP13]] in the
+  ; captured output also appear dead -- confirm.
+  %stride0  = mul nsw i64 %stride, 0
+  %stride1  = mul nsw i64 %stride, 2
+  %stride2  = mul nsw i64 %stride, 4
+  %stride3  = mul nsw i64 %stride, 6
+  %stride4  = mul nsw i64 %stride, 8
+  %stride5  = mul nsw i64 %stride, 10
+  %stride6  = mul nsw i64 %stride, 12
+  %stride7  = mul nsw i64 %stride, 14
+  %stride8  = mul nsw i64 %stride, 16
+  %stride9  = mul nsw i64 %stride, 18
+  %stride10 = mul nsw i64 %stride, 20
+  %stride11 = mul nsw i64 %stride, 22
+  %stride12 = mul nsw i64 %stride, 24
+  %stride13 = mul nsw i64 %stride, 26
+  %stride14 = mul nsw i64 %stride, 28
+  %stride15 = mul nsw i64 %stride, 30
+
+  %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %stride0
+  %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 %stride1
+  %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 %stride2
+  %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 %stride3
+  %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 %stride4
+  %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 %stride5
+  %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 %stride6
+  %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 %stride7
+  %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 %stride8
+  %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 %stride9
+  %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 %stride10
+  %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 %stride11
+  %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 %stride12
+  %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 %stride13
+  %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %stride14
+  %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %stride15
+
+  %load0  = load i8, ptr %gep_l0 , align 16
+  %load1  = load i8, ptr %gep_l1 , align 16
+  %load2  = load i8, ptr %gep_l2 , align 16
+  %load3  = load i8, ptr %gep_l3 , align 16
+  %load4  = load i8, ptr %gep_l4 , align 16
+  %load5  = load i8, ptr %gep_l5 , align 16
+  %load6  = load i8, ptr %gep_l6 , align 16
+  %load7  = load i8, ptr %gep_l7 , align 16
+  %load8  = load i8, ptr %gep_l8 , align 16
+  %load9  = load i8, ptr %gep_l9 , align 16
+  %load10 = load i8, ptr %gep_l10, align 16
+  %load11 = load i8, ptr %gep_l11, align 16
+  %load12 = load i8, ptr %gep_l12, align 16
+  %load13 = load i8, ptr %gep_l13, align 16
+  %load14 = load i8, ptr %gep_l14, align 16
+  %load15 = load i8, ptr %gep_l15, align 16
+
+  %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+  %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+  %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+  %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+  %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+  %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+  %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+  %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+  %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+  %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+  %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+  %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+  %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+  %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+  %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+  %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+  store i8 %load0, ptr %gep_s0, align 16
+  store i8 %load1, ptr %gep_s1, align 16
+  store i8 %load2, ptr %gep_s2, align 16
+  store i8 %load3, ptr %gep_s3, align 16
+  store i8 %load4, ptr %gep_s4, align 16
+  store i8 %load5, ptr %gep_s5, align 16
+  store i8 %load6, ptr %gep_s6, align 16
+  store i8 %load7, ptr %gep_s7, align 16
+  store i8 %load8, ptr %gep_s8, align 16
+  store i8 %load9, ptr %gep_s9, align 16
+  store i8 %load10, ptr %gep_s10, align 16
+  store i8 %load11, ptr %gep_s11, align 16
+  store i8 %load12, ptr %gep_s12, align 16
+  store i8 %load13, ptr %gep_s13, align 16
+  store i8 %load14, ptr %gep_s14, align 16
+  store i8 %load15, ptr %gep_s15, align 16
+
+  ret void
+}
+
+define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @constant_stride_widen_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT:    [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = call <28 x i8> @llvm.masked.load.v28i8.p0(ptr [[GEP_L0]], i32 16, <28 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <28 x i8> poison)
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <28 x i8> [[TMP1]], <28 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27>
+; CHECK-NEXT:    store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT:    ret void
+;
+  %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+  %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1
+  %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2
+  %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3
+  %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 8
+  %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 9
+  %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 10
+  %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 11
+  %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 16
+  %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 17
+  %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 18
+  %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 19
+  %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 24
+  %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 25
+  %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 26
+  %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 27
+
+  %load0  = load i8, ptr %gep_l0 , align 16
+  %load1  = load i8, ptr %gep_l1 , align 16
+  %load2  = load i8, ptr %gep_l2 , align 16
+  %load3  = load i8, ptr %gep_l3 , align 16
+  %load4  = load i8, ptr %gep_l4 , align 16
+  %load5  = load i8, ptr %gep_l5 , align 16
+  %load6  = load i8, ptr %gep_l6 , align 16
+  %load7  = load i8, ptr %gep_l7 , align 16
+  %load8  = load i8, ptr %gep_l8 , align 16
+  %load9  = load i8, ptr %gep_l9 , align 16
+  %load10 = load i8, ptr %gep_l10, align 16
+  %load11 = load i8, ptr %gep_l11, align 16
+  %load12 = load i8, ptr %gep_l12, align 16
+  %load13 = load i8, ptr %gep_l13, align 16
+  %load14 = load i8, ptr %gep_l14, align 16
+  %load15 = load i8, ptr %gep_l15, align 16
+
+  %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+  %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+  %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+  %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+  %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+  %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+  %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+  %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+  %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+  %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+  %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+  %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+  %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+  %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+  %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+  %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+  store i8 %load0, ptr %gep_s0, align 16
+  store i8 %load1, ptr %gep_s1, align 16
+  store i8 %load2, ptr %gep_s2, align 16
+  store i8 %load3, ptr %gep_s3, align 16
+  store i8 %load4, ptr %gep_s4, align 16
+  store i8 %load5, ptr %gep_s5, align 16
+  store i8 %load6, ptr %gep_s6, align 16
+  store i8 %load7, ptr %gep_s7, align 16
+  store i8 %load8, ptr %gep_s8, align 16
+  store i8 %load9, ptr %gep_s9, align 16
+  store i8 %load10, ptr %gep_s10, align 16
+  store i8 %load11, ptr %gep_s11, align 16
+  store i8 %load12, ptr %gep_s12, align 16
+  store i8 %load13, ptr %gep_s13, align 16
+  store i8 %load14, ptr %gep_s14, align 16
+  store i8 %load15, ptr %gep_s15, align 16
+
+  ret void
+}
+
+define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @rt_stride_widen_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[OFFSET0:%.*]] = mul nsw i64 [[STRIDE]], 0
+; CHECK-NEXT:    [[OFFSET4:%.*]] = mul nsw i64 [[STRIDE]], 1
+; CHECK-NEXT:    [[OFFSET8:%.*]] = mul nsw i64 [[STRIDE]], 2
+; CHECK-NEXT:    [[OFFSET12:%.*]] = mul nsw i64 [[STRIDE]], 3
+; CHECK-NEXT:    [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET0]]
+; CHECK-NEXT:    [[GEP_L4:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET4]]
+; CHECK-NEXT:    [[GEP_L8:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET8]]
+; CHECK-NEXT:    [[GEP_L12:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET12]]
+; CHECK-NEXT:    [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[GEP_L0]], align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_L4]], align 16
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i8>, ptr [[GEP_L8]], align 16
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i8>, ptr [[GEP_L12]], align 16
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> [[TMP2]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP11]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x i8> [[TMP4]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; CHECK-NEXT:    store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT:    ret void
+;
+  %offset0  = mul nsw i64 %stride, 0
+  %offset1  = add nsw i64 %offset0, 1
+  %offset2  = add nsw i64 %offset0, 2
+  %offset3  = add nsw i64 %offset0, 3
+  %offset4  = mul nsw i64 %stride, 1
+  %offset5  = add nsw i64 %offset4, 1
+  %offset6  = add nsw i64 %offset4, 2
+  %offset7  = add nsw i64 %offset4, 3
+  %offset8  = mul nsw i64 %stride, 2
+  %offset9  = add nsw i64 %offset8, 1
+  %offset10  = add nsw i64 %offset8, 2
+  %offset11  = add nsw i64 %offset8, 3
+  %offset12 = mul nsw i64 %stride, 3
+  %offset13 = add nsw i64 %offset12, 1
+  %offset14 = add nsw i64 %offset12, 2
+  %offset15 = add nsw i64 %offset12, 3
+
+  %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0
+  %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 %offset1
+  %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 %offset2
+  %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 %offset3
+  %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 %offset4
+  %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 %offset5
+  %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 %offset6
+  %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 %offset7
+  %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 %offset8
+  %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 %offset9
+  %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 %offset10
+  %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 %offset11
+  %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 %offset12
+  %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 %offset13
+  %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %offset14
+  %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %offset15
+
+  %load0  = load i8, ptr %gep_l0 , align 16
+  %load1  = load i8, ptr %gep_l1 , align 16
+  %load2  = load i8, ptr %gep_l2 , align 16
+  %load3  = load i8, ptr %gep_l3 , align 16
+  %load4  = load i8, ptr %gep_l4 , align 16
+  %load5  = load i8, ptr %gep_l5 , align 16
+  %load6  = load i8, ptr %gep_l6 , align 16
+  %load7  = load i8, ptr %gep_l7 , align 16
+  %load8  = load i8, ptr %gep_l8 , align 16
+  %load9  = load i8, ptr %gep_l9 , align 16
+  %load10 = load i8, ptr %gep_l10, align 16
+  %load11 = load i8, ptr %gep_l11, align 16
+  %load12 = load i8, ptr %gep_l12, align 16
+  %load13 = load i8, ptr %gep_l13, align 16
+  %load14 = load i8, ptr %gep_l14, align 16
+  %load15 = load i8, ptr %gep_l15, align 16
+
+  %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+  %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+  %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+  %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+  %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+  %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+  %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+  %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+  %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+  %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+  %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+  %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+  %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+  %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+  %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+  %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+  store i8 %load0, ptr %gep_s0, align 16
+  store i8 %load1, ptr %gep_s1, align 16
+  store i8 %load2, ptr %gep_s2, align 16
+  store i8 %load3, ptr %gep_s3, align 16
+  store i8 %load4, ptr %gep_s4, align 16
+  store i8 %load5, ptr %gep_s5, align 16
+  store i8 %load6, ptr %gep_s6, align 16
+  store i8 %load7, ptr %gep_s7, align 16
+  store i8 %load8, ptr %gep_s8, align 16
+  store i8 %load9, ptr %gep_s9, align 16
+  store i8 %load10, ptr %gep_s10, align 16
+  store i8 %load11, ptr %gep_s11, align 16
+  store i8 %load12, ptr %gep_s12, align 16
+  store i8 %load13, ptr %gep_s13, align 16
+  store i8 %load14, ptr %gep_s14, align 16
+  store i8 %load15, ptr %gep_s15, align 16
+
+  ret void
+}

>From f75700895f4a70d87e233af8fc80f49debdddf7b Mon Sep 17 00:00:00 2001
From: Mikhail Gudim <mgudim at ventanamicro.com>
Date: Wed, 6 Aug 2025 11:06:32 -0700
Subject: [PATCH 2/2] [SLPVectorizer][RISCV] Add a basic test.

Add a basic test for SLPVectorizer to make sure that upcoming
refactoring patches don't break anything. Also, record a test for a
missed opportunity.
---
 .../RISCV/basic-strided-loads.ll              | 127 +++---------------
 1 file changed, 19 insertions(+), 108 deletions(-)

diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll
index 8f3de5071d64b..645dbc49269f0 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll
@@ -143,6 +143,7 @@ define void @const_stride_1_with_reordering(ptr %pl, ptr %ps) {
   %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
   %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
 
+  ; NOTE: value from %load1 is stored in %gep_s0
   store i8 %load1, ptr %gep_s0, align 16
   store i8 %load0, ptr %gep_s1, align 16
   store i8 %load2, ptr %gep_s2, align 16
@@ -526,114 +527,15 @@ define void @rt_stride_1_with_reordering(ptr %pl, i64 %stride, ptr %ps) {
   ret void
 }
 
-define void @rt_stride_2_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
-; CHECK-LABEL: define void @rt_stride_2_no_reordering(
-; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x i64> poison, i64 [[STRIDE]], i32 0
-; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> poison, <8 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP3:%.*]] = mul nsw <8 x i64> [[TMP2]], <i64 0, i64 2, i64 4, i64 6, i64 8, i64 10, i64 12, i64 14>
-; CHECK-NEXT:    [[TMP4:%.*]] = mul nsw <8 x i64> [[TMP2]], <i64 16, i64 18, i64 20, i64 22, i64 24, i64 26, i64 28, i64 30>
-; CHECK-NEXT:    [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <8 x ptr> poison, ptr [[PL]], i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <8 x ptr> [[TMP5]], <8 x ptr> poison, <8 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr i8, <8 x ptr> [[TMP6]], <8 x i64> [[TMP3]]
-; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr i8, <8 x ptr> [[TMP6]], <8 x i64> [[TMP4]]
-; CHECK-NEXT:    [[TMP9:%.*]] = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> [[TMP7]], i32 16, <8 x i1> splat (i1 true), <8 x i8> poison)
-; CHECK-NEXT:    [[TMP10:%.*]] = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> [[TMP8]], i32 16, <8 x i1> splat (i1 true), <8 x i8> poison)
-; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <8 x i8> [[TMP9]], <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <8 x i8> [[TMP10]], <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <8 x i8> [[TMP9]], <8 x i8> [[TMP10]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT:    store <16 x i8> [[TMP12]], ptr [[GEP_S0]], align 16
-; CHECK-NEXT:    ret void
-;
-  %stride0  = mul nsw i64 %stride, 0
-  %stride1  = mul nsw i64 %stride, 2
-  %stride2  = mul nsw i64 %stride, 4
-  %stride3  = mul nsw i64 %stride, 6
-  %stride4  = mul nsw i64 %stride, 8
-  %stride5  = mul nsw i64 %stride, 10
-  %stride6  = mul nsw i64 %stride, 12
-  %stride7  = mul nsw i64 %stride, 14
-  %stride8  = mul nsw i64 %stride, 16
-  %stride9  = mul nsw i64 %stride, 18
-  %stride10 = mul nsw i64 %stride, 20
-  %stride11 = mul nsw i64 %stride, 22
-  %stride12 = mul nsw i64 %stride, 24
-  %stride13 = mul nsw i64 %stride, 26
-  %stride14 = mul nsw i64 %stride, 28
-  %stride15 = mul nsw i64 %stride, 30
-
-  %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %stride0
-  %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 %stride1
-  %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 %stride2
-  %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 %stride3
-  %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 %stride4
-  %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 %stride5
-  %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 %stride6
-  %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 %stride7
-  %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 %stride8
-  %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 %stride9
-  %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 %stride10
-  %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 %stride11
-  %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 %stride12
-  %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 %stride13
-  %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %stride14
-  %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %stride15
-
-  %load0  = load i8, ptr %gep_l0 , align 16
-  %load1  = load i8, ptr %gep_l1 , align 16
-  %load2  = load i8, ptr %gep_l2 , align 16
-  %load3  = load i8, ptr %gep_l3 , align 16
-  %load4  = load i8, ptr %gep_l4 , align 16
-  %load5  = load i8, ptr %gep_l5 , align 16
-  %load6  = load i8, ptr %gep_l6 , align 16
-  %load7  = load i8, ptr %gep_l7 , align 16
-  %load8  = load i8, ptr %gep_l8 , align 16
-  %load9  = load i8, ptr %gep_l9 , align 16
-  %load10 = load i8, ptr %gep_l10, align 16
-  %load11 = load i8, ptr %gep_l11, align 16
-  %load12 = load i8, ptr %gep_l12, align 16
-  %load13 = load i8, ptr %gep_l13, align 16
-  %load14 = load i8, ptr %gep_l14, align 16
-  %load15 = load i8, ptr %gep_l15, align 16
-
-  %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
-  %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
-  %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
-  %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
-  %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
-  %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
-  %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
-  %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
-  %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
-  %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
-  %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
-  %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
-  %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
-  %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
-  %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
-  %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
-
-  store i8 %load0, ptr %gep_s0, align 16
-  store i8 %load1, ptr %gep_s1, align 16
-  store i8 %load2, ptr %gep_s2, align 16
-  store i8 %load3, ptr %gep_s3, align 16
-  store i8 %load4, ptr %gep_s4, align 16
-  store i8 %load5, ptr %gep_s5, align 16
-  store i8 %load6, ptr %gep_s6, align 16
-  store i8 %load7, ptr %gep_s7, align 16
-  store i8 %load8, ptr %gep_s8, align 16
-  store i8 %load9, ptr %gep_s9, align 16
-  store i8 %load10, ptr %gep_s10, align 16
-  store i8 %load11, ptr %gep_s11, align 16
-  store i8 %load12, ptr %gep_s12, align 16
-  store i8 %load13, ptr %gep_s13, align 16
-  store i8 %load14, ptr %gep_s14, align 16
-  store i8 %load15, ptr %gep_s15, align 16
-
-  ret void
-}
-
+; TODO: We want to generate this code:
+; define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 16 %gep_l0, i64 8, <4 x i1> splat (i1 true), i32 4)
+; %bitcast_ = bitcast <4 x i32> %strided_load to <16 x i8>
+; store <16 x i8> %bitcast_, ptr %gep_s0, align 16
+; ret void
+; }
 define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
 ; CHECK-LABEL: define void @constant_stride_widen_no_reordering(
 ; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
@@ -715,6 +617,15 @@ define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps)
   ret void
 }
 
+; TODO: We want to generate this code:
+; define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0
+; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 16 %gep_l0, i64 %stride, <4 x i1> splat (i1 true), i32 4)
+; %bitcast_ = bitcast <4 x i32> %strided_load to <16 x i8>
+; store <16 x i8> %bitcast_, ptr %gep_s0, align 16
+; ret void
+; }
 define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
 ; CHECK-LABEL: define void @rt_stride_widen_no_reordering(
 ; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {



More information about the llvm-commits mailing list