[llvm] [SLP] Vectorize non-power-of-2 ops with padding. (PR #77790)

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 11 07:49:16 PST 2024


https://github.com/fhahn created https://github.com/llvm/llvm-project/pull/77790

This patch introduces a new VectorizeWithPadding node type for root and
leaf nodes to allow vectorizing loads/stores with a non-power-of-2 number
of elements.

VectorizeWithPadding load nodes will pad the result to the next power of 2
with poison elements.

Non-leaf nodes will operate on normal power-of-2 vectors. For those
non-leaf nodes, we still track the number of padding elements needed to
reach the next power-of-2, which is used in various places, such as cost
computation.

VectorizeWithPadding store nodes strip away the padding elements and
store the non-power-of-2 number of data elements.

Note that re-ordering and shuffling are not yet implemented for nodes
requiring padding, to keep the initial implementation simpler.

The initial implementation also only tries to vectorize with padding if
the original number of elements + 1 is a power-of-2, i.e. if only a single
padding element is needed (e.g. groups of 3, 7, or 15 elements).

The feature is guarded by a new flag, off by default for now.
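
To illustrate the intended shape, consider a group of 3 i32 elements; 3 + 1
is a power-of-2, so a single poison padding lane is needed. The sketch below
is hand-written for illustration only (it is not IR produced by this patch,
and the actual output may differ): the leaf load is padded to 4 lanes with
poison, the non-leaf multiply operates on the power-of-2 vector, and the
padding lane is stripped again before the 3-element store.

  ; hypothetical 3-element group, padded to <4 x i32> with one poison lane
  %v3  = load <3 x i32>, ptr %src, align 4
  %pad = shufflevector <3 x i32> %v3, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
  %mul = mul nsw <4 x i32> %pad, <i32 10, i32 10, i32 10, i32 poison>
  %res = shufflevector <4 x i32> %mul, <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
  store <3 x i32> %res, ptr %dst, align 4

The flag added below is -slp-vectorize-with-padding; the behaviour can be
exercised with e.g. opt -passes=slp-vectorizer -slp-vectorize-with-padding -S.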

From 07a21243a1972d6a07273dff272771fd0887fd79 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Thu, 11 Jan 2024 15:47:57 +0000
Subject: [PATCH 1/2] [SLP] Add tests to vectorize 3 ops with padding.

---
 .../SLPVectorizer/AArch64/vec15-base.ll       | 127 +++++
 .../SLPVectorizer/AArch64/vec3-base.ll        | 297 ++++++++++
 .../SLPVectorizer/AArch64/vec3-calls.ll       |  60 ++
 .../AArch64/vec3-reorder-reshuffle.ll         | 516 ++++++++++++++++++
 4 files changed, 1000 insertions(+)
 create mode 100644 llvm/test/Transforms/SLPVectorizer/AArch64/vec15-base.ll
 create mode 100644 llvm/test/Transforms/SLPVectorizer/AArch64/vec3-base.ll
 create mode 100644 llvm/test/Transforms/SLPVectorizer/AArch64/vec3-calls.ll
 create mode 100644 llvm/test/Transforms/SLPVectorizer/AArch64/vec3-reorder-reshuffle.ll

diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/vec15-base.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/vec15-base.ll
new file mode 100644
index 00000000000000..b9e959d50befdd
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/vec15-base.ll
@@ -0,0 +1,127 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -passes=slp-vectorizer -mtriple=arm64-apple-ios -S %s | FileCheck %s
+
+define void @v15_load_i8_mul_by_constant_store(ptr %src, ptr noalias %dst) {
+; CHECK-LABEL: define void @v15_load_i8_mul_by_constant_store(
+; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[GEP_SRC_0:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 0
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[GEP_SRC_0]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = mul nsw <8 x i8> [[TMP0]], <i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10>
+; CHECK-NEXT:    store <8 x i8> [[TMP1]], ptr [[DST]], align 1
+; CHECK-NEXT:    [[GEP_SRC_8:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 8
+; CHECK-NEXT:    [[DST_8:%.*]] = getelementptr i8, ptr [[DST]], i8 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_SRC_8]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = mul nsw <4 x i8> [[TMP2]], <i8 10, i8 10, i8 10, i8 10>
+; CHECK-NEXT:    store <4 x i8> [[TMP3]], ptr [[DST_8]], align 1
+; CHECK-NEXT:    [[GEP_SRC_12:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 12
+; CHECK-NEXT:    [[L_SRC_12:%.*]] = load i8, ptr [[GEP_SRC_12]], align 4
+; CHECK-NEXT:    [[MUL_12:%.*]] = mul nsw i8 [[L_SRC_12]], 10
+; CHECK-NEXT:    [[DST_12:%.*]] = getelementptr i8, ptr [[DST]], i8 12
+; CHECK-NEXT:    store i8 [[MUL_12]], ptr [[DST_12]], align 1
+; CHECK-NEXT:    [[GEP_SRC_13:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 13
+; CHECK-NEXT:    [[L_SRC_13:%.*]] = load i8, ptr [[GEP_SRC_13]], align 4
+; CHECK-NEXT:    [[MUL_13:%.*]] = mul nsw i8 [[L_SRC_13]], 10
+; CHECK-NEXT:    [[DST_13:%.*]] = getelementptr i8, ptr [[DST]], i8 13
+; CHECK-NEXT:    store i8 [[MUL_13]], ptr [[DST_13]], align 1
+; CHECK-NEXT:    [[GEP_SRC_14:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 14
+; CHECK-NEXT:    [[L_SRC_14:%.*]] = load i8, ptr [[GEP_SRC_14]], align 4
+; CHECK-NEXT:    [[MUL_14:%.*]] = mul nsw i8 [[L_SRC_14]], 10
+; CHECK-NEXT:    [[DST_14:%.*]] = getelementptr i8, ptr [[DST]], i8 14
+; CHECK-NEXT:    store i8 [[MUL_14]], ptr [[DST_14]], align 1
+; CHECK-NEXT:    ret void
+;
+entry:
+  %gep.src.0 = getelementptr inbounds i8, ptr %src, i8 0
+  %l.src.0 = load i8, ptr %gep.src.0, align 4
+  %mul.0 = mul nsw i8 %l.src.0, 10
+  store i8 %mul.0, ptr %dst
+
+  %gep.src.1 = getelementptr inbounds i8, ptr %src, i8 1
+  %l.src.1 = load i8, ptr %gep.src.1, align 4
+  %mul.1 = mul nsw i8 %l.src.1, 10
+  %dst.1 = getelementptr i8, ptr %dst, i8 1
+  store i8 %mul.1, ptr %dst.1
+
+  %gep.src.2 = getelementptr inbounds i8, ptr %src, i8 2
+  %l.src.2 = load i8, ptr %gep.src.2, align 4
+  %mul.2 = mul nsw i8 %l.src.2, 10
+  %dst.2 = getelementptr i8, ptr %dst, i8 2
+  store i8 %mul.2, ptr %dst.2
+
+  %gep.src.3 = getelementptr inbounds i8, ptr %src, i8 3
+  %l.src.3 = load i8, ptr %gep.src.3, align 4
+  %mul.3 = mul nsw i8 %l.src.3, 10
+  %dst.3 = getelementptr i8, ptr %dst, i8 3
+  store i8 %mul.3, ptr %dst.3
+
+  %gep.src.4 = getelementptr inbounds i8, ptr %src, i8 4
+  %l.src.4 = load i8, ptr %gep.src.4, align 4
+  %mul.4 = mul nsw i8 %l.src.4, 10
+  %dst.4 = getelementptr i8, ptr %dst, i8 4
+  store i8 %mul.4, ptr %dst.4
+
+  %gep.src.5 = getelementptr inbounds i8, ptr %src, i8 5
+  %l.src.5 = load i8, ptr %gep.src.5, align 4
+  %mul.5 = mul nsw i8 %l.src.5, 10
+  %dst.5 = getelementptr i8, ptr %dst, i8 5
+  store i8 %mul.5, ptr %dst.5
+
+  %gep.src.6 = getelementptr inbounds i8, ptr %src, i8 6
+  %l.src.6 = load i8, ptr %gep.src.6, align 4
+  %mul.6 = mul nsw i8 %l.src.6, 10
+  %dst.6 = getelementptr i8, ptr %dst, i8 6
+  store i8 %mul.6, ptr %dst.6
+
+  %gep.src.7 = getelementptr inbounds i8, ptr %src, i8 7
+  %l.src.7 = load i8, ptr %gep.src.7, align 4
+  %mul.7 = mul nsw i8 %l.src.7, 10
+  %dst.7 = getelementptr i8, ptr %dst, i8 7
+  store i8 %mul.7, ptr %dst.7
+
+  %gep.src.8 = getelementptr inbounds i8, ptr %src, i8 8
+  %l.src.8 = load i8, ptr %gep.src.8, align 4
+  %mul.8 = mul nsw i8 %l.src.8, 10
+  %dst.8 = getelementptr i8, ptr %dst, i8 8
+  store i8 %mul.8, ptr %dst.8
+
+  %gep.src.9 = getelementptr inbounds i8, ptr %src, i8 9
+  %l.src.9 = load i8, ptr %gep.src.9, align 4
+  %mul.9 = mul nsw i8 %l.src.9, 10
+  %dst.9 = getelementptr i8, ptr %dst, i8 9
+  store i8 %mul.9, ptr %dst.9
+
+  %gep.src.10 = getelementptr inbounds i8, ptr %src, i8 10
+  %l.src.10 = load i8, ptr %gep.src.10, align 4
+  %mul.10 = mul nsw i8 %l.src.10, 10
+  %dst.10 = getelementptr i8, ptr %dst, i8 10
+  store i8 %mul.10, ptr %dst.10
+
+  %gep.src.11 = getelementptr inbounds i8, ptr %src, i8 11
+  %l.src.11 = load i8, ptr %gep.src.11, align 4
+  %mul.11 = mul nsw i8 %l.src.11, 10
+  %dst.11 = getelementptr i8, ptr %dst, i8 11
+  store i8 %mul.11, ptr %dst.11
+
+  %gep.src.12 = getelementptr inbounds i8, ptr %src, i8 12
+  %l.src.12 = load i8, ptr %gep.src.12, align 4
+  %mul.12 = mul nsw i8 %l.src.12, 10
+  %dst.12 = getelementptr i8, ptr %dst, i8 12
+  store i8 %mul.12, ptr %dst.12
+
+  %gep.src.13 = getelementptr inbounds i8, ptr %src, i8 13
+  %l.src.13 = load i8, ptr %gep.src.13, align 4
+  %mul.13 = mul nsw i8 %l.src.13, 10
+  %dst.13 = getelementptr i8, ptr %dst, i8 13
+  store i8 %mul.13, ptr %dst.13
+
+  %gep.src.14 = getelementptr inbounds i8, ptr %src, i8 14
+  %l.src.14 = load i8, ptr %gep.src.14, align 4
+  %mul.14 = mul nsw i8 %l.src.14, 10
+  %dst.14 = getelementptr i8, ptr %dst, i8 14
+  store i8 %mul.14, ptr %dst.14
+
+  ret void
+}
+
+
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-base.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-base.ll
new file mode 100644
index 00000000000000..59ffbf7ef9b247
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-base.ll
@@ -0,0 +1,297 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=slp-vectorizer -mtriple=arm64-apple-ios -S %s | FileCheck %s
+
+define void @v3_load_i32_mul_by_constant_store(ptr %src, ptr %dst) {
+; CHECK-LABEL: @v3_load_i32_mul_by_constant_store(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[GEP_SRC_0:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i32 0
+; CHECK-NEXT:    [[GEP_SRC_2:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 2
+; CHECK-NEXT:    [[L_SRC_2:%.*]] = load i32, ptr [[GEP_SRC_2]], align 4
+; CHECK-NEXT:    [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_2]], 10
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[GEP_SRC_0]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = mul nsw <2 x i32> [[TMP0]], <i32 10, i32 10>
+; CHECK-NEXT:    store <2 x i32> [[TMP1]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
+; CHECK-NEXT:    store i32 [[MUL_2]], ptr [[DST_2]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %gep.src.0 = getelementptr inbounds i32, ptr %src, i32 0
+  %l.src.0 = load i32, ptr %gep.src.0, align 4
+  %mul.0 = mul nsw i32 %l.src.0, 10
+
+  %gep.src.1 = getelementptr inbounds i32, ptr %src, i32 1
+  %l.src.1 = load i32, ptr %gep.src.1, align 4
+  %mul.1 = mul nsw i32 %l.src.1, 10
+
+  %gep.src.2 = getelementptr inbounds i32, ptr %src, i32 2
+  %l.src.2 = load i32, ptr %gep.src.2, align 4
+  %mul.2 = mul nsw i32 %l.src.2, 10
+
+  store i32 %mul.0, ptr %dst
+
+  %dst.1 = getelementptr i32, ptr %dst, i32 1
+  store i32 %mul.1, ptr %dst.1
+
+  %dst.2 = getelementptr i32, ptr %dst, i32 2
+  store i32 %mul.2, ptr %dst.2
+
+  ret void
+}
+
+define void @v3_load_i32_mul_store(ptr %src.1, ptr %src.2, ptr %dst) {
+; CHECK-LABEL: @v3_load_i32_mul_store(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[GEP_SRC_1_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_1:%.*]], i32 0
+; CHECK-NEXT:    [[GEP_SRC_2_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_2:%.*]], i32 0
+; CHECK-NEXT:    [[GEP_SRC_1_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_1]], i32 2
+; CHECK-NEXT:    [[L_SRC_1_2:%.*]] = load i32, ptr [[GEP_SRC_1_2]], align 4
+; CHECK-NEXT:    [[GEP_SRC_2_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_2]], i32 2
+; CHECK-NEXT:    [[L_SRC_2_2:%.*]] = load i32, ptr [[GEP_SRC_2_2]], align 4
+; CHECK-NEXT:    [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_1_2]], [[L_SRC_2_2]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[GEP_SRC_1_0]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[GEP_SRC_2_0]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = mul nsw <2 x i32> [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    store <2 x i32> [[TMP2]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
+; CHECK-NEXT:    store i32 [[MUL_2]], ptr [[DST_2]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %gep.src.1.0 = getelementptr inbounds i32, ptr %src.1, i32 0
+  %l.src.1.0 = load i32, ptr %gep.src.1.0, align 4
+  %gep.src.2.0 = getelementptr inbounds i32, ptr %src.2, i32 0
+  %l.src.2.0 = load i32, ptr %gep.src.2.0, align 4
+  %mul.0 = mul nsw i32 %l.src.1.0, %l.src.2.0
+
+  %gep.src.1.1 = getelementptr inbounds i32, ptr %src.1, i32 1
+  %l.src.1.1 = load i32, ptr %gep.src.1.1, align 4
+  %gep.src.2.1 = getelementptr inbounds i32, ptr %src.2, i32 1
+  %l.src.2.1 = load i32, ptr %gep.src.2.1, align 4
+  %mul.1 = mul nsw i32 %l.src.1.1, %l.src.2.1
+
+  %gep.src.1.2 = getelementptr inbounds i32, ptr %src.1, i32 2
+  %l.src.1.2 = load i32, ptr %gep.src.1.2, align 4
+  %gep.src.2.2 = getelementptr inbounds i32, ptr %src.2, i32 2
+  %l.src.2.2 = load i32, ptr %gep.src.2.2, align 4
+  %mul.2 = mul nsw i32 %l.src.1.2, %l.src.2.2
+
+  store i32 %mul.0, ptr %dst
+
+  %dst.1 = getelementptr i32, ptr %dst, i32 1
+  store i32 %mul.1, ptr %dst.1
+
+  %dst.2 = getelementptr i32, ptr %dst, i32 2
+  store i32 %mul.2, ptr %dst.2
+
+  ret void
+}
+
+define void @v3_load_i32_mul_add_const_store(ptr %src.1, ptr %src.2, ptr %dst) {
+; CHECK-LABEL: @v3_load_i32_mul_add_const_store(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[GEP_SRC_1_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_1:%.*]], i32 0
+; CHECK-NEXT:    [[GEP_SRC_2_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_2:%.*]], i32 0
+; CHECK-NEXT:    [[GEP_SRC_1_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_1]], i32 2
+; CHECK-NEXT:    [[L_SRC_1_2:%.*]] = load i32, ptr [[GEP_SRC_1_2]], align 4
+; CHECK-NEXT:    [[GEP_SRC_2_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_2]], i32 2
+; CHECK-NEXT:    [[L_SRC_2_2:%.*]] = load i32, ptr [[GEP_SRC_2_2]], align 4
+; CHECK-NEXT:    [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_1_2]], [[L_SRC_2_2]]
+; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[MUL_2]], 9
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[GEP_SRC_1_0]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[GEP_SRC_2_0]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = mul nsw <2 x i32> [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = add <2 x i32> [[TMP2]], <i32 9, i32 9>
+; CHECK-NEXT:    store <2 x i32> [[TMP3]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
+; CHECK-NEXT:    store i32 [[ADD_2]], ptr [[DST_2]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %gep.src.1.0 = getelementptr inbounds i32, ptr %src.1, i32 0
+  %l.src.1.0 = load i32, ptr %gep.src.1.0, align 4
+  %gep.src.2.0 = getelementptr inbounds i32, ptr %src.2, i32 0
+  %l.src.2.0 = load i32, ptr %gep.src.2.0, align 4
+  %mul.0 = mul nsw i32 %l.src.1.0, %l.src.2.0
+  %add.0 = add i32 %mul.0, 9
+
+  %gep.src.1.1 = getelementptr inbounds i32, ptr %src.1, i32 1
+  %l.src.1.1 = load i32, ptr %gep.src.1.1, align 4
+  %gep.src.2.1 = getelementptr inbounds i32, ptr %src.2, i32 1
+  %l.src.2.1 = load i32, ptr %gep.src.2.1, align 4
+  %mul.1 = mul nsw i32 %l.src.1.1, %l.src.2.1
+  %add.1 = add i32 %mul.1, 9
+
+  %gep.src.1.2 = getelementptr inbounds i32, ptr %src.1, i32 2
+  %l.src.1.2 = load i32, ptr %gep.src.1.2, align 4
+  %gep.src.2.2 = getelementptr inbounds i32, ptr %src.2, i32 2
+  %l.src.2.2 = load i32, ptr %gep.src.2.2, align 4
+  %mul.2 = mul nsw i32 %l.src.1.2, %l.src.2.2
+  %add.2 = add i32 %mul.2, 9
+
+  store i32 %add.0, ptr %dst
+
+  %dst.1 = getelementptr i32, ptr %dst, i32 1
+  store i32 %add.1, ptr %dst.1
+
+  %dst.2 = getelementptr i32, ptr %dst, i32 2
+  store i32 %add.2, ptr %dst.2
+
+  ret void
+}
+
+define void @v3_load_f32_fadd_fadd_by_constant_store(ptr %src, ptr %dst) {
+; CHECK-LABEL: @v3_load_f32_fadd_fadd_by_constant_store(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[GEP_SRC_0:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i32 0
+; CHECK-NEXT:    [[GEP_SRC_2:%.*]] = getelementptr inbounds float, ptr [[SRC]], i32 2
+; CHECK-NEXT:    [[L_SRC_2:%.*]] = load float, ptr [[GEP_SRC_2]], align 4
+; CHECK-NEXT:    [[FADD_2:%.*]] = fadd float [[L_SRC_2]], 1.000000e+01
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[GEP_SRC_0]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd <2 x float> [[TMP0]], <float 1.000000e+01, float 1.000000e+01>
+; CHECK-NEXT:    store <2 x float> [[TMP1]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[DST_2:%.*]] = getelementptr float, ptr [[DST]], i32 2
+; CHECK-NEXT:    store float [[FADD_2]], ptr [[DST_2]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %gep.src.0 = getelementptr inbounds float, ptr %src, i32 0
+  %l.src.0 = load float , ptr %gep.src.0, align 4
+  %fadd.0 = fadd float %l.src.0, 10.0
+
+  %gep.src.1 = getelementptr inbounds float , ptr %src, i32 1
+  %l.src.1 = load float, ptr %gep.src.1, align 4
+  %fadd.1 = fadd float %l.src.1, 10.0
+
+  %gep.src.2 = getelementptr inbounds float, ptr %src, i32 2
+  %l.src.2 = load float, ptr %gep.src.2, align 4
+  %fadd.2 = fadd float %l.src.2, 10.0
+
+  store float %fadd.0, ptr %dst
+
+  %dst.1 = getelementptr float, ptr %dst, i32 1
+  store float %fadd.1, ptr %dst.1
+
+  %dst.2 = getelementptr float, ptr %dst, i32 2
+  store float %fadd.2, ptr %dst.2
+
+  ret void
+}
+
+define void @phi_store3(ptr %dst) {
+; CHECK-LABEL: @phi_store3(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[EXIT:%.*]]
+; CHECK:       invoke.cont8.loopexit:
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[P_2:%.*]] = phi i32 [ 3, [[ENTRY:%.*]] ], [ 0, [[INVOKE_CONT8_LOOPEXIT:%.*]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = phi <2 x i32> [ <i32 1, i32 2>, [[ENTRY]] ], [ poison, [[INVOKE_CONT8_LOOPEXIT]] ]
+; CHECK-NEXT:    [[DST_2:%.*]] = getelementptr i32, ptr [[DST:%.*]], i32 2
+; CHECK-NEXT:    store <2 x i32> [[TMP0]], ptr [[DST]], align 4
+; CHECK-NEXT:    store i32 [[P_2]], ptr [[DST_2]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %exit
+
+invoke.cont8.loopexit:                            ; No predecessors!
+  br label %exit
+
+exit:
+  %p.0 = phi i32 [ 1, %entry ], [ 0, %invoke.cont8.loopexit ]
+  %p.1 = phi i32 [ 2, %entry ], [ 0, %invoke.cont8.loopexit ]
+  %p.2 = phi i32 [ 3, %entry ], [ 0, %invoke.cont8.loopexit ]
+
+  %dst.1 = getelementptr i32, ptr %dst, i32 1
+  %dst.2 = getelementptr i32, ptr %dst, i32 2
+
+  store i32 %p.0, ptr %dst, align 4
+  store i32 %p.1, ptr %dst.1, align 4
+  store i32 %p.2, ptr %dst.2, align 4
+  ret void
+}
+
+define void @store_try_reorder(ptr %dst) {
+; CHECK-LABEL: @store_try_reorder(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 0, 0
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_I1887:%.*]] = getelementptr i32, ptr [[DST]], i64 1
+; CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr [[ARRAYIDX_I1887]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %add = add i32 0, 0
+  store i32 %add, ptr %dst, align 4
+  %add207 = sub i32 0, 0
+  %arrayidx.i1887 = getelementptr i32, ptr %dst, i64 1
+  store i32 %add207, ptr %arrayidx.i1887, align 4
+  %add216 = sub i32 0, 0
+  %arrayidx.i1891 = getelementptr i32, ptr %dst, i64 2
+  store i32 %add216, ptr %arrayidx.i1891, align 4
+  ret void
+}
+
+define void @vec3_fpext_cost(ptr %Colour, float %0) {
+; CHECK-LABEL: @vec3_fpext_cost(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX80:%.*]] = getelementptr float, ptr [[COLOUR:%.*]], i64 2
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x float> poison, float [[TMP0:%.*]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = fpext <2 x float> [[TMP2]] to <2 x double>
+; CHECK-NEXT:    [[TMP4:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP3]], <2 x double> zeroinitializer, <2 x double> zeroinitializer)
+; CHECK-NEXT:    [[TMP5:%.*]] = fptrunc <2 x double> [[TMP4]] to <2 x float>
+; CHECK-NEXT:    store <2 x float> [[TMP5]], ptr [[COLOUR]], align 4
+; CHECK-NEXT:    [[CONV78:%.*]] = fpext float [[TMP0]] to double
+; CHECK-NEXT:    [[TMP6:%.*]] = call double @llvm.fmuladd.f64(double [[CONV78]], double 0.000000e+00, double 0.000000e+00)
+; CHECK-NEXT:    [[CONV82:%.*]] = fptrunc double [[TMP6]] to float
+; CHECK-NEXT:    store float [[CONV82]], ptr [[ARRAYIDX80]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arrayidx72 = getelementptr float, ptr %Colour, i64 1
+  %arrayidx80 = getelementptr float, ptr %Colour, i64 2
+  %conv62 = fpext float %0 to double
+  %1 = call double @llvm.fmuladd.f64(double %conv62, double 0.000000e+00, double 0.000000e+00)
+  %conv66 = fptrunc double %1 to float
+  store float %conv66, ptr %Colour, align 4
+  %conv70 = fpext float %0 to double
+  %2 = call double @llvm.fmuladd.f64(double %conv70, double 0.000000e+00, double 0.000000e+00)
+  %conv74 = fptrunc double %2 to float
+  store float %conv74, ptr %arrayidx72, align 4
+  %conv78 = fpext float %0 to double
+  %3 = call double @llvm.fmuladd.f64(double %conv78, double 0.000000e+00, double 0.000000e+00)
+  %conv82 = fptrunc double %3 to float
+  store float %conv82, ptr %arrayidx80, align 4
+  ret void
+}
+
+define void @fpext_gather(ptr %dst, double %conv) {
+; CHECK-LABEL: @fpext_gather(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> poison, double [[CONV:%.*]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP2:%.*]] = fptrunc <2 x double> [[TMP1]] to <2 x float>
+; CHECK-NEXT:    [[LENGTHS:%.*]] = getelementptr float, ptr [[DST:%.*]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x float> [[TMP2]], i32 0
+; CHECK-NEXT:    store float [[TMP3]], ptr [[LENGTHS]], align 4
+; CHECK-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr float, ptr [[DST]], i64 1
+; CHECK-NEXT:    store <2 x float> [[TMP2]], ptr [[ARRAYIDX32]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %conv25 = fptrunc double %conv to float
+  %Lengths = getelementptr float, ptr %dst, i64 0
+  store float %conv25, ptr %Lengths, align 4
+  %arrayidx32 = getelementptr float, ptr %dst, i64 1
+  store float %conv25, ptr %arrayidx32, align 4
+  %conv34 = fptrunc double %conv to float
+  %arrayidx37 = getelementptr float, ptr %dst, i64 2
+  store float %conv34, ptr %arrayidx37, align 4
+  ret void
+}
+
+declare float @llvm.fmuladd.f32(float, float, float)
+
+declare double @llvm.fmuladd.f64(double, double, double)
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-calls.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-calls.ll
new file mode 100644
index 00000000000000..2cb84eeb7fc8f4
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-calls.ll
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=slp-vectorizer -mtriple=arm64-apple-ios -S %s | FileCheck %s
+
+define void @vec3_vectorize_call(ptr %Colour, float %0) {
+; CHECK-LABEL: @vec3_vectorize_call(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x float>, ptr [[COLOUR:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP1]], <2 x float> zeroinitializer, <2 x float> zeroinitializer)
+; CHECK-NEXT:    store <2 x float> [[TMP2]], ptr [[COLOUR]], align 4
+; CHECK-NEXT:    [[ARRAYIDX99_I1:%.*]] = getelementptr float, ptr [[COLOUR]], i64 2
+; CHECK-NEXT:    [[TMP3:%.*]] = call float @llvm.fmuladd.f32(float [[TMP0:%.*]], float 0.000000e+00, float 0.000000e+00)
+; CHECK-NEXT:    store float [[TMP3]], ptr [[ARRAYIDX99_I1]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %1 = load float, ptr %Colour, align 4
+  %2 = call float @llvm.fmuladd.f32(float %1, float 0.000000e+00, float 0.000000e+00)
+  store float %2, ptr %Colour, align 4
+  %arrayidx91.i = getelementptr float, ptr %Colour, i64 1
+  %3 = load float, ptr %arrayidx91.i, align 4
+  %4 = call float @llvm.fmuladd.f32(float %3, float 0.000000e+00, float 0.000000e+00)
+  store float %4, ptr %arrayidx91.i, align 4
+  %arrayidx99.i1 = getelementptr float, ptr %Colour, i64 2
+  %5 = call float @llvm.fmuladd.f32(float %0, float 0.000000e+00, float 0.000000e+00)
+  store float %5, ptr %arrayidx99.i1, align 4
+  ret void
+}
+
+define void @vec3_fmuladd_64(ptr %Colour, double %0) {
+; CHECK-LABEL: @vec3_fmuladd_64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX80:%.*]] = getelementptr float, ptr [[COLOUR:%.*]], i64 2
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> poison, double [[TMP0:%.*]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[TMP2]], <2 x double> zeroinitializer, <2 x double> zeroinitializer)
+; CHECK-NEXT:    [[TMP4:%.*]] = fptrunc <2 x double> [[TMP3]] to <2 x float>
+; CHECK-NEXT:    store <2 x float> [[TMP4]], ptr [[COLOUR]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = call double @llvm.fmuladd.f64(double [[TMP0]], double 0.000000e+00, double 0.000000e+00)
+; CHECK-NEXT:    [[CONV82:%.*]] = fptrunc double [[TMP5]] to float
+; CHECK-NEXT:    store float [[CONV82]], ptr [[ARRAYIDX80]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arrayidx72 = getelementptr float, ptr %Colour, i64 1
+  %arrayidx80 = getelementptr float, ptr %Colour, i64 2
+  %1 = call double @llvm.fmuladd.f64(double %0, double 0.000000e+00, double 0.000000e+00)
+  %conv66 = fptrunc double %1 to float
+  store float %conv66, ptr %Colour, align 4
+  %2 = call double @llvm.fmuladd.f64(double %0, double 0.000000e+00, double 0.000000e+00)
+  %conv74 = fptrunc double %2 to float
+  store float %conv74, ptr %arrayidx72, align 4
+  %3 = call double @llvm.fmuladd.f64(double %0, double 0.000000e+00, double 0.000000e+00)
+  %conv82 = fptrunc double %3 to float
+  store float %conv82, ptr %arrayidx80, align 4
+  ret void
+}
+
+declare float @llvm.fmuladd.f32(float, float, float)
+
+declare double @llvm.fmuladd.f64(double, double, double)
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-reorder-reshuffle.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-reorder-reshuffle.ll
new file mode 100644
index 00000000000000..5707e143ad5515
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-reorder-reshuffle.ll
@@ -0,0 +1,516 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -passes=slp-vectorizer -mtriple=arm64-apple-ios -S %s | FileCheck %s
+
+%struct.zot = type { i32, i32, i32 }
+
+define i1 @reorder_results(ptr %arg, i1 %arg1, ptr %arg2, i64 %arg3, ptr %arg4) {
+; CHECK-LABEL: define i1 @reorder_results(
+; CHECK-SAME: ptr [[ARG:%.*]], i1 [[ARG1:%.*]], ptr [[ARG2:%.*]], i64 [[ARG3:%.*]], ptr [[ARG4:%.*]]) {
+; CHECK-NEXT:  bb:
+; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[ARG4]], align 8
+; CHECK-NEXT:    [[LOAD4:%.*]] = load i32, ptr [[LOAD]], align 4
+; CHECK-NEXT:    [[GETELEMENTPTR:%.*]] = getelementptr i8, ptr [[LOAD]], i64 4
+; CHECK-NEXT:    [[LOAD5:%.*]] = load i32, ptr [[GETELEMENTPTR]], align 4
+; CHECK-NEXT:    [[GETELEMENTPTR6:%.*]] = getelementptr i8, ptr [[LOAD]], i64 8
+; CHECK-NEXT:    [[LOAD7:%.*]] = load i32, ptr [[GETELEMENTPTR6]], align 4
+; CHECK-NEXT:    br i1 [[ARG1]], label [[BB12:%.*]], label [[BB9:%.*]]
+; CHECK:       bb8:
+; CHECK-NEXT:    ret i1 false
+; CHECK:       bb9:
+; CHECK-NEXT:    [[FREEZE:%.*]] = freeze ptr [[ARG]]
+; CHECK-NEXT:    store i32 [[LOAD4]], ptr [[FREEZE]], align 4
+; CHECK-NEXT:    [[GETELEMENTPTR10:%.*]] = getelementptr i8, ptr [[FREEZE]], i64 4
+; CHECK-NEXT:    store i32 [[LOAD7]], ptr [[GETELEMENTPTR10]], align 4
+; CHECK-NEXT:    [[GETELEMENTPTR11:%.*]] = getelementptr i8, ptr [[FREEZE]], i64 8
+; CHECK-NEXT:    store i32 [[LOAD5]], ptr [[GETELEMENTPTR11]], align 4
+; CHECK-NEXT:    br label [[BB8:%.*]]
+; CHECK:       bb12:
+; CHECK-NEXT:    [[GETELEMENTPTR13:%.*]] = getelementptr [[STRUCT_ZOT:%.*]], ptr [[ARG2]], i64 [[ARG3]]
+; CHECK-NEXT:    store i32 [[LOAD4]], ptr [[GETELEMENTPTR13]], align 4
+; CHECK-NEXT:    [[GETELEMENTPTR14:%.*]] = getelementptr i8, ptr [[GETELEMENTPTR13]], i64 4
+; CHECK-NEXT:    store i32 [[LOAD7]], ptr [[GETELEMENTPTR14]], align 4
+; CHECK-NEXT:    [[GETELEMENTPTR15:%.*]] = getelementptr i8, ptr [[GETELEMENTPTR13]], i64 8
+; CHECK-NEXT:    store i32 [[LOAD5]], ptr [[GETELEMENTPTR15]], align 4
+; CHECK-NEXT:    br label [[BB8]]
+;
+bb:
+  %load = load ptr, ptr %arg4, align 8
+  %load4 = load i32, ptr %load, align 4
+  %getelementptr = getelementptr i8, ptr %load, i64 4
+  %load5 = load i32, ptr %getelementptr, align 4
+  %getelementptr6 = getelementptr i8, ptr %load, i64 8
+  %load7 = load i32, ptr %getelementptr6, align 4
+  br i1 %arg1, label %bb12, label %bb9
+
+bb8:                                              ; preds = %bb12, %bb9
+  ret i1 false
+
+bb9:                                              ; preds = %bb
+  %freeze = freeze ptr %arg
+  store i32 %load4, ptr %freeze, align 4
+  %getelementptr10 = getelementptr i8, ptr %freeze, i64 4
+  store i32 %load7, ptr %getelementptr10, align 4
+  %getelementptr11 = getelementptr i8, ptr %freeze, i64 8
+  store i32 %load5, ptr %getelementptr11, align 4
+  br label %bb8
+
+bb12:                                             ; preds = %bb
+  %getelementptr13 = getelementptr %struct.zot, ptr %arg2, i64 %arg3
+  store i32 %load4, ptr %getelementptr13, align 4
+  %getelementptr14 = getelementptr i8, ptr %getelementptr13, i64 4
+  store i32 %load7, ptr %getelementptr14, align 4
+  %getelementptr15 = getelementptr i8, ptr %getelementptr13, i64 8
+  store i32 %load5, ptr %getelementptr15, align 4
+  br label %bb8
+}
+
+define void @extract_mask(ptr %object, double %conv503, double %conv520) {
+; CHECK-LABEL: define void @extract_mask(
+; CHECK-SAME: ptr [[OBJECT:%.*]], double [[CONV503:%.*]], double [[CONV520:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[OBJECT]], align 8
+; CHECK-NEXT:    [[BBOX483:%.*]] = getelementptr float, ptr [[TMP0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x float>, ptr [[BBOX483]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = fpext <2 x float> [[TMP1]] to <2 x double>
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> [[TMP3]], double [[CONV503]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = fcmp ogt <2 x double> [[TMP4]], <double 0.000000e+00, double -2.000000e+10>
+; CHECK-NEXT:    [[TMP6:%.*]] = select <2 x i1> [[TMP5]], <2 x double> [[TMP3]], <2 x double> <double 0.000000e+00, double -2.000000e+10>
+; CHECK-NEXT:    [[TMP7:%.*]] = fsub <2 x double> zeroinitializer, [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = fptrunc <2 x double> [[TMP7]] to <2 x float>
+; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <2 x float> [[TMP8]], i32 0
+; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <2 x float> [[TMP8]], i32 1
+; CHECK-NEXT:    [[MUL646:%.*]] = fmul float [[TMP9]], [[TMP10]]
+; CHECK-NEXT:    [[CMP663:%.*]] = fcmp olt float [[MUL646]], 0.000000e+00
+; CHECK-NEXT:    br i1 [[CMP663]], label [[IF_THEN665:%.*]], label [[IF_END668:%.*]]
+; CHECK:       if.then665:
+; CHECK-NEXT:    [[ARRAYIDX656:%.*]] = getelementptr float, ptr [[OBJECT]], i64 10
+; CHECK-NEXT:    [[BBOX651:%.*]] = getelementptr float, ptr [[OBJECT]]
+; CHECK-NEXT:    [[CONV621:%.*]] = fptrunc double [[CONV520]] to float
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <2 x double> [[TMP6]], <2 x double> poison, <2 x i32> <i32 poison, i32 0>
+; CHECK-NEXT:    [[TMP12:%.*]] = insertelement <2 x double> [[TMP11]], double [[CONV503]], i32 0
+; CHECK-NEXT:    [[TMP13:%.*]] = fptrunc <2 x double> [[TMP12]] to <2 x float>
+; CHECK-NEXT:    store <2 x float> [[TMP13]], ptr [[BBOX651]], align 8
+; CHECK-NEXT:    [[BBOX_SROA_8_0_BBOX666_SROA_IDX:%.*]] = getelementptr float, ptr [[OBJECT]], i64 2
+; CHECK-NEXT:    store float [[CONV621]], ptr [[BBOX_SROA_8_0_BBOX666_SROA_IDX]], align 8
+; CHECK-NEXT:    store <2 x float> [[TMP8]], ptr [[ARRAYIDX656]], align 8
+; CHECK-NEXT:    br label [[IF_END668]]
+; CHECK:       if.end668:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = load ptr, ptr %object, align 8
+  %bbox483 = getelementptr float, ptr %0
+  %1 = load float, ptr %bbox483, align 8
+  %conv486 = fpext float %1 to double
+  %cmp487 = fcmp ogt double %conv486, -2.000000e+10
+  %conv486.2 = select i1 %cmp487, double %conv486, double -2.000000e+10
+  %arrayidx502 = getelementptr float, ptr %0, i64 1
+  %2 = load float, ptr %arrayidx502, align 4
+  %conv5033 = fpext float %2 to double
+  %cmp504 = fcmp ogt double %conv503, 0.000000e+00
+  %cond514 = select i1 %cmp504, double %conv5033, double 0.000000e+00
+  %sub626 = fsub double 0.000000e+00, %conv486.2
+  %conv627 = fptrunc double %sub626 to float
+  %sub632 = fsub double 0.000000e+00, %cond514
+  %conv633 = fptrunc double %sub632 to float
+  %mul646 = fmul float %conv633, %conv627
+  %cmp663 = fcmp olt float %mul646, 0.000000e+00
+  br i1 %cmp663, label %if.then665, label %if.end668
+
+if.then665:                                       ; preds = %entry
+  %arrayidx656 = getelementptr float, ptr %object, i64 10
+  %lengths652 = getelementptr float, ptr %object, i64 11
+  %bbox651 = getelementptr float, ptr %object
+  %conv621 = fptrunc double %conv520 to float
+  %conv617 = fptrunc double %cond514 to float
+  %conv613 = fptrunc double %conv503 to float
+  store float %conv613, ptr %bbox651, align 8
+  %bbox.sroa.6.0.bbox666.sroa_idx = getelementptr float, ptr %object, i64 1
+  store float %conv617, ptr %bbox.sroa.6.0.bbox666.sroa_idx, align 4
+  %bbox.sroa.8.0.bbox666.sroa_idx = getelementptr float, ptr %object, i64 2
+  store float %conv621, ptr %bbox.sroa.8.0.bbox666.sroa_idx, align 8
+  store float %conv627, ptr %lengths652, align 4
+  store float %conv633, ptr %arrayidx656, align 8
+  br label %if.end668
+
+if.end668:                                        ; preds = %if.then665, %entry
+  ret void
+}
+
+define void @gather_2(ptr %mat1, float %0, float %1) {
+; CHECK-LABEL: define void @gather_2(
+; CHECK-SAME: ptr [[MAT1:%.*]], float [[TMP0:%.*]], float [[TMP1:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP2:%.*]] = call float @llvm.fmuladd.f32(float [[TMP0]], float 0.000000e+00, float 0.000000e+00)
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x float> poison, float [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x float> [[TMP3]], float [[TMP0]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT:    [[TMP6:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP4]], <2 x float> [[TMP5]], <2 x float> zeroinitializer)
+; CHECK-NEXT:    [[TMP7:%.*]] = fmul float [[TMP2]], 0.000000e+00
+; CHECK-NEXT:    [[ARRAYIDX163:%.*]] = getelementptr [4 x [4 x float]], ptr [[MAT1]], i64 0, i64 1
+; CHECK-NEXT:    [[ARRAYIDX2_I_I_I278:%.*]] = getelementptr [4 x [4 x float]], ptr [[MAT1]], i64 0, i64 1, i64 1
+; CHECK-NEXT:    store float [[TMP7]], ptr [[ARRAYIDX163]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = fmul <2 x float> [[TMP6]], zeroinitializer
+; CHECK-NEXT:    store <2 x float> [[TMP8]], ptr [[ARRAYIDX2_I_I_I278]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %2 = call float @llvm.fmuladd.f32(float %0, float 0.000000e+00, float 0.000000e+00)
+  %3 = call float @llvm.fmuladd.f32(float %1, float %0, float 0.000000e+00)
+  %4 = call float @llvm.fmuladd.f32(float %0, float %1, float 0.000000e+00)
+  %5 = fmul float %2, 0.000000e+00
+  %6 = fmul float %3, 0.000000e+00
+  %7 = fmul float %4, 0.000000e+00
+  %arrayidx163 = getelementptr [4 x [4 x float]], ptr %mat1, i64 0, i64 1
+  %arrayidx2.i.i.i278 = getelementptr [4 x [4 x float]], ptr %mat1, i64 0, i64 1, i64 1
+  %arrayidx5.i.i.i280 = getelementptr [4 x [4 x float]], ptr %mat1, i64 0, i64 1, i64 2
+  store float %5, ptr %arrayidx163, align 4
+  store float %6, ptr %arrayidx2.i.i.i278, align 4
+  store float %7, ptr %arrayidx5.i.i.i280, align 4
+  ret void
+}
+
+define i32 @reorder_indices_1(float %0) {
+; CHECK-LABEL: define i32 @reorder_indices_1(
+; CHECK-SAME: float [[TMP0:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[NOR1:%.*]] = alloca [0 x [3 x float]], i32 0, align 4
+; CHECK-NEXT:    [[ARRAYIDX2_I265:%.*]] = getelementptr float, ptr [[NOR1]], i64 2
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[ARRAYIDX2_I265]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x float>, ptr [[NOR1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x float> [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = fneg float [[TMP3]]
+; CHECK-NEXT:    [[NEG11_I:%.*]] = fmul float [[TMP4]], [[TMP0]]
+; CHECK-NEXT:    [[TMP5:%.*]] = call float @llvm.fmuladd.f32(float [[TMP1]], float 0.000000e+00, float [[NEG11_I]])
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x float> poison, float [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> [[TMP2]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT:    [[TMP8:%.*]] = fneg <2 x float> [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <2 x float> poison, float [[TMP0]], i32 0
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x float> [[TMP9]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP11:%.*]] = fmul <2 x float> [[TMP8]], [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <2 x float> [[TMP11]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT:    [[TMP13:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP2]], <2 x float> zeroinitializer, <2 x float> [[TMP12]])
+; CHECK-NEXT:    [[TMP14:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP10]], <2 x float> [[TMP13]], <2 x float> zeroinitializer)
+; CHECK-NEXT:    [[TMP15:%.*]] = call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP5]], float 0.000000e+00)
+; CHECK-NEXT:    [[TMP16:%.*]] = fmul <2 x float> [[TMP14]], zeroinitializer
+; CHECK-NEXT:    [[MUL6_I_I_I:%.*]] = fmul float [[TMP15]], 0.000000e+00
+; CHECK-NEXT:    store <2 x float> [[TMP16]], ptr [[NOR1]], align 4
+; CHECK-NEXT:    store float [[MUL6_I_I_I]], ptr [[ARRAYIDX2_I265]], align 4
+; CHECK-NEXT:    ret i32 0
+;
+entry:
+  %nor1 = alloca [0 x [3 x float]], i32 0, align 4
+  %arrayidx.i = getelementptr float, ptr %nor1, i64 1
+  %1 = load float, ptr %arrayidx.i, align 4
+  %arrayidx2.i265 = getelementptr float, ptr %nor1, i64 2
+  %2 = load float, ptr %arrayidx2.i265, align 4
+  %3 = fneg float %2
+  %neg.i267 = fmul float %3, %0
+  %4 = call float @llvm.fmuladd.f32(float %1, float 0.000000e+00, float %neg.i267)
+  %5 = load float, ptr %nor1, align 4
+  %6 = fneg float %5
+  %neg11.i = fmul float %6, %0
+  %7 = call float @llvm.fmuladd.f32(float %2, float 0.000000e+00, float %neg11.i)
+  %8 = fneg float %1
+  %neg18.i = fmul float %8, %0
+  %9 = call float @llvm.fmuladd.f32(float %5, float 0.000000e+00, float %neg18.i)
+  %10 = call float @llvm.fmuladd.f32(float %0, float %9, float 0.000000e+00)
+  %11 = call float @llvm.fmuladd.f32(float %0, float %4, float 0.000000e+00)
+  %12 = call float @llvm.fmuladd.f32(float %0, float %7, float 0.000000e+00)
+  %mul.i.i.i = fmul float %10, 0.000000e+00
+  %mul3.i.i.i = fmul float %11, 0.000000e+00
+  %mul6.i.i.i = fmul float %12, 0.000000e+00
+  store float %mul.i.i.i, ptr %nor1, align 4
+  store float %mul3.i.i.i, ptr %arrayidx.i, align 4
+  store float %mul6.i.i.i, ptr %arrayidx2.i265, align 4
+  ret i32 0
+}
+
+define void @reorder_indices_2(ptr %spoint) {
+; CHECK-LABEL: define void @reorder_indices_2(
+; CHECK-SAME: ptr [[SPOINT:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <3 x float> zeroinitializer, i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float 0.000000e+00, float 0.000000e+00)
+; CHECK-NEXT:    [[MUL4_I461:%.*]] = fmul float [[TMP1]], 0.000000e+00
+; CHECK-NEXT:    [[DSCO:%.*]] = getelementptr float, ptr [[SPOINT]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> zeroinitializer, <2 x float> zeroinitializer, <2 x float> zeroinitializer)
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul <2 x float> [[TMP2]], zeroinitializer
+; CHECK-NEXT:    store <2 x float> [[TMP3]], ptr [[DSCO]], align 4
+; CHECK-NEXT:    [[ARRAYIDX5_I476:%.*]] = getelementptr float, ptr [[SPOINT]], i64 2
+; CHECK-NEXT:    store float [[MUL4_I461]], ptr [[ARRAYIDX5_I476]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = extractelement <3 x float> zeroinitializer, i64 1
+  %1 = extractelement <3 x float> zeroinitializer, i64 2
+  %2 = extractelement <3 x float> zeroinitializer, i64 0
+  %3 = tail call float @llvm.fmuladd.f32(float %0, float 0.000000e+00, float 0.000000e+00)
+  %4 = tail call float @llvm.fmuladd.f32(float %1, float 0.000000e+00, float 0.000000e+00)
+  %5 = tail call float @llvm.fmuladd.f32(float %2, float 0.000000e+00, float 0.000000e+00)
+  %mul.i457 = fmul float %3, 0.000000e+00
+  %mul2.i459 = fmul float %4, 0.000000e+00
+  %mul4.i461 = fmul float %5, 0.000000e+00
+  %dsco = getelementptr float, ptr %spoint, i64 0
+  store float %mul.i457, ptr %dsco, align 4
+  %arrayidx3.i474 = getelementptr float, ptr %spoint, i64 1
+  store float %mul2.i459, ptr %arrayidx3.i474, align 4
+  %arrayidx5.i476 = getelementptr float, ptr %spoint, i64 2
+  store float %mul4.i461, ptr %arrayidx5.i476, align 4
+  ret void
+}
+
+define void @reorder_indices_2x_load(ptr %png_ptr, ptr %info_ptr) {
+; CHECK-LABEL: define void @reorder_indices_2x_load(
+; CHECK-SAME: ptr [[PNG_PTR:%.*]], ptr [[INFO_PTR:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[BIT_DEPTH:%.*]] = getelementptr i8, ptr [[INFO_PTR]], i64 0
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[BIT_DEPTH]], align 4
+; CHECK-NEXT:    [[COLOR_TYPE:%.*]] = getelementptr i8, ptr [[INFO_PTR]], i64 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[COLOR_TYPE]], align 1
+; CHECK-NEXT:    [[BIT_DEPTH37_I:%.*]] = getelementptr i8, ptr [[PNG_PTR]], i64 11
+; CHECK-NEXT:    store i8 [[TMP0]], ptr [[BIT_DEPTH37_I]], align 1
+; CHECK-NEXT:    [[COLOR_TYPE39_I:%.*]] = getelementptr i8, ptr [[PNG_PTR]], i64 10
+; CHECK-NEXT:    store i8 [[TMP1]], ptr [[COLOR_TYPE39_I]], align 2
+; CHECK-NEXT:    [[USR_BIT_DEPTH_I:%.*]] = getelementptr i8, ptr [[PNG_PTR]], i64 12
+; CHECK-NEXT:    store i8 [[TMP0]], ptr [[USR_BIT_DEPTH_I]], align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %bit_depth = getelementptr i8, ptr %info_ptr, i64 0
+  %0 = load i8, ptr %bit_depth, align 4
+  %color_type = getelementptr i8, ptr %info_ptr, i64 1
+  %1 = load i8, ptr %color_type, align 1
+  %bit_depth37.i = getelementptr i8, ptr %png_ptr, i64 11
+  store i8 %0, ptr %bit_depth37.i, align 1
+  %color_type39.i = getelementptr i8, ptr %png_ptr, i64 10
+  store i8 %1, ptr %color_type39.i, align 2
+  %usr_bit_depth.i = getelementptr i8, ptr %png_ptr, i64 12
+  store i8 %0, ptr %usr_bit_depth.i, align 8
+  ret void
+}
+
+define void @reuse_shuffle_indidces_1(ptr %col, float %0, float %1) {
+; CHECK-LABEL: define void @reuse_shuffle_indidces_1(
+; CHECK-SAME: ptr [[COL:%.*]], float [[TMP0:%.*]], float [[TMP1:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x float> poison, float [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x float> [[TMP2]], float [[TMP0]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x float> [[TMP3]], zeroinitializer
+; CHECK-NEXT:    [[TMP5:%.*]] = fadd <2 x float> [[TMP4]], zeroinitializer
+; CHECK-NEXT:    store <2 x float> [[TMP5]], ptr [[COL]], align 4
+; CHECK-NEXT:    [[ARRAYIDX33:%.*]] = getelementptr float, ptr [[COL]], i64 2
+; CHECK-NEXT:    [[MUL38:%.*]] = fmul float [[TMP0]], 0.000000e+00
+; CHECK-NEXT:    [[TMP6:%.*]] = fadd float [[MUL38]], 0.000000e+00
+; CHECK-NEXT:    store float [[TMP6]], ptr [[ARRAYIDX33]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %mul24 = fmul float %1, 0.000000e+00
+  %2 = fadd float %mul24, 0.000000e+00
+  store float %2, ptr %col, align 4
+  %arrayidx26 = getelementptr float, ptr %col, i64 1
+  %mul31 = fmul float %0, 0.000000e+00
+  %3 = fadd float %mul31, 0.000000e+00
+  store float %3, ptr %arrayidx26, align 4
+  %arrayidx33 = getelementptr float, ptr %col, i64 2
+  %mul38 = fmul float %0, 0.000000e+00
+  %4 = fadd float %mul38, 0.000000e+00
+  store float %4, ptr %arrayidx33, align 4
+  ret void
+}
+
+define void @reuse_shuffle_indices_2(ptr %inertia, double %0) {
+; CHECK-LABEL: define void @reuse_shuffle_indices_2(
+; CHECK-SAME: ptr [[INERTIA:%.*]], double [[TMP0:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> poison, double [[TMP0]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = fptrunc <2 x double> [[TMP2]] to <2 x float>
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x float> [[TMP3]], zeroinitializer
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 1, i32 poison>
+; CHECK-NEXT:    [[TMP6:%.*]] = fadd <4 x float> [[TMP5]], <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>
+; CHECK-NEXT:    [[TMP7:%.*]] = fmul <4 x float> [[TMP6]], <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>
+; CHECK-NEXT:    [[TMP8:%.*]] = fadd <4 x float> [[TMP7]], <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x float> [[TMP8]], <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; CHECK-NEXT:    store <3 x float> [[TMP9]], ptr [[INERTIA]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %1 = insertelement <2 x double> poison, double %0, i32 0
+  %2 = shufflevector <2 x double> %1, <2 x double> poison, <2 x i32> zeroinitializer
+  %3 = fptrunc <2 x double> %2 to <2 x float>
+  %4 = fmul <2 x float> %3, zeroinitializer
+  %5 = shufflevector <2 x float> %4, <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 1, i32 poison>
+  %6 = fadd <4 x float> %5, <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>
+  %7 = fmul <4 x float> %6, <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>
+  %8 = fadd <4 x float> %7, <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>
+  %9 = shufflevector <4 x float> %8, <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+  store <3 x float> %9, ptr %inertia, align 4
+  ret void
+}
+
+define void @reuse_shuffle_indices_cost_crash_2(ptr %bezt, float %0) {
+; CHECK-LABEL: define void @reuse_shuffle_indices_cost_crash_2(
+; CHECK-SAME: ptr [[BEZT:%.*]], float [[TMP0:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[FNEG:%.*]] = fmul float [[TMP0]], 0.000000e+00
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float [[FNEG]], float 0.000000e+00)
+; CHECK-NEXT:    store float [[TMP1]], ptr [[BEZT]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float [[FNEG]], float 0.000000e+00)
+; CHECK-NEXT:    [[ARRAYIDX5_I:%.*]] = getelementptr float, ptr [[BEZT]], i64 1
+; CHECK-NEXT:    store float [[TMP2]], ptr [[ARRAYIDX5_I]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = tail call float @llvm.fmuladd.f32(float [[FNEG]], float 0.000000e+00, float 0.000000e+00)
+; CHECK-NEXT:    [[ARRAYIDX8_I831:%.*]] = getelementptr float, ptr [[BEZT]], i64 2
+; CHECK-NEXT:    store float [[TMP3]], ptr [[ARRAYIDX8_I831]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %fneg = fmul float %0, 0.000000e+00
+  %1 = tail call float @llvm.fmuladd.f32(float %0, float %fneg, float 0.000000e+00)
+  store float %1, ptr %bezt, align 4
+  %2 = tail call float @llvm.fmuladd.f32(float %0, float %fneg, float 0.000000e+00)
+  %arrayidx5.i = getelementptr float, ptr %bezt, i64 1
+  store float %2, ptr %arrayidx5.i, align 4
+  %3 = tail call float @llvm.fmuladd.f32(float %fneg, float 0.000000e+00, float 0.000000e+00)
+  %arrayidx8.i831 = getelementptr float, ptr %bezt, i64 2
+  store float %3, ptr %arrayidx8.i831, align 4
+  ret void
+}
+
+define void @reuse_shuffle_indices_cost_crash_3(ptr %m, double %conv, double %conv2) {
+; CHECK-LABEL: define void @reuse_shuffle_indices_cost_crash_3(
+; CHECK-SAME: ptr [[M:%.*]], double [[CONV:%.*]], double [[CONV2:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[SUB19:%.*]] = fsub double 0.000000e+00, [[CONV2]]
+; CHECK-NEXT:    [[CONV20:%.*]] = fptrunc double [[SUB19]] to float
+; CHECK-NEXT:    store float [[CONV20]], ptr [[M]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], 0.000000e+00
+; CHECK-NEXT:    [[CONV239:%.*]] = fptrunc double [[ADD]] to float
+; CHECK-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr [4 x float], ptr [[M]], i64 0, i64 1
+; CHECK-NEXT:    store float [[CONV239]], ptr [[ARRAYIDX25]], align 4
+; CHECK-NEXT:    [[ADD26:%.*]] = fsub double [[CONV]], [[CONV]]
+; CHECK-NEXT:    [[CONV27:%.*]] = fptrunc double [[ADD26]] to float
+; CHECK-NEXT:    [[ARRAYIDX29:%.*]] = getelementptr [4 x float], ptr [[M]], i64 0, i64 2
+; CHECK-NEXT:    store float [[CONV27]], ptr [[ARRAYIDX29]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %sub19 = fsub double 0.000000e+00, %conv2
+  %conv20 = fptrunc double %sub19 to float
+  store float %conv20, ptr %m, align 4
+  %add = fadd double %conv, 0.000000e+00
+  %conv239 = fptrunc double %add to float
+  %arrayidx25 = getelementptr [4 x float], ptr %m, i64 0, i64 1
+  store float %conv239, ptr %arrayidx25, align 4
+  %add26 = fsub double %conv, %conv
+  %conv27 = fptrunc double %add26 to float
+  %arrayidx29 = getelementptr [4 x float], ptr %m, i64 0, i64 2
+  store float %conv27, ptr %arrayidx29, align 4
+  ret void
+}
+
+define void @reuse_shuffle_indices_cost_crash_4(double %conv7.i) {
+; CHECK-LABEL: define void @reuse_shuffle_indices_cost_crash_4(
+; CHECK-SAME: double [[CONV7_I:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[DATA_I111:%.*]] = alloca [0 x [0 x [0 x [3 x float]]]], i32 0, align 4
+; CHECK-NEXT:    [[ARRAYIDX_2_I:%.*]] = getelementptr [3 x float], ptr [[DATA_I111]], i64 0, i64 2
+; CHECK-NEXT:    [[MUL17_I_US:%.*]] = fmul double [[CONV7_I]], 0.000000e+00
+; CHECK-NEXT:    [[MUL_2_I_I_US:%.*]] = fmul double [[MUL17_I_US]], 0.000000e+00
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> poison, double [[CONV7_I]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP2:%.*]] = fadd <2 x double> [[TMP1]], zeroinitializer
+; CHECK-NEXT:    [[ADD_2_I_I_US:%.*]] = fadd double [[MUL_2_I_I_US]], 0.000000e+00
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul <2 x double> [[TMP2]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP3]], zeroinitializer
+; CHECK-NEXT:    [[TMP5:%.*]] = fptrunc <2 x double> [[TMP4]] to <2 x float>
+; CHECK-NEXT:    store <2 x float> [[TMP5]], ptr [[DATA_I111]], align 4
+; CHECK-NEXT:    [[CONV_2_I46_US:%.*]] = fptrunc double [[ADD_2_I_I_US]] to float
+; CHECK-NEXT:    store float [[CONV_2_I46_US]], ptr [[ARRAYIDX_2_I]], align 4
+; CHECK-NEXT:    [[CALL2_I_US:%.*]] = load volatile ptr, ptr [[DATA_I111]], align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %data.i111 = alloca [0 x [0 x [0 x [3 x float]]]], i32 0, align 4
+  %arrayidx.1.i = getelementptr [3 x float], ptr %data.i111, i64 0, i64 1
+  %arrayidx.2.i = getelementptr [3 x float], ptr %data.i111, i64 0, i64 2
+  %mul17.i.us = fmul double %conv7.i, 0.000000e+00
+  %mul.2.i.i.us = fmul double %mul17.i.us, 0.000000e+00
+  %add.i.i82.i.us = fadd double %conv7.i, 0.000000e+00
+  %add.1.i.i84.i.us = fadd double %conv7.i, 0.000000e+00
+  %mul.i.i91.i.us = fmul double %add.i.i82.i.us, %conv7.i
+  %mul.1.i.i92.i.us = fmul double %add.1.i.i84.i.us, %conv7.i
+  %add.i96.i.us = fadd double %mul.i.i91.i.us, 0.000000e+00
+  %add.1.i.i.us = fadd double %mul.1.i.i92.i.us, 0.000000e+00
+  %add.2.i.i.us = fadd double %mul.2.i.i.us, 0.000000e+00
+  %conv.i42.us = fptrunc double %add.i96.i.us to float
+  store float %conv.i42.us, ptr %data.i111, align 4
+  %conv.1.i44.us = fptrunc double %add.1.i.i.us to float
+  store float %conv.1.i44.us, ptr %arrayidx.1.i, align 4
+  %conv.2.i46.us = fptrunc double %add.2.i.i.us to float
+  store float %conv.2.i46.us, ptr %arrayidx.2.i, align 4
+  %call2.i.us = load volatile ptr, ptr %data.i111, align 8
+  ret void
+}
+
+define void @common_mask(ptr %m, double %conv, double %conv2) {
+; CHECK-LABEL: define void @common_mask(
+; CHECK-SAME: ptr [[M:%.*]], double [[CONV:%.*]], double [[CONV2:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[SUB19:%.*]] = fsub double [[CONV]], [[CONV]]
+; CHECK-NEXT:    [[CONV20:%.*]] = fptrunc double [[SUB19]] to float
+; CHECK-NEXT:    store float [[CONV20]], ptr [[M]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = fadd double [[CONV2]], 0.000000e+00
+; CHECK-NEXT:    [[CONV239:%.*]] = fptrunc double [[ADD]] to float
+; CHECK-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr [4 x float], ptr [[M]], i64 0, i64 1
+; CHECK-NEXT:    store float [[CONV239]], ptr [[ARRAYIDX25]], align 4
+; CHECK-NEXT:    [[ADD26:%.*]] = fsub double 0.000000e+00, [[CONV]]
+; CHECK-NEXT:    [[CONV27:%.*]] = fptrunc double [[ADD26]] to float
+; CHECK-NEXT:    [[ARRAYIDX29:%.*]] = getelementptr [4 x float], ptr [[M]], i64 0, i64 2
+; CHECK-NEXT:    store float [[CONV27]], ptr [[ARRAYIDX29]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %sub19 = fsub double %conv, %conv
+  %conv20 = fptrunc double %sub19 to float
+  store float %conv20, ptr %m, align 4
+  %add = fadd double %conv2, 0.000000e+00
+  %conv239 = fptrunc double %add to float
+  %arrayidx25 = getelementptr [4 x float], ptr %m, i64 0, i64 1
+  store float %conv239, ptr %arrayidx25, align 4
+  %add26 = fsub double 0.000000e+00, %conv
+  %conv27 = fptrunc double %add26 to float
+  %arrayidx29 = getelementptr [4 x float], ptr %m, i64 0, i64 2
+  store float %conv27, ptr %arrayidx29, align 4
+  ret void
+}
+
+define void @vec3_extract(<3 x i16> %pixel.sroa.0.4.vec.insert606, ptr %call3.i536) {
+; CHECK-LABEL: define void @vec3_extract(
+; CHECK-SAME: <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606:%.*]], ptr [[CALL3_I536:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PIXEL_SROA_0_4_VEC_EXTRACT:%.*]] = extractelement <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606]], i64 2
+; CHECK-NEXT:    [[RED668:%.*]] = getelementptr i16, ptr [[CALL3_I536]], i64 2
+; CHECK-NEXT:    store i16 [[PIXEL_SROA_0_4_VEC_EXTRACT]], ptr [[RED668]], align 2
+; CHECK-NEXT:    [[PIXEL_SROA_0_2_VEC_EXTRACT:%.*]] = extractelement <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606]], i64 1
+; CHECK-NEXT:    [[GREEN670:%.*]] = getelementptr i16, ptr [[CALL3_I536]], i64 1
+; CHECK-NEXT:    store i16 [[PIXEL_SROA_0_2_VEC_EXTRACT]], ptr [[GREEN670]], align 2
+; CHECK-NEXT:    [[PIXEL_SROA_0_0_VEC_EXTRACT:%.*]] = extractelement <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606]], i64 0
+; CHECK-NEXT:    store i16 [[PIXEL_SROA_0_0_VEC_EXTRACT]], ptr [[CALL3_I536]], align 2
+; CHECK-NEXT:    ret void
+;
+entry:
+  %pixel.sroa.0.4.vec.extract = extractelement <3 x i16> %pixel.sroa.0.4.vec.insert606, i64 2
+  %red668 = getelementptr i16, ptr %call3.i536, i64 2
+  store i16 %pixel.sroa.0.4.vec.extract, ptr %red668, align 2
+  %pixel.sroa.0.2.vec.extract = extractelement <3 x i16> %pixel.sroa.0.4.vec.insert606, i64 1
+  %green670 = getelementptr i16, ptr %call3.i536, i64 1
+  store i16 %pixel.sroa.0.2.vec.extract, ptr %green670, align 2
+  %pixel.sroa.0.0.vec.extract = extractelement <3 x i16> %pixel.sroa.0.4.vec.insert606, i64 0
+  store i16 %pixel.sroa.0.0.vec.extract, ptr %call3.i536, align 2
+  ret void
+}
+
+declare float @llvm.fmuladd.f32(float, float, float)

From ffbf881029054001c0c5700386763fc07394afbf Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Thu, 11 Jan 2024 15:48:08 +0000
Subject: [PATCH 2/2] [SLP] Vectorize non-power-of-2 ops with padding.

This patch introduces a new VectorizeWithPadding node type for root and
leaf nodes to allow vectorizing loads/stores with a non-power-of-2 number
of elements.

VectorizeWithPadding load nodes will pad the result to the next power of 2
with poison elements.

Non-leaf nodes will operate on normal power-of-2 vectors. For those
non-leaf nodes, we still track the number of padding elements needed to
reach the next power-of-2, which is used in various places, such as cost
computation.

VectorizeWithPadding store nodes strip away the padding elements and
store the non-power-of-2 number of data elements.

Note that re-ordering and shuffling are not yet implemented for nodes
requiring padding, to keep the initial implementation simpler.

The initial implementation also only tries to vectorize with padding if
the original number of elements + 1 is a power-of-2, i.e. if only a single
padding element is needed (e.g. groups of 3, 7, or 15 elements).

The feature is guarded by a new flag, off by default for now.
---
 .../Transforms/Vectorize/SLPVectorizer.cpp    | 281 +++++++++++---
 .../SLPVectorizer/AArch64/vec15-base.ll       |  90 +++--
 .../SLPVectorizer/AArch64/vec3-base.ll        | 239 ++++++++----
 .../SLPVectorizer/AArch64/vec3-calls.ll       |   6 +-
 .../AArch64/vec3-reorder-reshuffle.ll         | 119 ++++--
 .../Transforms/SLPVectorizer/X86/odd_store.ll |  69 ++--
 .../X86/vect_copyable_in_binops.ll            | 351 ++++++++++--------
 7 files changed, 807 insertions(+), 348 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 055fbb00871f89..a281ec3acb3b46 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -179,6 +179,10 @@ static cl::opt<bool>
     ViewSLPTree("view-slp-tree", cl::Hidden,
                 cl::desc("Display the SLP trees with Graphviz"));
 
+static cl::opt<bool> VectorizeWithPadding(
+    "slp-vectorize-with-padding", cl::init(false), cl::Hidden,
+    cl::desc("Try to vectorize non-power-of-2 operations using padding."));
+
 // Limit the number of alias checks. The limit is chosen so that
 // it has no negative effect on the llvm benchmarks.
 static const unsigned AliasedCheckLimit = 10;
@@ -2557,7 +2561,7 @@ class BoUpSLP {
     unsigned getVectorFactor() const {
       if (!ReuseShuffleIndices.empty())
         return ReuseShuffleIndices.size();
-      return Scalars.size();
+      return Scalars.size() + getNumPadding();
     };
 
     /// A vector of scalars.
@@ -2574,6 +2578,7 @@ class BoUpSLP {
     /// intrinsics for store/load)?
     enum EntryState {
       Vectorize,
+      VectorizeWithPadding,
       ScatterVectorize,
       PossibleStridedVectorize,
       NeedToGather
@@ -2611,6 +2616,9 @@ class BoUpSLP {
     Instruction *MainOp = nullptr;
     Instruction *AltOp = nullptr;
 
+    /// The number of padding lanes (containing poison).
+    unsigned NumPadding = 0;
+
   public:
     /// Set this bundle's \p OpIdx'th operand to \p OpVL.
     void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
@@ -2733,6 +2741,15 @@ class BoUpSLP {
                           SmallVectorImpl<Value *> *OpScalars = nullptr,
                           SmallVectorImpl<Value *> *AltScalars = nullptr) const;
 
+    /// Set the number of padding lanes for this node.
+    void setNumPadding(unsigned Padding) {
+      assert(NumPadding == 0 && "Cannot change padding more than once.");
+      NumPadding = Padding;
+    }
+
+    /// Return the number of padding lanes (containing poison) for this node.
+    unsigned getNumPadding() const { return NumPadding; }
+
 #ifndef NDEBUG
     /// Debug printer.
     LLVM_DUMP_METHOD void dump() const {
@@ -2750,6 +2767,9 @@ class BoUpSLP {
       case Vectorize:
         dbgs() << "Vectorize\n";
         break;
+      case VectorizeWithPadding:
+        dbgs() << "VectorizeWithPadding\n";
+        break;
       case ScatterVectorize:
         dbgs() << "ScatterVectorize\n";
         break;
@@ -2790,6 +2810,8 @@ class BoUpSLP {
       for (const auto &EInfo : UserTreeIndices)
         dbgs() << EInfo << ", ";
       dbgs() << "\n";
+      if (getNumPadding() > 0)
+        dbgs() << "Padding: " << getNumPadding() << "\n";
     }
 #endif
   };
@@ -2891,9 +2913,19 @@ class BoUpSLP {
           ValueToGatherNodes.try_emplace(V).first->getSecond().insert(Last);
     }
 
-    if (UserTreeIdx.UserTE)
+    if (UserTreeIdx.UserTE) {
       Last->UserTreeIndices.push_back(UserTreeIdx);
-
+      if (!isPowerOf2_32(Last->Scalars.size()) &&
+          Last->State != TreeEntry::VectorizeWithPadding) {
+        if (UserTreeIdx.UserTE->State == TreeEntry::VectorizeWithPadding)
+          Last->setNumPadding(1);
+        else {
+          Last->setNumPadding(UserTreeIdx.UserTE->getNumPadding());
+          assert((Last->getNumPadding() == 0 || Last->ReorderIndices.empty()) &&
+                 "Reordering isn't implemented for nodes with padding yet");
+        }
+      }
+    }
     return Last;
   }
 
@@ -2921,7 +2953,8 @@ class BoUpSLP {
   /// and fills required data before actual scheduling of the instructions.
   TreeEntry::EntryState getScalarsVectorizationState(
       InstructionsState &S, ArrayRef<Value *> VL, bool IsScatterVectorizeUserTE,
-      OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps) const;
+      OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps,
+      bool HasPadding) const;
 
   /// Maps a specific scalar to its tree entry.
   SmallDenseMap<Value *, TreeEntry *> ScalarToTreeEntry;
@@ -3822,6 +3855,7 @@ namespace {
 enum class LoadsState {
   Gather,
   Vectorize,
+  VectorizeWithPadding,
   ScatterVectorize,
   PossibleStridedVectorize
 };
@@ -3898,8 +3932,10 @@ static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0,
       std::optional<int> Diff =
           getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE);
       // Check that the sorted loads are consecutive.
+      bool NeedsPadding = !isPowerOf2_32(VL.size());
       if (static_cast<unsigned>(*Diff) == VL.size() - 1)
-        return LoadsState::Vectorize;
+        return NeedsPadding ? LoadsState::VectorizeWithPadding
+                            : LoadsState::Vectorize;
       // Simple check if not a strided access - clear order.
       IsPossibleStrided = *Diff % (VL.size() - 1) == 0;
     }
@@ -4534,7 +4570,8 @@ void BoUpSLP::reorderTopToBottom() {
         continue;
       }
       if ((TE->State == TreeEntry::Vectorize ||
-           TE->State == TreeEntry::PossibleStridedVectorize) &&
+           TE->State == TreeEntry::PossibleStridedVectorize ||
+           TE->State == TreeEntry::VectorizeWithPadding) &&
           isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst,
               InsertElementInst>(TE->getMainOp()) &&
           !TE->isAltShuffle()) {
@@ -4568,6 +4605,10 @@ bool BoUpSLP::canReorderOperands(
     TreeEntry *UserTE, SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
     ArrayRef<TreeEntry *> ReorderableGathers,
     SmallVectorImpl<TreeEntry *> &GatherOps) {
+  // Reordering isn't implemented for nodes with padding yet.
+  if (UserTE->getNumPadding() > 0)
+    return false;
+
   for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) {
     if (any_of(Edges, [I](const std::pair<unsigned, TreeEntry *> &OpData) {
           return OpData.first == I &&
@@ -4746,6 +4787,10 @@ void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
         auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0));
         const auto &&AllowsReordering = [IgnoreReorder, &GathersToOrders](
                                             const TreeEntry *TE) {
+          // Reordering for nodes with padding not implemented yet.
+          if (TE->getNumPadding() > 0 ||
+              TE->State == TreeEntry::VectorizeWithPadding)
+            return false;
           if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
               (TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) ||
               (IgnoreReorder && TE->Idx == 0))
@@ -5233,7 +5278,8 @@ static bool isAlternateInstruction(const Instruction *I,
 
 BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState(
     InstructionsState &S, ArrayRef<Value *> VL, bool IsScatterVectorizeUserTE,
-    OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps) const {
+    OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps,
+    bool HasPadding) const {
   assert(S.MainOp && "Expected instructions with same/alternate opcodes only.");
 
   unsigned ShuffleOrOp =
@@ -5256,7 +5302,7 @@ BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState(
   }
   case Instruction::ExtractValue:
   case Instruction::ExtractElement: {
-    bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
+    bool Reuse = !HasPadding && canReuseExtract(VL, VL0, CurrentOrder);
     if (Reuse || !CurrentOrder.empty())
       return TreeEntry::Vectorize;
     LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
@@ -5294,6 +5340,8 @@ BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState(
                               PointerOps)) {
     case LoadsState::Vectorize:
       return TreeEntry::Vectorize;
+    case LoadsState::VectorizeWithPadding:
+      return TreeEntry::VectorizeWithPadding;
     case LoadsState::ScatterVectorize:
       return TreeEntry::ScatterVectorize;
     case LoadsState::PossibleStridedVectorize:
@@ -5353,6 +5401,15 @@ BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState(
     }
     return TreeEntry::Vectorize;
   }
+  case Instruction::UDiv:
+  case Instruction::SDiv:
+  case Instruction::URem:
+  case Instruction::SRem:
+    // The instruction may trigger immediate UB on the poison/undef padding
+    // elements, so force gather to avoid introducing new UB.
+    if (HasPadding)
+      return TreeEntry::NeedToGather;
+    [[fallthrough]];
   case Instruction::Select:
   case Instruction::FNeg:
   case Instruction::Add:
@@ -5361,11 +5418,7 @@ BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState(
   case Instruction::FSub:
   case Instruction::Mul:
   case Instruction::FMul:
-  case Instruction::UDiv:
-  case Instruction::SDiv:
   case Instruction::FDiv:
-  case Instruction::URem:
-  case Instruction::SRem:
   case Instruction::FRem:
   case Instruction::Shl:
   case Instruction::LShr:
@@ -5548,6 +5601,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
                                  bool DoNotFail = false) {
     // Check that every instruction appears once in this bundle.
     DenseMap<Value *, unsigned> UniquePositions(VL.size());
+    auto OriginalVL = VL;
     for (Value *V : VL) {
       if (isConstant(V)) {
         ReuseShuffleIndicies.emplace_back(
@@ -5560,6 +5614,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       if (Res.second)
         UniqueValues.emplace_back(V);
     }
+
     size_t NumUniqueScalarValues = UniqueValues.size();
     if (NumUniqueScalarValues == VL.size()) {
       ReuseShuffleIndicies.clear();
@@ -5587,6 +5642,15 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
             NonUniqueValueVL.append(PWSz - UniqueValues.size(),
                                     UniqueValues.back());
             VL = NonUniqueValueVL;
+
+            if (UserTreeIdx.UserTE &&
+                UserTreeIdx.UserTE->getNumPadding() != 0) {
+              LLVM_DEBUG(dbgs() << "SLP: Reshuffling scalars not yet supported "
+                                   "for nodes with padding.\n");
+              newTreeEntry(OriginalVL, std::nullopt /*not vectorized*/, S,
+                           UserTreeIdx);
+              return false;
+            }
           }
           return true;
         }
@@ -5595,6 +5659,13 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         return false;
       }
       VL = UniqueValues;
+      if (UserTreeIdx.UserTE && UserTreeIdx.UserTE->getNumPadding() != 0) {
+        LLVM_DEBUG(dbgs() << "SLP: Reshuffling scalars not yet supported for "
+                             "nodes with padding.\n");
+        newTreeEntry(OriginalVL, std::nullopt /*not vectorized*/, S,
+                     UserTreeIdx);
+        return false;
+      }
     }
     return true;
   };
@@ -5859,7 +5930,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
   OrdersType CurrentOrder;
   SmallVector<Value *> PointerOps;
   TreeEntry::EntryState State = getScalarsVectorizationState(
-      S, VL, IsScatterVectorizeUserTE, CurrentOrder, PointerOps);
+      S, VL, IsScatterVectorizeUserTE, CurrentOrder, PointerOps,
+      UserTreeIdx.UserTE && UserTreeIdx.UserTE->getNumPadding() > 0);
   if (State == TreeEntry::NeedToGather) {
     newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
                  ReuseShuffleIndicies);
@@ -6001,16 +6073,25 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       fixupOrderingIndices(CurrentOrder);
       switch (State) {
       case TreeEntry::Vectorize:
+      case TreeEntry::VectorizeWithPadding:
         if (CurrentOrder.empty()) {
           // Original loads are consecutive and does not require reordering.
-          TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
+          TE = newTreeEntry(VL, State, Bundle, S, UserTreeIdx,
                             ReuseShuffleIndicies);
-          LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
+          LLVM_DEBUG(dbgs() << "SLP: added a vector of loads"
+                            << (State == TreeEntry::VectorizeWithPadding
+                                    ? " with padding"
+                                    : "")
+                            << ".\n");
         } else {
           // Need to reorder.
-          TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
+          TE = newTreeEntry(VL, State, Bundle, S, UserTreeIdx,
                             ReuseShuffleIndicies, CurrentOrder);
-          LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
+          LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads"
+                            << (State == TreeEntry::VectorizeWithPadding
+                                    ? " with padding"
+                                    : "")
+                            << ".\n");
         }
         TE->setOperandsInOrder();
         break;
@@ -6211,21 +6292,32 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         *OIter = SI->getValueOperand();
         ++OIter;
       }
+      TreeEntry::EntryState State = isPowerOf2_32(VL.size())
+                                        ? TreeEntry::Vectorize
+                                        : TreeEntry::VectorizeWithPadding;
       // Check that the sorted pointer operands are consecutive.
       if (CurrentOrder.empty()) {
         // Original stores are consecutive and does not require reordering.
-        TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
+        TreeEntry *TE = newTreeEntry(VL, State, Bundle, S, UserTreeIdx,
                                      ReuseShuffleIndicies);
         TE->setOperandsInOrder();
         buildTree_rec(Operands, Depth + 1, {TE, 0});
-        LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
+        LLVM_DEBUG(dbgs() << "SLP: added a vector of stores"
+                          << (State == TreeEntry::VectorizeWithPadding
+                                  ? " with padding"
+                                  : "")
+                          << ".\n");
       } else {
         fixupOrderingIndices(CurrentOrder);
-        TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
+        TreeEntry *TE = newTreeEntry(VL, State, Bundle, S, UserTreeIdx,
                                      ReuseShuffleIndicies, CurrentOrder);
         TE->setOperandsInOrder();
         buildTree_rec(Operands, Depth + 1, {TE, 0});
-        LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
+        LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores"
+                          << (State == TreeEntry::VectorizeWithPadding
+                                  ? " with padding"
+                                  : "")
+                          << ".\n");
       }
       return;
     }
@@ -6955,7 +7047,8 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
     return Constant::getAllOnesValue(Ty);
   }
 
-  InstructionCost getBuildVectorCost(ArrayRef<Value *> VL, Value *Root) {
+  InstructionCost getBuildVectorCost(ArrayRef<Value *> VL, Value *Root,
+                                     bool WithPadding = false) {
     if ((!Root && allConstant(VL)) || all_of(VL, UndefValue::classof))
       return TTI::TCC_Free;
     auto *VecTy = FixedVectorType::get(VL.front()->getType(), VL.size());
@@ -6966,7 +7059,7 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
     InstructionsState S = getSameOpcode(VL, *R.TLI);
     const unsigned Sz = R.DL->getTypeSizeInBits(VL.front()->getType());
     unsigned MinVF = R.getMinVF(2 * Sz);
-    if (VL.size() > 2 &&
+    if (!WithPadding && VL.size() > 2 &&
         ((S.getOpcode() == Instruction::Load && !S.isAltShuffle()) ||
          (InVectors.empty() &&
           any_of(seq<unsigned>(0, VL.size() / MinVF),
@@ -7002,6 +7095,7 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
                                   *R.LI, *R.TLI, CurrentOrder, PointerOps);
             switch (LS) {
             case LoadsState::Vectorize:
+            case LoadsState::VectorizeWithPadding:
             case LoadsState::ScatterVectorize:
             case LoadsState::PossibleStridedVectorize:
               // Mark the vectorized loads so that we don't vectorize them
@@ -7077,7 +7171,7 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
         }
         GatherCost -= ScalarsCost;
       }
-    } else if (!Root && isSplat(VL)) {
+    } else if (!WithPadding && !Root && isSplat(VL)) {
       // Found the broadcasting of the single scalar, calculate the cost as
       // the broadcast.
       const auto *It =
@@ -7638,8 +7732,8 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
         CommonMask[Idx] = Mask[Idx] + VF;
   }
   Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0,
-                Value *Root = nullptr) {
-    Cost += getBuildVectorCost(VL, Root);
+                Value *Root = nullptr, bool WithPadding = false) {
+    Cost += getBuildVectorCost(VL, Root, WithPadding);
     if (!Root) {
       // FIXME: Need to find a way to avoid use of getNullValue here.
       SmallVector<Constant *> Vals;
@@ -7743,7 +7837,7 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
   }
   if (!FixedVectorType::isValidElementType(ScalarTy))
     return InstructionCost::getInvalid();
-  auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
+  auto *VecTy = FixedVectorType::get(ScalarTy, VL.size() + E->getNumPadding());
   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
 
   // If we have computed a smaller type for the expression, update VecTy so
@@ -7751,7 +7845,7 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
   auto It = MinBWs.find(E);
   if (It != MinBWs.end()) {
     ScalarTy = IntegerType::get(F->getContext(), It->second.first);
-    VecTy = FixedVectorType::get(ScalarTy, VL.size());
+    VecTy = FixedVectorType::get(ScalarTy, VL.size() + E->getNumPadding());
   }
   unsigned EntryVF = E->getVectorFactor();
   auto *FinalVecTy = FixedVectorType::get(ScalarTy, EntryVF);
@@ -7785,6 +7879,7 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
     CommonCost =
         TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask);
   assert((E->State == TreeEntry::Vectorize ||
+          E->State == TreeEntry::VectorizeWithPadding ||
           E->State == TreeEntry::ScatterVectorize ||
           E->State == TreeEntry::PossibleStridedVectorize) &&
          "Unhandled state");
@@ -7890,7 +7985,8 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
     // loads) or (2) when Ptrs are the arguments of loads or stores being
     // vectorized as plane wide unit-stride load/store since all the
     // loads/stores are known to be from/to adjacent locations.
-    assert(E->State == TreeEntry::Vectorize &&
+    assert((E->State == TreeEntry::Vectorize ||
+            E->State == TreeEntry::VectorizeWithPadding) &&
            "Entry state expected to be Vectorize here.");
     if (isa<LoadInst, StoreInst>(VL0)) {
       // Case 2: estimate costs for pointer related costs when vectorizing to
@@ -8146,7 +8242,8 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
   case Instruction::BitCast: {
     auto SrcIt = MinBWs.find(getOperandEntry(E, 0));
     Type *SrcScalarTy = VL0->getOperand(0)->getType();
-    auto *SrcVecTy = FixedVectorType::get(SrcScalarTy, VL.size());
+    auto *SrcVecTy =
+        FixedVectorType::get(SrcScalarTy, VL.size() + E->getNumPadding());
     unsigned Opcode = ShuffleOrOp;
     unsigned VecOpcode = Opcode;
     if (!ScalarTy->isFloatingPointTy() && !SrcScalarTy->isFloatingPointTy() &&
@@ -8156,7 +8253,8 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
       if (SrcIt != MinBWs.end()) {
         SrcBWSz = SrcIt->second.first;
         SrcScalarTy = IntegerType::get(F->getContext(), SrcBWSz);
-        SrcVecTy = FixedVectorType::get(SrcScalarTy, VL.size());
+        SrcVecTy =
+            FixedVectorType::get(SrcScalarTy, VL.size() + E->getNumPadding());
       }
       unsigned BWSz = DL->getTypeSizeInBits(ScalarTy);
       if (BWSz == SrcBWSz) {
@@ -8299,10 +8397,19 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
     auto *LI0 = cast<LoadInst>(VL0);
     auto GetVectorCost = [&](InstructionCost CommonCost) {
       InstructionCost VecLdCost;
-      if (E->State == TreeEntry::Vectorize) {
+      if (E->State == TreeEntry::Vectorize ||
+          E->State == TreeEntry::VectorizeWithPadding) {
         VecLdCost = TTI->getMemoryOpCost(
             Instruction::Load, VecTy, LI0->getAlign(),
             LI0->getPointerAddressSpace(), CostKind, TTI::OperandValueInfo());
+        if (E->State == TreeEntry::VectorizeWithPadding) {
+          auto *SrcTy =
+              FixedVectorType::get(VecTy->getElementType(), VL.size() + 1);
+          SmallVector<int> Mask(VL.size() + 1, PoisonMaskElem);
+          std::iota(Mask.begin(), Mask.begin() + VL.size(), 0);
+          VecLdCost += TTI->getShuffleCost(TTI::SK_InsertSubvector, SrcTy, Mask,
+                                           TTI::TCK_RecipThroughput, 0, VecTy);
+        }
       } else {
         assert((E->State == TreeEntry::ScatterVectorize ||
                 E->State == TreeEntry::PossibleStridedVectorize) &&
@@ -8345,10 +8452,20 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
     auto GetVectorCost = [=](InstructionCost CommonCost) {
       // We know that we can merge the stores. Calculate the cost.
       TTI::OperandValueInfo OpInfo = getOperandInfo(E->getOperand(0));
-      return TTI->getMemoryOpCost(Instruction::Store, VecTy, BaseSI->getAlign(),
-                                  BaseSI->getPointerAddressSpace(), CostKind,
-                                  OpInfo) +
-             CommonCost;
+      InstructionCost Cost =
+          TTI->getMemoryOpCost(Instruction::Store, VecTy, BaseSI->getAlign(),
+                               BaseSI->getPointerAddressSpace(), CostKind,
+                               OpInfo) +
+          CommonCost;
+      if (E->State == TreeEntry::VectorizeWithPadding) {
+        auto *SrcTy =
+            FixedVectorType::get(VecTy->getElementType(), VL.size() + 1);
+        SmallVector<int> Mask(VL.size() + 1, PoisonMaskElem);
+        std::iota(Mask.begin(), Mask.begin() + VL.size(), 0);
+        Cost += TTI->getShuffleCost(TTI::SK_ExtractSubvector, SrcTy, Mask,
+                                    TTI::TCK_RecipThroughput, 0, VecTy);
+      }
+      return Cost;
     };
     SmallVector<Value *> PointerOps(VL.size());
     for (auto [I, V] : enumerate(VL)) {
@@ -9708,6 +9825,9 @@ BoUpSLP::isGatherShuffledEntry(
   // No need to check for the topmost gather node.
   if (TE == VectorizableTree.front().get())
     return {};
+  // Gathering for nodes with padding is not implemented yet.
+  if (TE->getNumPadding() > 0)
+    return {};
   Mask.assign(VL.size(), PoisonMaskElem);
   assert(TE->UserTreeIndices.size() == 1 &&
          "Expected only single user of the gather node.");
@@ -10420,7 +10540,7 @@ class BoUpSLP::ShuffleInstructionBuilder final : public BaseShuffleAnalysis {
     add(V1, NewMask);
   }
   Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0,
-                Value *Root = nullptr) {
+                Value *Root = nullptr, bool WithPadding = false) {
     return R.gather(VL, Root);
   }
   Value *createFreeze(Value *V) { return Builder.CreateFreeze(V); }
@@ -10489,7 +10609,6 @@ Value *BoUpSLP::vectorizeOperand(TreeEntry *E, unsigned NodeIdx,
     SmallVector<int> Mask(E->ReorderIndices.begin(), E->ReorderIndices.end());
     reorderScalars(VL, Mask);
   }
-  const unsigned VF = VL.size();
   InstructionsState S = getSameOpcode(VL, *TLI);
   // Special processing for GEPs bundle, which may include non-gep values.
   if (!S.getOpcode() && VL.front()->getType()->isPointerTy()) {
@@ -10531,6 +10650,7 @@ Value *BoUpSLP::vectorizeOperand(TreeEntry *E, unsigned NodeIdx,
         ShuffleBuilder.add(V, Mask);
         return ShuffleBuilder.finalize(std::nullopt);
       };
+      const unsigned VF = VL.size() + E->getNumPadding();
       Value *V = vectorizeTree(VE, PostponedPHIs);
       if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) {
         if (!VE->ReuseShuffleIndices.empty()) {
@@ -10657,6 +10777,14 @@ ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Args &...Params) {
     return true;
   };
   BVTy ShuffleBuilder(Params...);
+  if (E->getNumPadding() > 0) {
+    Value *BV = ShuffleBuilder.gather(E->Scalars, 0, nullptr, true);
+    SmallVector<int> Mask(VF, PoisonMaskElem);
+    std::iota(Mask.begin(), Mask.begin() + E->Scalars.size(), 0);
+    ShuffleBuilder.add(BV, Mask);
+    return ShuffleBuilder.finalize(E->ReuseShuffleIndices);
+  }
+
   ResTy Res = ResTy();
   SmallVector<int> Mask;
   SmallVector<int> ExtractMask(GatheredScalars.size(), PoisonMaskElem);
@@ -11064,8 +11192,22 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
 
   auto FinalShuffle = [&](Value *V, const TreeEntry *E, VectorType *VecTy,
                           bool IsSigned) {
-    if (V->getType() != VecTy)
-      V = Builder.CreateIntCast(V, VecTy, IsSigned);
+    if (V->getType() != VecTy) {
+      if (E->getNumPadding() > 0 &&
+          cast<FixedVectorType>(V->getType())->getNumElements() +
+                  E->getNumPadding() ==
+              cast<FixedVectorType>(VecTy)->getNumElements()) {
+        assert(E->ReorderIndices.empty());
+        assert(E->ReuseShuffleIndices.empty());
+        SmallVector<int> Mask(cast<FixedVectorType>(VecTy)->getNumElements(),
+                              PoisonMaskElem);
+        ShuffleInstructionBuilder ShuffleBuilder(Builder, *this);
+        ShuffleBuilder.add(V, std::nullopt);
+        std::iota(Mask.begin(), Mask.begin() + E->Scalars.size(), 0);
+        return ShuffleBuilder.finalize(Mask);
+      } else
+        V = Builder.CreateIntCast(V, VecTy, IsSigned);
+    }
     ShuffleInstructionBuilder ShuffleBuilder(Builder, *this);
     if (E->getOpcode() == Instruction::Store) {
       ArrayRef<int> Mask =
@@ -11081,6 +11223,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
   };
 
   assert((E->State == TreeEntry::Vectorize ||
+          E->State == TreeEntry::VectorizeWithPadding ||
           E->State == TreeEntry::ScatterVectorize ||
           E->State == TreeEntry::PossibleStridedVectorize) &&
          "Unhandled state");
@@ -11098,7 +11241,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
     ScalarTy = IntegerType::get(F->getContext(), It->second.first);
     IsSigned = It->second.second;
   }
-  auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size());
+  auto *VecTy =
+      FixedVectorType::get(ScalarTy, E->Scalars.size() + E->getNumPadding());
   switch (ShuffleOrOp) {
     case Instruction::PHI: {
       assert((E->ReorderIndices.empty() ||
@@ -11544,8 +11688,19 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
       LoadInst *LI = cast<LoadInst>(VL0);
       Instruction *NewLI;
       Value *PO = LI->getPointerOperand();
-      if (E->State == TreeEntry::Vectorize) {
+      Value *V;
+      if (E->State == TreeEntry::VectorizeWithPadding) {
+        auto *VecTy =
+            FixedVectorType::get(E->Scalars[0]->getType(), E->Scalars.size());
         NewLI = Builder.CreateAlignedLoad(VecTy, PO, LI->getAlign());
+        SmallVector<int> Mask(E->Scalars.size() + 1, PoisonMaskElem);
+        assert(isPowerOf2_32(E->Scalars.size() + 1) &&
+               "unexpected padding needed");
+        std::iota(Mask.begin(), Mask.begin() + E->Scalars.size(), 0);
+        V = Builder.CreateShuffleVector(NewLI, PoisonValue::get(VecTy), Mask);
+      } else if (E->State == TreeEntry::Vectorize) {
+        NewLI = Builder.CreateAlignedLoad(VecTy, PO, LI->getAlign());
+        V = NewLI;
       } else {
         assert((E->State == TreeEntry::ScatterVectorize ||
                 E->State == TreeEntry::PossibleStridedVectorize) &&
@@ -11561,10 +11716,12 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
           CommonAlignment =
               std::min(CommonAlignment, cast<LoadInst>(V)->getAlign());
         NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment);
+        V = NewLI;
       }
-      Value *V = propagateMetadata(NewLI, E->Scalars);
+      propagateMetadata(NewLI, E->Scalars);
 
-      V = FinalShuffle(V, E, VecTy, IsSigned);
+      if (E->State != TreeEntry::VectorizeWithPadding)
+        V = FinalShuffle(V, E, VecTy, IsSigned);
       E->VectorizedValue = V;
       ++NumVectorInstructions;
       return V;
@@ -11639,8 +11796,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
       SmallVector<Type *, 2> TysForDecl;
       // Add return type if intrinsic is overloaded on it.
       if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, -1))
-        TysForDecl.push_back(
-            FixedVectorType::get(CI->getType(), E->Scalars.size()));
+        TysForDecl.push_back(FixedVectorType::get(
+            CI->getType(), E->Scalars.size() + E->getNumPadding()));
       for (unsigned I : seq<unsigned>(0, CI->arg_size())) {
         ValueList OpVL;
         // Some intrinsics have scalar arguments. This argument should not be
@@ -13421,7 +13578,8 @@ bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
   unsigned VF = Chain.size();
 
   if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF)
-    return false;
+    if (!VectorizeWithPadding || (VF < MinVF && VF + 1 != MinVF))
+      return false;
 
   LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx
                     << "\n");
@@ -13517,9 +13675,36 @@ bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
                           << "MinVF (" << MinVF << ")\n");
       }
 
+      unsigned StartIdx = 0;
+      if (VectorizeWithPadding) {
+        unsigned CandVF = Operands.size() + 1;
+        if (isPowerOf2_32(CandVF) && CandVF <= MaxVF) {
+          assert(
+              all_of(
+                  Operands,
+                  [&](Value *V) {
+                    return cast<StoreInst>(V)->getValueOperand()->getType() ==
+                           cast<StoreInst>(Operands.front())
+                               ->getValueOperand()
+                               ->getType();
+                  }) &&
+              "Expected all operands of same type.");
+          if (!VectorizedStores.count(Operands.front()) &&
+              !VectorizedStores.count(Operands.back()) &&
+              TriedSequences
+                  .insert(std::make_pair(Operands.front(), Operands.back()))
+                  .second &&
+              vectorizeStoreChain(Operands, R, Operands.size(), MinVF)) {
+            // Mark the vectorized stores so that we don't vectorize them again.
+            VectorizedStores.insert(Operands.begin(), Operands.end());
+            Changed = true;
+            StartIdx += Operands.size();
+          }
+        }
+      }
+
       // FIXME: Is division-by-2 the correct step? Should we assert that the
       // register size is a power-of-2?
-      unsigned StartIdx = 0;
       for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) {
         for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) {
           ArrayRef<Value *> Slice = ArrayRef(Operands).slice(Cnt, Size);
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/vec15-base.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/vec15-base.ll
index b9e959d50befdd..db54d7a9e37dba 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/vec15-base.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/vec15-base.ll
@@ -1,35 +1,65 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -passes=slp-vectorizer -mtriple=arm64-apple-ios -S %s | FileCheck %s
+; RUN: opt -passes=slp-vectorizer -slp-vectorize-with-padding -mtriple=arm64-apple-ios -S %s | FileCheck --check-prefixes=PADDING %s
+; RUN: opt -passes=slp-vectorizer -slp-vectorize-with-padding=false -mtriple=arm64-apple-ios -S %s | FileCheck --check-prefixes=NO-PADDING %s
 
 define void @v15_load_i8_mul_by_constant_store(ptr %src, ptr noalias %dst) {
-; CHECK-LABEL: define void @v15_load_i8_mul_by_constant_store(
-; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]]) {
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[GEP_SRC_0:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 0
-; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[GEP_SRC_0]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = mul nsw <8 x i8> [[TMP0]], <i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10>
-; CHECK-NEXT:    store <8 x i8> [[TMP1]], ptr [[DST]], align 1
-; CHECK-NEXT:    [[GEP_SRC_8:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 8
-; CHECK-NEXT:    [[DST_8:%.*]] = getelementptr i8, ptr [[DST]], i8 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_SRC_8]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = mul nsw <4 x i8> [[TMP2]], <i8 10, i8 10, i8 10, i8 10>
-; CHECK-NEXT:    store <4 x i8> [[TMP3]], ptr [[DST_8]], align 1
-; CHECK-NEXT:    [[GEP_SRC_12:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 12
-; CHECK-NEXT:    [[L_SRC_12:%.*]] = load i8, ptr [[GEP_SRC_12]], align 4
-; CHECK-NEXT:    [[MUL_12:%.*]] = mul nsw i8 [[L_SRC_12]], 10
-; CHECK-NEXT:    [[DST_12:%.*]] = getelementptr i8, ptr [[DST]], i8 12
-; CHECK-NEXT:    store i8 [[MUL_12]], ptr [[DST_12]], align 1
-; CHECK-NEXT:    [[GEP_SRC_13:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 13
-; CHECK-NEXT:    [[L_SRC_13:%.*]] = load i8, ptr [[GEP_SRC_13]], align 4
-; CHECK-NEXT:    [[MUL_13:%.*]] = mul nsw i8 [[L_SRC_13]], 10
-; CHECK-NEXT:    [[DST_13:%.*]] = getelementptr i8, ptr [[DST]], i8 13
-; CHECK-NEXT:    store i8 [[MUL_13]], ptr [[DST_13]], align 1
-; CHECK-NEXT:    [[GEP_SRC_14:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 14
-; CHECK-NEXT:    [[L_SRC_14:%.*]] = load i8, ptr [[GEP_SRC_14]], align 4
-; CHECK-NEXT:    [[MUL_14:%.*]] = mul nsw i8 [[L_SRC_14]], 10
-; CHECK-NEXT:    [[DST_14:%.*]] = getelementptr i8, ptr [[DST]], i8 14
-; CHECK-NEXT:    store i8 [[MUL_14]], ptr [[DST_14]], align 1
-; CHECK-NEXT:    ret void
+; PADDING-LABEL: define void @v15_load_i8_mul_by_constant_store(
+; PADDING-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]]) {
+; PADDING-NEXT:  entry:
+; PADDING-NEXT:    [[GEP_SRC_0:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 0
+; PADDING-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[GEP_SRC_0]], align 4
+; PADDING-NEXT:    [[TMP1:%.*]] = mul nsw <8 x i8> [[TMP0]], <i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10>
+; PADDING-NEXT:    store <8 x i8> [[TMP1]], ptr [[DST]], align 1
+; PADDING-NEXT:    [[GEP_SRC_8:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 8
+; PADDING-NEXT:    [[DST_8:%.*]] = getelementptr i8, ptr [[DST]], i8 8
+; PADDING-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_SRC_8]], align 4
+; PADDING-NEXT:    [[TMP3:%.*]] = mul nsw <4 x i8> [[TMP2]], <i8 10, i8 10, i8 10, i8 10>
+; PADDING-NEXT:    store <4 x i8> [[TMP3]], ptr [[DST_8]], align 1
+; PADDING-NEXT:    [[GEP_SRC_12:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 12
+; PADDING-NEXT:    [[L_SRC_12:%.*]] = load i8, ptr [[GEP_SRC_12]], align 4
+; PADDING-NEXT:    [[MUL_12:%.*]] = mul nsw i8 [[L_SRC_12]], 10
+; PADDING-NEXT:    [[DST_12:%.*]] = getelementptr i8, ptr [[DST]], i8 12
+; PADDING-NEXT:    store i8 [[MUL_12]], ptr [[DST_12]], align 1
+; PADDING-NEXT:    [[GEP_SRC_13:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 13
+; PADDING-NEXT:    [[L_SRC_13:%.*]] = load i8, ptr [[GEP_SRC_13]], align 4
+; PADDING-NEXT:    [[MUL_13:%.*]] = mul nsw i8 [[L_SRC_13]], 10
+; PADDING-NEXT:    [[DST_13:%.*]] = getelementptr i8, ptr [[DST]], i8 13
+; PADDING-NEXT:    store i8 [[MUL_13]], ptr [[DST_13]], align 1
+; PADDING-NEXT:    [[GEP_SRC_14:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 14
+; PADDING-NEXT:    [[L_SRC_14:%.*]] = load i8, ptr [[GEP_SRC_14]], align 4
+; PADDING-NEXT:    [[MUL_14:%.*]] = mul nsw i8 [[L_SRC_14]], 10
+; PADDING-NEXT:    [[DST_14:%.*]] = getelementptr i8, ptr [[DST]], i8 14
+; PADDING-NEXT:    store i8 [[MUL_14]], ptr [[DST_14]], align 1
+; PADDING-NEXT:    ret void
+;
+; NO-PADDING-LABEL: define void @v15_load_i8_mul_by_constant_store(
+; NO-PADDING-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]]) {
+; NO-PADDING-NEXT:  entry:
+; NO-PADDING-NEXT:    [[GEP_SRC_0:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 0
+; NO-PADDING-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[GEP_SRC_0]], align 4
+; NO-PADDING-NEXT:    [[TMP1:%.*]] = mul nsw <8 x i8> [[TMP0]], <i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10>
+; NO-PADDING-NEXT:    store <8 x i8> [[TMP1]], ptr [[DST]], align 1
+; NO-PADDING-NEXT:    [[GEP_SRC_8:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 8
+; NO-PADDING-NEXT:    [[DST_8:%.*]] = getelementptr i8, ptr [[DST]], i8 8
+; NO-PADDING-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_SRC_8]], align 4
+; NO-PADDING-NEXT:    [[TMP3:%.*]] = mul nsw <4 x i8> [[TMP2]], <i8 10, i8 10, i8 10, i8 10>
+; NO-PADDING-NEXT:    store <4 x i8> [[TMP3]], ptr [[DST_8]], align 1
+; NO-PADDING-NEXT:    [[GEP_SRC_12:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 12
+; NO-PADDING-NEXT:    [[L_SRC_12:%.*]] = load i8, ptr [[GEP_SRC_12]], align 4
+; NO-PADDING-NEXT:    [[MUL_12:%.*]] = mul nsw i8 [[L_SRC_12]], 10
+; NO-PADDING-NEXT:    [[DST_12:%.*]] = getelementptr i8, ptr [[DST]], i8 12
+; NO-PADDING-NEXT:    store i8 [[MUL_12]], ptr [[DST_12]], align 1
+; NO-PADDING-NEXT:    [[GEP_SRC_13:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 13
+; NO-PADDING-NEXT:    [[L_SRC_13:%.*]] = load i8, ptr [[GEP_SRC_13]], align 4
+; NO-PADDING-NEXT:    [[MUL_13:%.*]] = mul nsw i8 [[L_SRC_13]], 10
+; NO-PADDING-NEXT:    [[DST_13:%.*]] = getelementptr i8, ptr [[DST]], i8 13
+; NO-PADDING-NEXT:    store i8 [[MUL_13]], ptr [[DST_13]], align 1
+; NO-PADDING-NEXT:    [[GEP_SRC_14:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i8 14
+; NO-PADDING-NEXT:    [[L_SRC_14:%.*]] = load i8, ptr [[GEP_SRC_14]], align 4
+; NO-PADDING-NEXT:    [[MUL_14:%.*]] = mul nsw i8 [[L_SRC_14]], 10
+; NO-PADDING-NEXT:    [[DST_14:%.*]] = getelementptr i8, ptr [[DST]], i8 14
+; NO-PADDING-NEXT:    store i8 [[MUL_14]], ptr [[DST_14]], align 1
+; NO-PADDING-NEXT:    ret void
 ;
 entry:
   %gep.src.0 = getelementptr inbounds i8, ptr %src, i8 0
@@ -123,5 +153,3 @@ entry:
 
   ret void
 }
-
-
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-base.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-base.ll
index 59ffbf7ef9b247..ac33009107fa29 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-base.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-base.ll
@@ -1,16 +1,71 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=slp-vectorizer -mtriple=arm64-apple-ios -S %s | FileCheck %s
+; RUN: opt -passes=slp-vectorizer -slp-vectorize-with-padding -mtriple=arm64-apple-ios -S %s | FileCheck --check-prefixes=CHECK,PADDING %s
+; RUN: opt -passes=slp-vectorizer -slp-vectorize-with-padding=false -mtriple=arm64-apple-ios -S %s | FileCheck --check-prefixes=CHECK,NO-PADDING %s
 
 define void @v3_load_i32_mul_by_constant_store(ptr %src, ptr %dst) {
-; CHECK-LABEL: @v3_load_i32_mul_by_constant_store(
+; PADDING-LABEL: @v3_load_i32_mul_by_constant_store(
+; PADDING-NEXT:  entry:
+; PADDING-NEXT:    [[GEP_SRC_0:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i32 0
+; PADDING-NEXT:    [[TMP0:%.*]] = load <3 x i32>, ptr [[GEP_SRC_0]], align 4
+; PADDING-NEXT:    [[TMP1:%.*]] = shufflevector <3 x i32> [[TMP0]], <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+; PADDING-NEXT:    [[TMP2:%.*]] = mul nsw <4 x i32> [[TMP1]], <i32 10, i32 10, i32 10, i32 undef>
+; PADDING-NEXT:    [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; PADDING-NEXT:    store <3 x i32> [[TMP3]], ptr [[DST:%.*]], align 4
+; PADDING-NEXT:    ret void
+;
+; NO-PADDING-LABEL: @v3_load_i32_mul_by_constant_store(
+; NO-PADDING-NEXT:  entry:
+; NO-PADDING-NEXT:    [[GEP_SRC_0:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i32 0
+; NO-PADDING-NEXT:    [[GEP_SRC_2:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 2
+; NO-PADDING-NEXT:    [[L_SRC_2:%.*]] = load i32, ptr [[GEP_SRC_2]], align 4
+; NO-PADDING-NEXT:    [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_2]], 10
+; NO-PADDING-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[GEP_SRC_0]], align 4
+; NO-PADDING-NEXT:    [[TMP1:%.*]] = mul nsw <2 x i32> [[TMP0]], <i32 10, i32 10>
+; NO-PADDING-NEXT:    store <2 x i32> [[TMP1]], ptr [[DST:%.*]], align 4
+; NO-PADDING-NEXT:    [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
+; NO-PADDING-NEXT:    store i32 [[MUL_2]], ptr [[DST_2]], align 4
+; NO-PADDING-NEXT:    ret void
+;
+entry:
+  %gep.src.0 = getelementptr inbounds i32, ptr %src, i32 0
+  %l.src.0 = load i32, ptr %gep.src.0, align 4
+  %mul.0 = mul nsw i32 %l.src.0, 10
+
+  %gep.src.1 = getelementptr inbounds i32, ptr %src, i32 1
+  %l.src.1 = load i32, ptr %gep.src.1, align 4
+  %mul.1 = mul nsw i32 %l.src.1, 10
+
+  %gep.src.2 = getelementptr inbounds i32, ptr %src, i32 2
+  %l.src.2 = load i32, ptr %gep.src.2, align 4
+  %mul.2 = mul nsw i32 %l.src.2, 10
+
+  store i32 %mul.0, ptr %dst
+
+  %dst.1 = getelementptr i32, ptr %dst, i32 1
+  store i32 %mul.1, ptr %dst.1
+
+  %dst.2 = getelementptr i32, ptr %dst, i32 2
+  store i32 %mul.2, ptr %dst.2
+
+  ret void
+}
+
+; Should not be vectorized with an undef/poison element as padding, as division by undef/poison may cause UB.
+define void @v3_load_i32_udiv_by_constant_store(ptr %src, ptr %dst) {
+; CHECK-LABEL: @v3_load_i32_udiv_by_constant_store(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[GEP_SRC_0:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i32 0
+; CHECK-NEXT:    [[L_SRC_0:%.*]] = load i32, ptr [[GEP_SRC_0]], align 4
+; CHECK-NEXT:    [[MUL_0:%.*]] = udiv i32 10, [[L_SRC_0]]
+; CHECK-NEXT:    [[GEP_SRC_1:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 1
+; CHECK-NEXT:    [[L_SRC_1:%.*]] = load i32, ptr [[GEP_SRC_1]], align 4
+; CHECK-NEXT:    [[MUL_1:%.*]] = udiv i32 10, [[L_SRC_1]]
 ; CHECK-NEXT:    [[GEP_SRC_2:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 2
 ; CHECK-NEXT:    [[L_SRC_2:%.*]] = load i32, ptr [[GEP_SRC_2]], align 4
-; CHECK-NEXT:    [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_2]], 10
-; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[GEP_SRC_0]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = mul nsw <2 x i32> [[TMP0]], <i32 10, i32 10>
-; CHECK-NEXT:    store <2 x i32> [[TMP1]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[MUL_2:%.*]] = udiv i32 10, [[L_SRC_2]]
+; CHECK-NEXT:    store i32 [[MUL_0]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[DST_1:%.*]] = getelementptr i32, ptr [[DST]], i32 1
+; CHECK-NEXT:    store i32 [[MUL_1]], ptr [[DST_1]], align 4
 ; CHECK-NEXT:    [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
 ; CHECK-NEXT:    store i32 [[MUL_2]], ptr [[DST_2]], align 4
 ; CHECK-NEXT:    ret void
@@ -18,15 +73,15 @@ define void @v3_load_i32_mul_by_constant_store(ptr %src, ptr %dst) {
 entry:
   %gep.src.0 = getelementptr inbounds i32, ptr %src, i32 0
   %l.src.0 = load i32, ptr %gep.src.0, align 4
-  %mul.0 = mul nsw i32 %l.src.0, 10
+  %mul.0 = udiv i32 10, %l.src.0
 
   %gep.src.1 = getelementptr inbounds i32, ptr %src, i32 1
   %l.src.1 = load i32, ptr %gep.src.1, align 4
-  %mul.1 = mul nsw i32 %l.src.1, 10
+  %mul.1 = udiv i32 10, %l.src.1
 
   %gep.src.2 = getelementptr inbounds i32, ptr %src, i32 2
   %l.src.2 = load i32, ptr %gep.src.2, align 4
-  %mul.2 = mul nsw i32 %l.src.2, 10
+  %mul.2 = udiv i32 10, %l.src.2
 
   store i32 %mul.0, ptr %dst
 
@@ -39,23 +94,38 @@ entry:
   ret void
 }
 
+
+
 define void @v3_load_i32_mul_store(ptr %src.1, ptr %src.2, ptr %dst) {
-; CHECK-LABEL: @v3_load_i32_mul_store(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[GEP_SRC_1_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_1:%.*]], i32 0
-; CHECK-NEXT:    [[GEP_SRC_2_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_2:%.*]], i32 0
-; CHECK-NEXT:    [[GEP_SRC_1_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_1]], i32 2
-; CHECK-NEXT:    [[L_SRC_1_2:%.*]] = load i32, ptr [[GEP_SRC_1_2]], align 4
-; CHECK-NEXT:    [[GEP_SRC_2_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_2]], i32 2
-; CHECK-NEXT:    [[L_SRC_2_2:%.*]] = load i32, ptr [[GEP_SRC_2_2]], align 4
-; CHECK-NEXT:    [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_1_2]], [[L_SRC_2_2]]
-; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[GEP_SRC_1_0]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[GEP_SRC_2_0]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = mul nsw <2 x i32> [[TMP0]], [[TMP1]]
-; CHECK-NEXT:    store <2 x i32> [[TMP2]], ptr [[DST:%.*]], align 4
-; CHECK-NEXT:    [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
-; CHECK-NEXT:    store i32 [[MUL_2]], ptr [[DST_2]], align 4
-; CHECK-NEXT:    ret void
+; PADDING-LABEL: @v3_load_i32_mul_store(
+; PADDING-NEXT:  entry:
+; PADDING-NEXT:    [[GEP_SRC_1_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_1:%.*]], i32 0
+; PADDING-NEXT:    [[GEP_SRC_2_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_2:%.*]], i32 0
+; PADDING-NEXT:    [[TMP0:%.*]] = load <3 x i32>, ptr [[GEP_SRC_1_0]], align 4
+; PADDING-NEXT:    [[TMP1:%.*]] = shufflevector <3 x i32> [[TMP0]], <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+; PADDING-NEXT:    [[TMP2:%.*]] = load <3 x i32>, ptr [[GEP_SRC_2_0]], align 4
+; PADDING-NEXT:    [[TMP3:%.*]] = shufflevector <3 x i32> [[TMP2]], <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+; PADDING-NEXT:    [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP1]], [[TMP3]]
+; PADDING-NEXT:    [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; PADDING-NEXT:    store <3 x i32> [[TMP5]], ptr [[DST:%.*]], align 4
+; PADDING-NEXT:    ret void
+;
+; NO-PADDING-LABEL: @v3_load_i32_mul_store(
+; NO-PADDING-NEXT:  entry:
+; NO-PADDING-NEXT:    [[GEP_SRC_1_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_1:%.*]], i32 0
+; NO-PADDING-NEXT:    [[GEP_SRC_2_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_2:%.*]], i32 0
+; NO-PADDING-NEXT:    [[GEP_SRC_1_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_1]], i32 2
+; NO-PADDING-NEXT:    [[L_SRC_1_2:%.*]] = load i32, ptr [[GEP_SRC_1_2]], align 4
+; NO-PADDING-NEXT:    [[GEP_SRC_2_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_2]], i32 2
+; NO-PADDING-NEXT:    [[L_SRC_2_2:%.*]] = load i32, ptr [[GEP_SRC_2_2]], align 4
+; NO-PADDING-NEXT:    [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_1_2]], [[L_SRC_2_2]]
+; NO-PADDING-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[GEP_SRC_1_0]], align 4
+; NO-PADDING-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[GEP_SRC_2_0]], align 4
+; NO-PADDING-NEXT:    [[TMP2:%.*]] = mul nsw <2 x i32> [[TMP0]], [[TMP1]]
+; NO-PADDING-NEXT:    store <2 x i32> [[TMP2]], ptr [[DST:%.*]], align 4
+; NO-PADDING-NEXT:    [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
+; NO-PADDING-NEXT:    store i32 [[MUL_2]], ptr [[DST_2]], align 4
+; NO-PADDING-NEXT:    ret void
 ;
 entry:
   %gep.src.1.0 = getelementptr inbounds i32, ptr %src.1, i32 0
@@ -88,24 +158,38 @@ entry:
 }
 
 define void @v3_load_i32_mul_add_const_store(ptr %src.1, ptr %src.2, ptr %dst) {
-; CHECK-LABEL: @v3_load_i32_mul_add_const_store(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[GEP_SRC_1_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_1:%.*]], i32 0
-; CHECK-NEXT:    [[GEP_SRC_2_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_2:%.*]], i32 0
-; CHECK-NEXT:    [[GEP_SRC_1_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_1]], i32 2
-; CHECK-NEXT:    [[L_SRC_1_2:%.*]] = load i32, ptr [[GEP_SRC_1_2]], align 4
-; CHECK-NEXT:    [[GEP_SRC_2_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_2]], i32 2
-; CHECK-NEXT:    [[L_SRC_2_2:%.*]] = load i32, ptr [[GEP_SRC_2_2]], align 4
-; CHECK-NEXT:    [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_1_2]], [[L_SRC_2_2]]
-; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[MUL_2]], 9
-; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[GEP_SRC_1_0]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[GEP_SRC_2_0]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = mul nsw <2 x i32> [[TMP0]], [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = add <2 x i32> [[TMP2]], <i32 9, i32 9>
-; CHECK-NEXT:    store <2 x i32> [[TMP3]], ptr [[DST:%.*]], align 4
-; CHECK-NEXT:    [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
-; CHECK-NEXT:    store i32 [[ADD_2]], ptr [[DST_2]], align 4
-; CHECK-NEXT:    ret void
+; PADDING-LABEL: @v3_load_i32_mul_add_const_store(
+; PADDING-NEXT:  entry:
+; PADDING-NEXT:    [[GEP_SRC_1_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_1:%.*]], i32 0
+; PADDING-NEXT:    [[GEP_SRC_2_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_2:%.*]], i32 0
+; PADDING-NEXT:    [[TMP0:%.*]] = load <3 x i32>, ptr [[GEP_SRC_1_0]], align 4
+; PADDING-NEXT:    [[TMP1:%.*]] = shufflevector <3 x i32> [[TMP0]], <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+; PADDING-NEXT:    [[TMP2:%.*]] = load <3 x i32>, ptr [[GEP_SRC_2_0]], align 4
+; PADDING-NEXT:    [[TMP3:%.*]] = shufflevector <3 x i32> [[TMP2]], <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+; PADDING-NEXT:    [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP1]], [[TMP3]]
+; PADDING-NEXT:    [[TMP5:%.*]] = add <4 x i32> [[TMP4]], <i32 9, i32 9, i32 9, i32 undef>
+; PADDING-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; PADDING-NEXT:    store <3 x i32> [[TMP6]], ptr [[DST:%.*]], align 4
+; PADDING-NEXT:    ret void
+;
+; NO-PADDING-LABEL: @v3_load_i32_mul_add_const_store(
+; NO-PADDING-NEXT:  entry:
+; NO-PADDING-NEXT:    [[GEP_SRC_1_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_1:%.*]], i32 0
+; NO-PADDING-NEXT:    [[GEP_SRC_2_0:%.*]] = getelementptr inbounds i32, ptr [[SRC_2:%.*]], i32 0
+; NO-PADDING-NEXT:    [[GEP_SRC_1_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_1]], i32 2
+; NO-PADDING-NEXT:    [[L_SRC_1_2:%.*]] = load i32, ptr [[GEP_SRC_1_2]], align 4
+; NO-PADDING-NEXT:    [[GEP_SRC_2_2:%.*]] = getelementptr inbounds i32, ptr [[SRC_2]], i32 2
+; NO-PADDING-NEXT:    [[L_SRC_2_2:%.*]] = load i32, ptr [[GEP_SRC_2_2]], align 4
+; NO-PADDING-NEXT:    [[MUL_2:%.*]] = mul nsw i32 [[L_SRC_1_2]], [[L_SRC_2_2]]
+; NO-PADDING-NEXT:    [[ADD_2:%.*]] = add i32 [[MUL_2]], 9
+; NO-PADDING-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[GEP_SRC_1_0]], align 4
+; NO-PADDING-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[GEP_SRC_2_0]], align 4
+; NO-PADDING-NEXT:    [[TMP2:%.*]] = mul nsw <2 x i32> [[TMP0]], [[TMP1]]
+; NO-PADDING-NEXT:    [[TMP3:%.*]] = add <2 x i32> [[TMP2]], <i32 9, i32 9>
+; NO-PADDING-NEXT:    store <2 x i32> [[TMP3]], ptr [[DST:%.*]], align 4
+; NO-PADDING-NEXT:    [[DST_2:%.*]] = getelementptr i32, ptr [[DST]], i32 2
+; NO-PADDING-NEXT:    store i32 [[ADD_2]], ptr [[DST_2]], align 4
+; NO-PADDING-NEXT:    ret void
 ;
 entry:
   %gep.src.1.0 = getelementptr inbounds i32, ptr %src.1, i32 0
@@ -141,18 +225,28 @@ entry:
 }
 
 define void @v3_load_f32_fadd_fadd_by_constant_store(ptr %src, ptr %dst) {
-; CHECK-LABEL: @v3_load_f32_fadd_fadd_by_constant_store(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[GEP_SRC_0:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i32 0
-; CHECK-NEXT:    [[GEP_SRC_2:%.*]] = getelementptr inbounds float, ptr [[SRC]], i32 2
-; CHECK-NEXT:    [[L_SRC_2:%.*]] = load float, ptr [[GEP_SRC_2]], align 4
-; CHECK-NEXT:    [[FADD_2:%.*]] = fadd float [[L_SRC_2]], 1.000000e+01
-; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[GEP_SRC_0]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = fadd <2 x float> [[TMP0]], <float 1.000000e+01, float 1.000000e+01>
-; CHECK-NEXT:    store <2 x float> [[TMP1]], ptr [[DST:%.*]], align 4
-; CHECK-NEXT:    [[DST_2:%.*]] = getelementptr float, ptr [[DST]], i32 2
-; CHECK-NEXT:    store float [[FADD_2]], ptr [[DST_2]], align 4
-; CHECK-NEXT:    ret void
+; PADDING-LABEL: @v3_load_f32_fadd_fadd_by_constant_store(
+; PADDING-NEXT:  entry:
+; PADDING-NEXT:    [[GEP_SRC_0:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i32 0
+; PADDING-NEXT:    [[TMP0:%.*]] = load <3 x float>, ptr [[GEP_SRC_0]], align 4
+; PADDING-NEXT:    [[TMP1:%.*]] = shufflevector <3 x float> [[TMP0]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+; PADDING-NEXT:    [[TMP2:%.*]] = fadd <4 x float> [[TMP1]], <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float undef>
+; PADDING-NEXT:    [[TMP3:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; PADDING-NEXT:    store <3 x float> [[TMP3]], ptr [[DST:%.*]], align 4
+; PADDING-NEXT:    ret void
+;
+; NO-PADDING-LABEL: @v3_load_f32_fadd_fadd_by_constant_store(
+; NO-PADDING-NEXT:  entry:
+; NO-PADDING-NEXT:    [[GEP_SRC_0:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i32 0
+; NO-PADDING-NEXT:    [[GEP_SRC_2:%.*]] = getelementptr inbounds float, ptr [[SRC]], i32 2
+; NO-PADDING-NEXT:    [[L_SRC_2:%.*]] = load float, ptr [[GEP_SRC_2]], align 4
+; NO-PADDING-NEXT:    [[FADD_2:%.*]] = fadd float [[L_SRC_2]], 1.000000e+01
+; NO-PADDING-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[GEP_SRC_0]], align 4
+; NO-PADDING-NEXT:    [[TMP1:%.*]] = fadd <2 x float> [[TMP0]], <float 1.000000e+01, float 1.000000e+01>
+; NO-PADDING-NEXT:    store <2 x float> [[TMP1]], ptr [[DST:%.*]], align 4
+; NO-PADDING-NEXT:    [[DST_2:%.*]] = getelementptr float, ptr [[DST]], i32 2
+; NO-PADDING-NEXT:    store float [[FADD_2]], ptr [[DST_2]], align 4
+; NO-PADDING-NEXT:    ret void
 ;
 entry:
   %gep.src.0 = getelementptr inbounds float, ptr %src, i32 0
@@ -179,18 +273,29 @@ entry:
 }
 
 define void @phi_store3(ptr %dst) {
-; CHECK-LABEL: @phi_store3(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    br label [[EXIT:%.*]]
-; CHECK:       invoke.cont8.loopexit:
-; CHECK-NEXT:    br label [[EXIT]]
-; CHECK:       exit:
-; CHECK-NEXT:    [[P_2:%.*]] = phi i32 [ 3, [[ENTRY:%.*]] ], [ 0, [[INVOKE_CONT8_LOOPEXIT:%.*]] ]
-; CHECK-NEXT:    [[TMP0:%.*]] = phi <2 x i32> [ <i32 1, i32 2>, [[ENTRY]] ], [ poison, [[INVOKE_CONT8_LOOPEXIT]] ]
-; CHECK-NEXT:    [[DST_2:%.*]] = getelementptr i32, ptr [[DST:%.*]], i32 2
-; CHECK-NEXT:    store <2 x i32> [[TMP0]], ptr [[DST]], align 4
-; CHECK-NEXT:    store i32 [[P_2]], ptr [[DST_2]], align 4
-; CHECK-NEXT:    ret void
+; PADDING-LABEL: @phi_store3(
+; PADDING-NEXT:  entry:
+; PADDING-NEXT:    br label [[EXIT:%.*]]
+; PADDING:       invoke.cont8.loopexit:
+; PADDING-NEXT:    br label [[EXIT]]
+; PADDING:       exit:
+; PADDING-NEXT:    [[TMP0:%.*]] = phi <4 x i32> [ <i32 1, i32 2, i32 3, i32 undef>, [[ENTRY:%.*]] ], [ poison, [[INVOKE_CONT8_LOOPEXIT:%.*]] ]
+; PADDING-NEXT:    [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; PADDING-NEXT:    store <3 x i32> [[TMP1]], ptr [[DST:%.*]], align 4
+; PADDING-NEXT:    ret void
+;
+; NO-PADDING-LABEL: @phi_store3(
+; NO-PADDING-NEXT:  entry:
+; NO-PADDING-NEXT:    br label [[EXIT:%.*]]
+; NO-PADDING:       invoke.cont8.loopexit:
+; NO-PADDING-NEXT:    br label [[EXIT]]
+; NO-PADDING:       exit:
+; NO-PADDING-NEXT:    [[P_2:%.*]] = phi i32 [ 3, [[ENTRY:%.*]] ], [ 0, [[INVOKE_CONT8_LOOPEXIT:%.*]] ]
+; NO-PADDING-NEXT:    [[TMP0:%.*]] = phi <2 x i32> [ <i32 1, i32 2>, [[ENTRY]] ], [ poison, [[INVOKE_CONT8_LOOPEXIT]] ]
+; NO-PADDING-NEXT:    [[DST_2:%.*]] = getelementptr i32, ptr [[DST:%.*]], i32 2
+; NO-PADDING-NEXT:    store <2 x i32> [[TMP0]], ptr [[DST]], align 4
+; NO-PADDING-NEXT:    store i32 [[P_2]], ptr [[DST_2]], align 4
+; NO-PADDING-NEXT:    ret void
 ;
 entry:
   br label %exit
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-calls.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-calls.ll
index 2cb84eeb7fc8f4..780dee85e2b98d 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-calls.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-calls.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=slp-vectorizer -mtriple=arm64-apple-ios -S %s | FileCheck %s
+; RUN: opt -passes=slp-vectorizer -slp-vectorize-with-padding -mtriple=arm64-apple-ios -S %s | FileCheck --check-prefixes=CHECK,PADDING %s
+; RUN: opt -passes=slp-vectorizer -slp-vectorize-with-padding=false -mtriple=arm64-apple-ios -S %s | FileCheck --check-prefixes=CHECK,NO-PADDING %s
 
 define void @vec3_vectorize_call(ptr %Colour, float %0) {
 ; CHECK-LABEL: @vec3_vectorize_call(
@@ -58,3 +59,6 @@ entry:
 declare float @llvm.fmuladd.f32(float, float, float)
 
 declare double @llvm.fmuladd.f64(double, double, double)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; NO-PADDING: {{.*}}
+; PADDING: {{.*}}
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-reorder-reshuffle.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-reorder-reshuffle.ll
index 5707e143ad5515..59cace2df306f2 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-reorder-reshuffle.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-reorder-reshuffle.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -passes=slp-vectorizer -mtriple=arm64-apple-ios -S %s | FileCheck %s
+; RUN: opt -passes=slp-vectorizer -slp-vectorize-with-padding -mtriple=arm64-apple-ios -S %s | FileCheck --check-prefixes=CHECK,PADDING %s
+; RUN: opt -passes=slp-vectorizer -slp-vectorize-with-padding=false -mtriple=arm64-apple-ios -S %s | FileCheck --check-prefixes=CHECK,NO-PADDING %s
 
 %struct.zot = type { i32, i32, i32 }
 
@@ -228,19 +229,36 @@ entry:
 }
 
 define void @reorder_indices_2(ptr %spoint) {
-; CHECK-LABEL: define void @reorder_indices_2(
-; CHECK-SAME: ptr [[SPOINT:%.*]]) {
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <3 x float> zeroinitializer, i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float 0.000000e+00, float 0.000000e+00)
-; CHECK-NEXT:    [[MUL4_I461:%.*]] = fmul float [[TMP1]], 0.000000e+00
-; CHECK-NEXT:    [[DSCO:%.*]] = getelementptr float, ptr [[SPOINT]], i64 0
-; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> zeroinitializer, <2 x float> zeroinitializer, <2 x float> zeroinitializer)
-; CHECK-NEXT:    [[TMP3:%.*]] = fmul <2 x float> [[TMP2]], zeroinitializer
-; CHECK-NEXT:    store <2 x float> [[TMP3]], ptr [[DSCO]], align 4
-; CHECK-NEXT:    [[ARRAYIDX5_I476:%.*]] = getelementptr float, ptr [[SPOINT]], i64 2
-; CHECK-NEXT:    store float [[MUL4_I461]], ptr [[ARRAYIDX5_I476]], align 4
-; CHECK-NEXT:    ret void
+; PADDING-LABEL: define void @reorder_indices_2(
+; PADDING-SAME: ptr [[SPOINT:%.*]]) {
+; PADDING-NEXT:  entry:
+; PADDING-NEXT:    [[TMP0:%.*]] = extractelement <3 x float> zeroinitializer, i64 1
+; PADDING-NEXT:    [[TMP1:%.*]] = extractelement <3 x float> zeroinitializer, i64 2
+; PADDING-NEXT:    [[TMP2:%.*]] = extractelement <3 x float> zeroinitializer, i64 0
+; PADDING-NEXT:    [[DSCO:%.*]] = getelementptr float, ptr [[SPOINT]], i64 0
+; PADDING-NEXT:    [[TMP3:%.*]] = insertelement <3 x float> poison, float [[TMP0]], i32 0
+; PADDING-NEXT:    [[TMP4:%.*]] = insertelement <3 x float> [[TMP3]], float [[TMP1]], i32 1
+; PADDING-NEXT:    [[TMP5:%.*]] = insertelement <3 x float> [[TMP4]], float [[TMP2]], i32 2
+; PADDING-NEXT:    [[TMP6:%.*]] = shufflevector <3 x float> [[TMP5]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+; PADDING-NEXT:    [[TMP7:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[TMP6]], <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>, <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>)
+; PADDING-NEXT:    [[TMP8:%.*]] = fmul <4 x float> [[TMP7]], <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>
+; PADDING-NEXT:    [[TMP9:%.*]] = shufflevector <4 x float> [[TMP8]], <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; PADDING-NEXT:    store <3 x float> [[TMP9]], ptr [[DSCO]], align 4
+; PADDING-NEXT:    ret void
+;
+; NO-PADDING-LABEL: define void @reorder_indices_2(
+; NO-PADDING-SAME: ptr [[SPOINT:%.*]]) {
+; NO-PADDING-NEXT:  entry:
+; NO-PADDING-NEXT:    [[TMP0:%.*]] = extractelement <3 x float> zeroinitializer, i64 0
+; NO-PADDING-NEXT:    [[TMP1:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float 0.000000e+00, float 0.000000e+00)
+; NO-PADDING-NEXT:    [[MUL4_I461:%.*]] = fmul float [[TMP1]], 0.000000e+00
+; NO-PADDING-NEXT:    [[DSCO:%.*]] = getelementptr float, ptr [[SPOINT]], i64 0
+; NO-PADDING-NEXT:    [[TMP2:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> zeroinitializer, <2 x float> zeroinitializer, <2 x float> zeroinitializer)
+; NO-PADDING-NEXT:    [[TMP3:%.*]] = fmul <2 x float> [[TMP2]], zeroinitializer
+; NO-PADDING-NEXT:    store <2 x float> [[TMP3]], ptr [[DSCO]], align 4
+; NO-PADDING-NEXT:    [[ARRAYIDX5_I476:%.*]] = getelementptr float, ptr [[SPOINT]], i64 2
+; NO-PADDING-NEXT:    store float [[MUL4_I461]], ptr [[ARRAYIDX5_I476]], align 4
+; NO-PADDING-NEXT:    ret void
 ;
 entry:
   %0 = extractelement <3 x float> zeroinitializer, i64 1
@@ -488,18 +506,25 @@ entry:
 }
 
 define void @vec3_extract(<3 x i16> %pixel.sroa.0.4.vec.insert606, ptr %call3.i536) {
-; CHECK-LABEL: define void @vec3_extract(
-; CHECK-SAME: <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606:%.*]], ptr [[CALL3_I536:%.*]]) {
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[PIXEL_SROA_0_4_VEC_EXTRACT:%.*]] = extractelement <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606]], i64 2
-; CHECK-NEXT:    [[RED668:%.*]] = getelementptr i16, ptr [[CALL3_I536]], i64 2
-; CHECK-NEXT:    store i16 [[PIXEL_SROA_0_4_VEC_EXTRACT]], ptr [[RED668]], align 2
-; CHECK-NEXT:    [[PIXEL_SROA_0_2_VEC_EXTRACT:%.*]] = extractelement <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606]], i64 1
-; CHECK-NEXT:    [[GREEN670:%.*]] = getelementptr i16, ptr [[CALL3_I536]], i64 1
-; CHECK-NEXT:    store i16 [[PIXEL_SROA_0_2_VEC_EXTRACT]], ptr [[GREEN670]], align 2
-; CHECK-NEXT:    [[PIXEL_SROA_0_0_VEC_EXTRACT:%.*]] = extractelement <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606]], i64 0
-; CHECK-NEXT:    store i16 [[PIXEL_SROA_0_0_VEC_EXTRACT]], ptr [[CALL3_I536]], align 2
-; CHECK-NEXT:    ret void
+; PADDING-LABEL: define void @vec3_extract(
+; PADDING-SAME: <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606:%.*]], ptr [[CALL3_I536:%.*]]) {
+; PADDING-NEXT:  entry:
+; PADDING-NEXT:    [[TMP0:%.*]] = shufflevector <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606]], <3 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+; PADDING-NEXT:    store <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606]], ptr [[CALL3_I536]], align 2
+; PADDING-NEXT:    ret void
+;
+; NO-PADDING-LABEL: define void @vec3_extract(
+; NO-PADDING-SAME: <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606:%.*]], ptr [[CALL3_I536:%.*]]) {
+; NO-PADDING-NEXT:  entry:
+; NO-PADDING-NEXT:    [[PIXEL_SROA_0_4_VEC_EXTRACT:%.*]] = extractelement <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606]], i64 2
+; NO-PADDING-NEXT:    [[RED668:%.*]] = getelementptr i16, ptr [[CALL3_I536]], i64 2
+; NO-PADDING-NEXT:    store i16 [[PIXEL_SROA_0_4_VEC_EXTRACT]], ptr [[RED668]], align 2
+; NO-PADDING-NEXT:    [[PIXEL_SROA_0_2_VEC_EXTRACT:%.*]] = extractelement <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606]], i64 1
+; NO-PADDING-NEXT:    [[GREEN670:%.*]] = getelementptr i16, ptr [[CALL3_I536]], i64 1
+; NO-PADDING-NEXT:    store i16 [[PIXEL_SROA_0_2_VEC_EXTRACT]], ptr [[GREEN670]], align 2
+; NO-PADDING-NEXT:    [[PIXEL_SROA_0_0_VEC_EXTRACT:%.*]] = extractelement <3 x i16> [[PIXEL_SROA_0_4_VEC_INSERT606]], i64 0
+; NO-PADDING-NEXT:    store i16 [[PIXEL_SROA_0_0_VEC_EXTRACT]], ptr [[CALL3_I536]], align 2
+; NO-PADDING-NEXT:    ret void
 ;
 entry:
   %pixel.sroa.0.4.vec.extract = extractelement <3 x i16> %pixel.sroa.0.4.vec.insert606, i64 2
@@ -513,4 +538,46 @@ entry:
   ret void
 }
 
+define void @can_reorder_vec3_op_with_padding(ptr %A, <3 x float> %in) {
+; CHECK-LABEL: define void @can_reorder_vec3_op_with_padding(
+; CHECK-SAME: ptr [[A:%.*]], <3 x float> [[IN:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX42_I:%.*]] = getelementptr float, ptr [[A]], i64 2
+; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <3 x float> [[IN]], i64 0
+; CHECK-NEXT:    [[SUB_I362:%.*]] = fsub float [[TMP0]], [[TMP0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.fmuladd.f32(float [[SUB_I362]], float 2.000000e+00, float 3.000000e+00)
+; CHECK-NEXT:    [[MUL6_I_I_I_I:%.*]] = fmul float [[TMP1]], 3.000000e+00
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <3 x float> [[IN]], <3 x float> poison, <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT:    [[TMP3:%.*]] = fsub <2 x float> [[TMP2]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP3]], <2 x float> <float 2.000000e+00, float 2.000000e+00>, <2 x float> <float 3.000000e+00, float 3.000000e+00>)
+; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x float> [[TMP4]], <float 3.000000e+00, float 3.000000e+00>
+; CHECK-NEXT:    store <2 x float> [[TMP5]], ptr [[A]], align 4
+; CHECK-NEXT:    store float [[MUL6_I_I_I_I]], ptr [[ARRAYIDX42_I]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arrayidx42.i = getelementptr float, ptr %A, i64 2
+  %arrayidx35.i = getelementptr float, ptr %A, i64 1
+  %0 = extractelement <3 x float> %in, i64 0
+  %1 = extractelement <3 x float> %in, i64 0
+  %sub.i362 = fsub float %0, %1
+  %2 = extractelement <3 x float> %in, i64 1
+  %3 = extractelement <3 x float> %in, i64 1
+  %sub5.i = fsub float %2, %3
+  %4 = extractelement <3 x float> %in, i64 2
+  %5 = extractelement <3 x float> %in, i64 2
+  %sub9.i = fsub float %4, %5
+  %6 = call float @llvm.fmuladd.f32(float %sub5.i, float 2.000000e+00, float 3.000000e+00)
+  %7 = call float @llvm.fmuladd.f32(float %sub9.i, float 2.000000e+00, float 3.000000e+00)
+  %8 = call float @llvm.fmuladd.f32(float %sub.i362, float 2.000000e+00, float 3.000000e+00)
+  %mul.i.i.i.i373 = fmul float %6, 3.000000e+00
+  %mul3.i.i.i.i = fmul float %7, 3.000000e+00
+  %mul6.i.i.i.i = fmul float %8, 3.000000e+00
+  store float %mul.i.i.i.i373, ptr %A, align 4
+  store float %mul3.i.i.i.i, ptr %arrayidx35.i, align 4
+  store float %mul6.i.i.i.i, ptr %arrayidx42.i, align 4
+  ret void
+}
+
 declare float @llvm.fmuladd.f32(float, float, float)
+declare double @llvm.fmuladd.f64(double, double, double)
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/odd_store.ll b/llvm/test/Transforms/SLPVectorizer/X86/odd_store.ll
index 4795ac65592037..6d9d77ebecf59f 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/odd_store.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/odd_store.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=slp-vectorizer,dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+; RUN: opt < %s -passes=slp-vectorizer,dce -slp-vectorize-with-padding -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck --check-prefixes=CHECK,PADDING %s
+; RUN: opt < %s -passes=slp-vectorizer,dce -slp-vectorize-with-padding=false -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck --check-prefixes=CHECK,NO-PADDING %s
 
 ;int foo(char * restrict A, ptr restrict B, float T) {
 ;  A[0] = (T * B[10] + 4.0);
@@ -8,31 +9,47 @@
 ;}
 
 define i32 @foo(ptr noalias nocapture %A, ptr noalias nocapture %B, float %T) {
-; CHECK-LABEL: @foo(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 10
-; CHECK-NEXT:    [[TMP2:%.*]] = load float, ptr [[TMP1]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = fmul float [[TMP2]], [[T:%.*]]
-; CHECK-NEXT:    [[TMP4:%.*]] = fpext float [[TMP3]] to double
-; CHECK-NEXT:    [[TMP5:%.*]] = fadd double [[TMP4]], 4.000000e+00
-; CHECK-NEXT:    [[TMP6:%.*]] = fptosi double [[TMP5]] to i8
-; CHECK-NEXT:    store i8 [[TMP6]], ptr [[A:%.*]], align 1
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 11
-; CHECK-NEXT:    [[TMP8:%.*]] = load float, ptr [[TMP7]], align 4
-; CHECK-NEXT:    [[TMP9:%.*]] = fmul float [[TMP8]], [[T]]
-; CHECK-NEXT:    [[TMP10:%.*]] = fpext float [[TMP9]] to double
-; CHECK-NEXT:    [[TMP11:%.*]] = fadd double [[TMP10]], 5.000000e+00
-; CHECK-NEXT:    [[TMP12:%.*]] = fptosi double [[TMP11]] to i8
-; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 1
-; CHECK-NEXT:    store i8 [[TMP12]], ptr [[TMP13]], align 1
-; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B]], i64 12
-; CHECK-NEXT:    [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4
-; CHECK-NEXT:    [[TMP16:%.*]] = fmul float [[TMP15]], [[T]]
-; CHECK-NEXT:    [[TMP17:%.*]] = fpext float [[TMP16]] to double
-; CHECK-NEXT:    [[TMP18:%.*]] = fadd double [[TMP17]], 6.000000e+00
-; CHECK-NEXT:    [[TMP19:%.*]] = fptosi double [[TMP18]] to i8
-; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 2
-; CHECK-NEXT:    store i8 [[TMP19]], ptr [[TMP20]], align 1
-; CHECK-NEXT:    ret i32 undef
+; PADDING-LABEL: @foo(
+; PADDING-NEXT:    [[TMP1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 10
+; PADDING-NEXT:    [[TMP2:%.*]] = load <3 x float>, ptr [[TMP1]], align 4
+; PADDING-NEXT:    [[TMP3:%.*]] = shufflevector <3 x float> [[TMP2]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+; PADDING-NEXT:    [[TMP4:%.*]] = insertelement <3 x float> poison, float [[T:%.*]], i32 0
+; PADDING-NEXT:    [[TMP5:%.*]] = insertelement <3 x float> [[TMP4]], float [[T]], i32 1
+; PADDING-NEXT:    [[TMP6:%.*]] = insertelement <3 x float> [[TMP5]], float [[T]], i32 2
+; PADDING-NEXT:    [[TMP7:%.*]] = shufflevector <3 x float> [[TMP6]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+; PADDING-NEXT:    [[TMP8:%.*]] = fmul <4 x float> [[TMP3]], [[TMP7]]
+; PADDING-NEXT:    [[TMP9:%.*]] = fpext <4 x float> [[TMP8]] to <4 x double>
+; PADDING-NEXT:    [[TMP10:%.*]] = fadd <4 x double> [[TMP9]], <double 4.000000e+00, double 5.000000e+00, double 6.000000e+00, double undef>
+; PADDING-NEXT:    [[TMP11:%.*]] = fptosi <4 x double> [[TMP10]] to <4 x i8>
+; PADDING-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i8> [[TMP11]], <4 x i8> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; PADDING-NEXT:    store <3 x i8> [[TMP12]], ptr [[A:%.*]], align 1
+; PADDING-NEXT:    ret i32 undef
+;
+; NO-PADDING-LABEL: @foo(
+; NO-PADDING-NEXT:    [[TMP1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 10
+; NO-PADDING-NEXT:    [[TMP2:%.*]] = load float, ptr [[TMP1]], align 4
+; NO-PADDING-NEXT:    [[TMP3:%.*]] = fmul float [[TMP2]], [[T:%.*]]
+; NO-PADDING-NEXT:    [[TMP4:%.*]] = fpext float [[TMP3]] to double
+; NO-PADDING-NEXT:    [[TMP5:%.*]] = fadd double [[TMP4]], 4.000000e+00
+; NO-PADDING-NEXT:    [[TMP6:%.*]] = fptosi double [[TMP5]] to i8
+; NO-PADDING-NEXT:    store i8 [[TMP6]], ptr [[A:%.*]], align 1
+; NO-PADDING-NEXT:    [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 11
+; NO-PADDING-NEXT:    [[TMP8:%.*]] = load float, ptr [[TMP7]], align 4
+; NO-PADDING-NEXT:    [[TMP9:%.*]] = fmul float [[TMP8]], [[T]]
+; NO-PADDING-NEXT:    [[TMP10:%.*]] = fpext float [[TMP9]] to double
+; NO-PADDING-NEXT:    [[TMP11:%.*]] = fadd double [[TMP10]], 5.000000e+00
+; NO-PADDING-NEXT:    [[TMP12:%.*]] = fptosi double [[TMP11]] to i8
+; NO-PADDING-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 1
+; NO-PADDING-NEXT:    store i8 [[TMP12]], ptr [[TMP13]], align 1
+; NO-PADDING-NEXT:    [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B]], i64 12
+; NO-PADDING-NEXT:    [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4
+; NO-PADDING-NEXT:    [[TMP16:%.*]] = fmul float [[TMP15]], [[T]]
+; NO-PADDING-NEXT:    [[TMP17:%.*]] = fpext float [[TMP16]] to double
+; NO-PADDING-NEXT:    [[TMP18:%.*]] = fadd double [[TMP17]], 6.000000e+00
+; NO-PADDING-NEXT:    [[TMP19:%.*]] = fptosi double [[TMP18]] to i8
+; NO-PADDING-NEXT:    [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 2
+; NO-PADDING-NEXT:    store i8 [[TMP19]], ptr [[TMP20]], align 1
+; NO-PADDING-NEXT:    ret i32 undef
 ;
   %1 = getelementptr inbounds float, ptr %B, i64 10
   %2 = load float, ptr %1, align 4
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll b/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
index 22cd408cd6dc7f..02a807620d3ea6 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
@@ -1,12 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 < %s | FileCheck %s
+; RUN: opt -passes=slp-vectorizer -slp-vectorize-with-padding -S -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 < %s | FileCheck --check-prefixes=CHECK,PADDING %s
+; RUN: opt -passes=slp-vectorizer -slp-vectorize-with-padding=false -S -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 < %s | FileCheck --check-prefixes=CHECK,NO-PADDING %s
 
 define void @add0(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-LABEL: @add0(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[SRC:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = add nsw <4 x i32> [[TMP1]], <i32 1, i32 1, i32 2, i32 3>
-; CHECK-NEXT:    store <4 x i32> [[TMP2]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[SRC:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = add nsw <4 x i32> [[TMP0]], <i32 1, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    store <4 x i32> [[TMP1]], ptr [[DST:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -32,21 +33,34 @@ entry:
 }
 
 define void @add1(ptr noalias %dst, ptr noalias %src) {
-; CHECK-LABEL: @add1(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 1
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
-; CHECK-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 1
-; CHECK-NEXT:    store i32 [[TMP0]], ptr [[DST]], align 4
-; CHECK-NEXT:    [[INCDEC_PTR5:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 3
-; CHECK-NEXT:    [[INCDEC_PTR7:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 3
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i32>, ptr [[INCDEC_PTR]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = add nsw <2 x i32> [[TMP2]], <i32 1, i32 2>
-; CHECK-NEXT:    store <2 x i32> [[TMP3]], ptr [[INCDEC_PTR1]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[INCDEC_PTR5]], align 4
-; CHECK-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP5]], 3
-; CHECK-NEXT:    store i32 [[ADD9]], ptr [[INCDEC_PTR7]], align 4
-; CHECK-NEXT:    ret void
+; PADDING-LABEL: @add1(
+; PADDING-NEXT:  entry:
+; PADDING-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 1
+; PADDING-NEXT:    [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
+; PADDING-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 1
+; PADDING-NEXT:    store i32 [[TMP0]], ptr [[DST]], align 4
+; PADDING-NEXT:    [[TMP1:%.*]] = load <3 x i32>, ptr [[INCDEC_PTR]], align 4
+; PADDING-NEXT:    [[TMP2:%.*]] = shufflevector <3 x i32> [[TMP1]], <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+; PADDING-NEXT:    [[TMP3:%.*]] = add nsw <4 x i32> [[TMP2]], <i32 1, i32 2, i32 3, i32 undef>
+; PADDING-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; PADDING-NEXT:    store <3 x i32> [[TMP4]], ptr [[INCDEC_PTR1]], align 4
+; PADDING-NEXT:    ret void
+;
+; NO-PADDING-LABEL: @add1(
+; NO-PADDING-NEXT:  entry:
+; NO-PADDING-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 1
+; NO-PADDING-NEXT:    [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
+; NO-PADDING-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 1
+; NO-PADDING-NEXT:    store i32 [[TMP0]], ptr [[DST]], align 4
+; NO-PADDING-NEXT:    [[INCDEC_PTR5:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 3
+; NO-PADDING-NEXT:    [[INCDEC_PTR7:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 3
+; NO-PADDING-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[INCDEC_PTR]], align 4
+; NO-PADDING-NEXT:    [[TMP2:%.*]] = add nsw <2 x i32> [[TMP1]], <i32 1, i32 2>
+; NO-PADDING-NEXT:    store <2 x i32> [[TMP2]], ptr [[INCDEC_PTR1]], align 4
+; NO-PADDING-NEXT:    [[TMP3:%.*]] = load i32, ptr [[INCDEC_PTR5]], align 4
+; NO-PADDING-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP3]], 3
+; NO-PADDING-NEXT:    store i32 [[ADD9]], ptr [[INCDEC_PTR7]], align 4
+; NO-PADDING-NEXT:    ret void
 ;
 entry:
   %incdec.ptr = getelementptr inbounds i32, ptr %src, i64 1
@@ -81,9 +95,9 @@ define void @sub0(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[INCDEC_PTR]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 2
 ; CHECK-NEXT:    store i32 [[TMP1]], ptr [[INCDEC_PTR1]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr [[INCDEC_PTR2]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <2 x i32> [[TMP3]], <i32 -2, i32 -3>
-; CHECK-NEXT:    store <2 x i32> [[TMP4]], ptr [[INCDEC_PTR3]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i32>, ptr [[INCDEC_PTR2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = add nsw <2 x i32> [[TMP2]], <i32 -2, i32 -3>
+; CHECK-NEXT:    store <2 x i32> [[TMP3]], ptr [[INCDEC_PTR3]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -110,9 +124,9 @@ entry:
 define void @sub1(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-LABEL: @sub1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[SRC:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = add nsw <4 x i32> [[TMP1]], <i32 4, i32 -1, i32 -2, i32 -3>
-; CHECK-NEXT:    store <4 x i32> [[TMP2]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[SRC:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = add nsw <4 x i32> [[TMP0]], <i32 4, i32 -1, i32 -2, i32 -3>
+; CHECK-NEXT:    store <4 x i32> [[TMP1]], ptr [[DST:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -140,9 +154,9 @@ entry:
 define void @sub2(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-LABEL: @sub2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[SRC:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = add nsw <4 x i32> [[TMP1]], <i32 -1, i32 -1, i32 -2, i32 -3>
-; CHECK-NEXT:    store <4 x i32> [[TMP2]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[SRC:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = add nsw <4 x i32> [[TMP0]], <i32 -1, i32 -1, i32 -2, i32 -3>
+; CHECK-NEXT:    store <4 x i32> [[TMP1]], ptr [[DST:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -179,11 +193,11 @@ define void @addsub0(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[INCDEC_PTR]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 2
 ; CHECK-NEXT:    store i32 [[TMP1]], ptr [[INCDEC_PTR1]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr [[INCDEC_PTR2]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <2 x i32> [[TMP3]], <i32 -2, i32 -3>
-; CHECK-NEXT:    [[TMP5:%.*]] = sub nsw <2 x i32> [[TMP3]], <i32 -2, i32 -3>
-; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <2 x i32> [[TMP4]], <2 x i32> [[TMP5]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT:    store <2 x i32> [[TMP6]], ptr [[INCDEC_PTR3]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i32>, ptr [[INCDEC_PTR2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = add nsw <2 x i32> [[TMP2]], <i32 -2, i32 -3>
+; CHECK-NEXT:    [[TMP4:%.*]] = sub nsw <2 x i32> [[TMP2]], <i32 -2, i32 -3>
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i32> [[TMP3]], <2 x i32> [[TMP4]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT:    store <2 x i32> [[TMP5]], ptr [[INCDEC_PTR3]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -212,17 +226,17 @@ define void @addsub1(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 2
 ; CHECK-NEXT:    [[INCDEC_PTR3:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 2
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[SRC]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = add nsw <2 x i32> [[TMP1]], <i32 -1, i32 -1>
-; CHECK-NEXT:    [[TMP3:%.*]] = sub nsw <2 x i32> [[TMP1]], <i32 -1, i32 -1>
-; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT:    store <2 x i32> [[TMP4]], ptr [[DST]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[SRC]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = add nsw <2 x i32> [[TMP0]], <i32 -1, i32 -1>
+; CHECK-NEXT:    [[TMP2:%.*]] = sub nsw <2 x i32> [[TMP0]], <i32 -1, i32 -1>
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> [[TMP2]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT:    store <2 x i32> [[TMP3]], ptr [[DST]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 3
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[INCDEC_PTR2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[INCDEC_PTR2]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 3
-; CHECK-NEXT:    store i32 [[TMP6]], ptr [[INCDEC_PTR3]], align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr [[INCDEC_PTR4]], align 4
-; CHECK-NEXT:    [[SUB8:%.*]] = sub nsw i32 [[TMP7]], -3
+; CHECK-NEXT:    store i32 [[TMP4]], ptr [[INCDEC_PTR3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[INCDEC_PTR4]], align 4
+; CHECK-NEXT:    [[SUB8:%.*]] = sub nsw i32 [[TMP5]], -3
 ; CHECK-NEXT:    store i32 [[SUB8]], ptr [[INCDEC_PTR6]], align 4
 ; CHECK-NEXT:    ret void
 ;
@@ -252,15 +266,15 @@ define void @mul(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[INCDEC_PTR2:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 2
 ; CHECK-NEXT:    [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 2
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[SRC]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = mul nsw <2 x i32> [[TMP1]], <i32 257, i32 -3>
-; CHECK-NEXT:    store <2 x i32> [[TMP2]], ptr [[DST]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[SRC]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = mul nsw <2 x i32> [[TMP0]], <i32 257, i32 -3>
+; CHECK-NEXT:    store <2 x i32> [[TMP1]], ptr [[DST]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR5:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 3
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[INCDEC_PTR2]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[INCDEC_PTR2]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR7:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 3
-; CHECK-NEXT:    store i32 [[TMP4]], ptr [[INCDEC_PTR4]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[INCDEC_PTR5]], align 4
-; CHECK-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[TMP5]], -9
+; CHECK-NEXT:    store i32 [[TMP2]], ptr [[INCDEC_PTR4]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[INCDEC_PTR5]], align 4
+; CHECK-NEXT:    [[MUL9:%.*]] = mul nsw i32 [[TMP3]], -9
 ; CHECK-NEXT:    store i32 [[MUL9]], ptr [[INCDEC_PTR7]], align 4
 ; CHECK-NEXT:    ret void
 ;
@@ -286,21 +300,34 @@ entry:
 }
 
 define void @shl0(ptr noalias %dst, ptr noalias %src) {
-; CHECK-LABEL: @shl0(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 1
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
-; CHECK-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 1
-; CHECK-NEXT:    store i32 [[TMP0]], ptr [[DST]], align 4
-; CHECK-NEXT:    [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 3
-; CHECK-NEXT:    [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 3
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i32>, ptr [[INCDEC_PTR]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = shl <2 x i32> [[TMP2]], <i32 1, i32 2>
-; CHECK-NEXT:    store <2 x i32> [[TMP3]], ptr [[INCDEC_PTR1]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[INCDEC_PTR4]], align 4
-; CHECK-NEXT:    [[SHL8:%.*]] = shl i32 [[TMP5]], 3
-; CHECK-NEXT:    store i32 [[SHL8]], ptr [[INCDEC_PTR6]], align 4
-; CHECK-NEXT:    ret void
+; PADDING-LABEL: @shl0(
+; PADDING-NEXT:  entry:
+; PADDING-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 1
+; PADDING-NEXT:    [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
+; PADDING-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 1
+; PADDING-NEXT:    store i32 [[TMP0]], ptr [[DST]], align 4
+; PADDING-NEXT:    [[TMP1:%.*]] = load <3 x i32>, ptr [[INCDEC_PTR]], align 4
+; PADDING-NEXT:    [[TMP2:%.*]] = shufflevector <3 x i32> [[TMP1]], <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+; PADDING-NEXT:    [[TMP3:%.*]] = shl <4 x i32> [[TMP2]], <i32 1, i32 2, i32 3, i32 undef>
+; PADDING-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; PADDING-NEXT:    store <3 x i32> [[TMP4]], ptr [[INCDEC_PTR1]], align 4
+; PADDING-NEXT:    ret void
+;
+; NO-PADDING-LABEL: @shl0(
+; NO-PADDING-NEXT:  entry:
+; NO-PADDING-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 1
+; NO-PADDING-NEXT:    [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
+; NO-PADDING-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 1
+; NO-PADDING-NEXT:    store i32 [[TMP0]], ptr [[DST]], align 4
+; NO-PADDING-NEXT:    [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 3
+; NO-PADDING-NEXT:    [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 3
+; NO-PADDING-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[INCDEC_PTR]], align 4
+; NO-PADDING-NEXT:    [[TMP2:%.*]] = shl <2 x i32> [[TMP1]], <i32 1, i32 2>
+; NO-PADDING-NEXT:    store <2 x i32> [[TMP2]], ptr [[INCDEC_PTR1]], align 4
+; NO-PADDING-NEXT:    [[TMP3:%.*]] = load i32, ptr [[INCDEC_PTR4]], align 4
+; NO-PADDING-NEXT:    [[SHL8:%.*]] = shl i32 [[TMP3]], 3
+; NO-PADDING-NEXT:    store i32 [[SHL8]], ptr [[INCDEC_PTR6]], align 4
+; NO-PADDING-NEXT:    ret void
 ;
 entry:
   %incdec.ptr = getelementptr inbounds i32, ptr %src, i64 1
@@ -326,9 +353,9 @@ entry:
 define void @shl1(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-LABEL: @shl1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[SRC:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = shl <4 x i32> [[TMP1]], <i32 7, i32 1, i32 2, i32 3>
-; CHECK-NEXT:    store <4 x i32> [[TMP2]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[SRC:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <4 x i32> [[TMP0]], <i32 7, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    store <4 x i32> [[TMP1]], ptr [[DST:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -356,9 +383,9 @@ entry:
 define void @add0f(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-LABEL: @add0f(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[SRC:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = fadd fast <4 x float> [[TMP1]], <float 1.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>
-; CHECK-NEXT:    store <4 x float> [[TMP2]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[SRC:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd fast <4 x float> [[TMP0]], <float 1.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>
+; CHECK-NEXT:    store <4 x float> [[TMP1]], ptr [[DST:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -384,21 +411,34 @@ entry:
 }
 
 define void @add1f(ptr noalias %dst, ptr noalias %src) {
-; CHECK-LABEL: @add1f(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i64 1
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[SRC]], align 4
-; CHECK-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, ptr [[DST:%.*]], i64 1
-; CHECK-NEXT:    store float [[TMP0]], ptr [[DST]], align 4
-; CHECK-NEXT:    [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, ptr [[SRC]], i64 3
-; CHECK-NEXT:    [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 3
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x float>, ptr [[INCDEC_PTR]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = fadd fast <2 x float> [[TMP2]], <float 1.000000e+00, float 2.000000e+00>
-; CHECK-NEXT:    store <2 x float> [[TMP3]], ptr [[INCDEC_PTR1]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[INCDEC_PTR5]], align 4
-; CHECK-NEXT:    [[ADD9:%.*]] = fadd fast float [[TMP5]], 3.000000e+00
-; CHECK-NEXT:    store float [[ADD9]], ptr [[INCDEC_PTR7]], align 4
-; CHECK-NEXT:    ret void
+; PADDING-LABEL: @add1f(
+; PADDING-NEXT:  entry:
+; PADDING-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i64 1
+; PADDING-NEXT:    [[TMP0:%.*]] = load float, ptr [[SRC]], align 4
+; PADDING-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, ptr [[DST:%.*]], i64 1
+; PADDING-NEXT:    store float [[TMP0]], ptr [[DST]], align 4
+; PADDING-NEXT:    [[TMP1:%.*]] = load <3 x float>, ptr [[INCDEC_PTR]], align 4
+; PADDING-NEXT:    [[TMP2:%.*]] = shufflevector <3 x float> [[TMP1]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+; PADDING-NEXT:    [[TMP3:%.*]] = fadd fast <4 x float> [[TMP2]], <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float undef>
+; PADDING-NEXT:    [[TMP4:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; PADDING-NEXT:    store <3 x float> [[TMP4]], ptr [[INCDEC_PTR1]], align 4
+; PADDING-NEXT:    ret void
+;
+; NO-PADDING-LABEL: @add1f(
+; NO-PADDING-NEXT:  entry:
+; NO-PADDING-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i64 1
+; NO-PADDING-NEXT:    [[TMP0:%.*]] = load float, ptr [[SRC]], align 4
+; NO-PADDING-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, ptr [[DST:%.*]], i64 1
+; NO-PADDING-NEXT:    store float [[TMP0]], ptr [[DST]], align 4
+; NO-PADDING-NEXT:    [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, ptr [[SRC]], i64 3
+; NO-PADDING-NEXT:    [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 3
+; NO-PADDING-NEXT:    [[TMP1:%.*]] = load <2 x float>, ptr [[INCDEC_PTR]], align 4
+; NO-PADDING-NEXT:    [[TMP2:%.*]] = fadd fast <2 x float> [[TMP1]], <float 1.000000e+00, float 2.000000e+00>
+; NO-PADDING-NEXT:    store <2 x float> [[TMP2]], ptr [[INCDEC_PTR1]], align 4
+; NO-PADDING-NEXT:    [[TMP3:%.*]] = load float, ptr [[INCDEC_PTR5]], align 4
+; NO-PADDING-NEXT:    [[ADD9:%.*]] = fadd fast float [[TMP3]], 3.000000e+00
+; NO-PADDING-NEXT:    store float [[ADD9]], ptr [[INCDEC_PTR7]], align 4
+; NO-PADDING-NEXT:    ret void
 ;
 entry:
   %incdec.ptr = getelementptr inbounds float, ptr %src, i64 1
@@ -433,9 +473,9 @@ define void @sub0f(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[INCDEC_PTR]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 2
 ; CHECK-NEXT:    store float [[TMP1]], ptr [[INCDEC_PTR1]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x float>, ptr [[INCDEC_PTR2]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = fadd fast <2 x float> [[TMP3]], <float -2.000000e+00, float -3.000000e+00>
-; CHECK-NEXT:    store <2 x float> [[TMP4]], ptr [[INCDEC_PTR4]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x float>, ptr [[INCDEC_PTR2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = fadd fast <2 x float> [[TMP2]], <float -2.000000e+00, float -3.000000e+00>
+; CHECK-NEXT:    store <2 x float> [[TMP3]], ptr [[INCDEC_PTR4]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -462,9 +502,9 @@ entry:
 define void @sub1f(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-LABEL: @sub1f(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[SRC:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = fadd fast <4 x float> [[TMP1]], <float 4.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>
-; CHECK-NEXT:    store <4 x float> [[TMP2]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[SRC:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd fast <4 x float> [[TMP0]], <float 4.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>
+; CHECK-NEXT:    store <4 x float> [[TMP1]], ptr [[DST:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -492,9 +532,9 @@ entry:
 define void @sub2f(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-LABEL: @sub2f(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[SRC:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = fadd fast <4 x float> [[TMP1]], <float -1.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>
-; CHECK-NEXT:    store <4 x float> [[TMP2]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[SRC:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd fast <4 x float> [[TMP0]], <float -1.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>
+; CHECK-NEXT:    store <4 x float> [[TMP1]], ptr [[DST:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -531,11 +571,11 @@ define void @addsub0f(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[INCDEC_PTR]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR3:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 2
 ; CHECK-NEXT:    store float [[TMP1]], ptr [[INCDEC_PTR1]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x float>, ptr [[INCDEC_PTR2]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = fadd fast <2 x float> [[TMP3]], <float -2.000000e+00, float -3.000000e+00>
-; CHECK-NEXT:    [[TMP5:%.*]] = fsub fast <2 x float> [[TMP3]], <float -2.000000e+00, float -3.000000e+00>
-; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> [[TMP5]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT:    store <2 x float> [[TMP6]], ptr [[INCDEC_PTR3]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x float>, ptr [[INCDEC_PTR2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = fadd fast <2 x float> [[TMP2]], <float -2.000000e+00, float -3.000000e+00>
+; CHECK-NEXT:    [[TMP4:%.*]] = fsub fast <2 x float> [[TMP2]], <float -2.000000e+00, float -3.000000e+00>
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> [[TMP4]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT:    store <2 x float> [[TMP5]], ptr [[INCDEC_PTR3]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -564,17 +604,17 @@ define void @addsub1f(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i64 2
 ; CHECK-NEXT:    [[INCDEC_PTR3:%.*]] = getelementptr inbounds float, ptr [[DST:%.*]], i64 2
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x float>, ptr [[SRC]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = fadd fast <2 x float> [[TMP1]], <float -1.000000e+00, float -1.000000e+00>
-; CHECK-NEXT:    [[TMP3:%.*]] = fsub fast <2 x float> [[TMP1]], <float -1.000000e+00, float -1.000000e+00>
-; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> [[TMP3]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT:    store <2 x float> [[TMP4]], ptr [[DST]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[SRC]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd fast <2 x float> [[TMP0]], <float -1.000000e+00, float -1.000000e+00>
+; CHECK-NEXT:    [[TMP2:%.*]] = fsub fast <2 x float> [[TMP0]], <float -1.000000e+00, float -1.000000e+00>
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> [[TMP2]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT:    store <2 x float> [[TMP3]], ptr [[DST]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, ptr [[SRC]], i64 3
-; CHECK-NEXT:    [[TMP6:%.*]] = load float, ptr [[INCDEC_PTR2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = load float, ptr [[INCDEC_PTR2]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR6:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 3
-; CHECK-NEXT:    store float [[TMP6]], ptr [[INCDEC_PTR3]], align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = load float, ptr [[INCDEC_PTR4]], align 4
-; CHECK-NEXT:    [[SUB8:%.*]] = fsub fast float [[TMP7]], -3.000000e+00
+; CHECK-NEXT:    store float [[TMP4]], ptr [[INCDEC_PTR3]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[INCDEC_PTR4]], align 4
+; CHECK-NEXT:    [[SUB8:%.*]] = fsub fast float [[TMP5]], -3.000000e+00
 ; CHECK-NEXT:    store float [[SUB8]], ptr [[INCDEC_PTR6]], align 4
 ; CHECK-NEXT:    ret void
 ;
@@ -604,15 +644,15 @@ define void @mulf(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i64 2
 ; CHECK-NEXT:    [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, ptr [[DST:%.*]], i64 2
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x float>, ptr [[SRC]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = fmul fast <2 x float> [[TMP1]], <float 2.570000e+02, float -3.000000e+00>
-; CHECK-NEXT:    store <2 x float> [[TMP2]], ptr [[DST]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[SRC]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = fmul fast <2 x float> [[TMP0]], <float 2.570000e+02, float -3.000000e+00>
+; CHECK-NEXT:    store <2 x float> [[TMP1]], ptr [[DST]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, ptr [[SRC]], i64 3
-; CHECK-NEXT:    [[TMP4:%.*]] = load float, ptr [[INCDEC_PTR2]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load float, ptr [[INCDEC_PTR2]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 3
-; CHECK-NEXT:    store float [[TMP4]], ptr [[INCDEC_PTR4]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[INCDEC_PTR5]], align 4
-; CHECK-NEXT:    [[SUB9:%.*]] = fmul fast float [[TMP5]], -9.000000e+00
+; CHECK-NEXT:    store float [[TMP2]], ptr [[INCDEC_PTR4]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr [[INCDEC_PTR5]], align 4
+; CHECK-NEXT:    [[SUB9:%.*]] = fmul fast float [[TMP3]], -9.000000e+00
 ; CHECK-NEXT:    store float [[SUB9]], ptr [[INCDEC_PTR7]], align 4
 ; CHECK-NEXT:    ret void
 ;
@@ -640,9 +680,9 @@ entry:
 define void @add0fn(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-LABEL: @add0fn(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[SRC:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = fadd <4 x float> [[TMP1]], <float 1.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>
-; CHECK-NEXT:    store <4 x float> [[TMP2]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[SRC:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd <4 x float> [[TMP0]], <float 1.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>
+; CHECK-NEXT:    store <4 x float> [[TMP1]], ptr [[DST:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -668,21 +708,34 @@ entry:
 }
 
 define void @add1fn(ptr noalias %dst, ptr noalias %src) {
-; CHECK-LABEL: @add1fn(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i64 1
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[SRC]], align 4
-; CHECK-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, ptr [[DST:%.*]], i64 1
-; CHECK-NEXT:    store float [[TMP0]], ptr [[DST]], align 4
-; CHECK-NEXT:    [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, ptr [[SRC]], i64 3
-; CHECK-NEXT:    [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 3
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x float>, ptr [[INCDEC_PTR]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = fadd <2 x float> [[TMP2]], <float 1.000000e+00, float 2.000000e+00>
-; CHECK-NEXT:    store <2 x float> [[TMP3]], ptr [[INCDEC_PTR1]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[INCDEC_PTR5]], align 4
-; CHECK-NEXT:    [[ADD9:%.*]] = fadd float [[TMP5]], 3.000000e+00
-; CHECK-NEXT:    store float [[ADD9]], ptr [[INCDEC_PTR7]], align 4
-; CHECK-NEXT:    ret void
+; PADDING-LABEL: @add1fn(
+; PADDING-NEXT:  entry:
+; PADDING-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i64 1
+; PADDING-NEXT:    [[TMP0:%.*]] = load float, ptr [[SRC]], align 4
+; PADDING-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, ptr [[DST:%.*]], i64 1
+; PADDING-NEXT:    store float [[TMP0]], ptr [[DST]], align 4
+; PADDING-NEXT:    [[TMP1:%.*]] = load <3 x float>, ptr [[INCDEC_PTR]], align 4
+; PADDING-NEXT:    [[TMP2:%.*]] = shufflevector <3 x float> [[TMP1]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
+; PADDING-NEXT:    [[TMP3:%.*]] = fadd <4 x float> [[TMP2]], <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float undef>
+; PADDING-NEXT:    [[TMP4:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
+; PADDING-NEXT:    store <3 x float> [[TMP4]], ptr [[INCDEC_PTR1]], align 4
+; PADDING-NEXT:    ret void
+;
+; NO-PADDING-LABEL: @add1fn(
+; NO-PADDING-NEXT:  entry:
+; NO-PADDING-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i64 1
+; NO-PADDING-NEXT:    [[TMP0:%.*]] = load float, ptr [[SRC]], align 4
+; NO-PADDING-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds float, ptr [[DST:%.*]], i64 1
+; NO-PADDING-NEXT:    store float [[TMP0]], ptr [[DST]], align 4
+; NO-PADDING-NEXT:    [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, ptr [[SRC]], i64 3
+; NO-PADDING-NEXT:    [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 3
+; NO-PADDING-NEXT:    [[TMP1:%.*]] = load <2 x float>, ptr [[INCDEC_PTR]], align 4
+; NO-PADDING-NEXT:    [[TMP2:%.*]] = fadd <2 x float> [[TMP1]], <float 1.000000e+00, float 2.000000e+00>
+; NO-PADDING-NEXT:    store <2 x float> [[TMP2]], ptr [[INCDEC_PTR1]], align 4
+; NO-PADDING-NEXT:    [[TMP3:%.*]] = load float, ptr [[INCDEC_PTR5]], align 4
+; NO-PADDING-NEXT:    [[ADD9:%.*]] = fadd float [[TMP3]], 3.000000e+00
+; NO-PADDING-NEXT:    store float [[ADD9]], ptr [[INCDEC_PTR7]], align 4
+; NO-PADDING-NEXT:    ret void
 ;
 entry:
   %incdec.ptr = getelementptr inbounds float, ptr %src, i64 1
@@ -717,9 +770,9 @@ define void @sub0fn(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[INCDEC_PTR]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 2
 ; CHECK-NEXT:    store float [[TMP1]], ptr [[INCDEC_PTR1]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x float>, ptr [[INCDEC_PTR2]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x float> [[TMP3]], <float -2.000000e+00, float -3.000000e+00>
-; CHECK-NEXT:    store <2 x float> [[TMP4]], ptr [[INCDEC_PTR4]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x float>, ptr [[INCDEC_PTR2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = fadd <2 x float> [[TMP2]], <float -2.000000e+00, float -3.000000e+00>
+; CHECK-NEXT:    store <2 x float> [[TMP3]], ptr [[INCDEC_PTR4]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -746,9 +799,9 @@ entry:
 define void @sub1fn(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-LABEL: @sub1fn(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[SRC:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = fadd <4 x float> [[TMP1]], <float 4.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>
-; CHECK-NEXT:    store <4 x float> [[TMP2]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[SRC:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd <4 x float> [[TMP0]], <float 4.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>
+; CHECK-NEXT:    store <4 x float> [[TMP1]], ptr [[DST:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -776,9 +829,9 @@ entry:
 define void @sub2fn(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-LABEL: @sub2fn(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[SRC:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = fadd <4 x float> [[TMP1]], <float -1.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>
-; CHECK-NEXT:    store <4 x float> [[TMP2]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[SRC:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd <4 x float> [[TMP0]], <float -1.000000e+00, float -1.000000e+00, float -2.000000e+00, float -3.000000e+00>
+; CHECK-NEXT:    store <4 x float> [[TMP1]], ptr [[DST:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -808,15 +861,15 @@ define void @mulfn(ptr noalias %dst, ptr noalias %src) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[INCDEC_PTR2:%.*]] = getelementptr inbounds float, ptr [[SRC:%.*]], i64 2
 ; CHECK-NEXT:    [[INCDEC_PTR4:%.*]] = getelementptr inbounds float, ptr [[DST:%.*]], i64 2
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x float>, ptr [[SRC]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = fmul <2 x float> [[TMP1]], <float 2.570000e+02, float -3.000000e+00>
-; CHECK-NEXT:    store <2 x float> [[TMP2]], ptr [[DST]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[SRC]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = fmul <2 x float> [[TMP0]], <float 2.570000e+02, float -3.000000e+00>
+; CHECK-NEXT:    store <2 x float> [[TMP1]], ptr [[DST]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR5:%.*]] = getelementptr inbounds float, ptr [[SRC]], i64 3
-; CHECK-NEXT:    [[TMP4:%.*]] = load float, ptr [[INCDEC_PTR2]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load float, ptr [[INCDEC_PTR2]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR7:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 3
-; CHECK-NEXT:    store float [[TMP4]], ptr [[INCDEC_PTR4]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[INCDEC_PTR5]], align 4
-; CHECK-NEXT:    [[SUB9:%.*]] = fmul fast float [[TMP5]], -9.000000e+00
+; CHECK-NEXT:    store float [[TMP2]], ptr [[INCDEC_PTR4]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr [[INCDEC_PTR5]], align 4
+; CHECK-NEXT:    [[SUB9:%.*]] = fmul fast float [[TMP3]], -9.000000e+00
 ; CHECK-NEXT:    store float [[SUB9]], ptr [[INCDEC_PTR7]], align 4
 ; CHECK-NEXT:    ret void
 ;
