[llvm] 7b312e2 - [NFC][X86][AVX2] Add baseline CodeGen/CostModel tests for interleaved loads/stores of i16 w/ strides 2/3/4

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Sun Apr 25 15:13:37 PDT 2021


Author: Roman Lebedev
Date: 2021-04-26T01:13:07+03:00
New Revision: 7b312e228c36fc10cec4ecbd98a289f213a97518

URL: https://github.com/llvm/llvm-project/commit/7b312e228c36fc10cec4ecbd98a289f213a97518
DIFF: https://github.com/llvm/llvm-project/commit/7b312e228c36fc10cec4ecbd98a289f213a97518.diff

LOG: [NFC][X86][AVX2] Add baseline CodeGen/CostModel tests for interleaved loads/stores of i16 w/ strides 2/3/4

`X86TTIImpl::getInterleavedMemoryOpCostAVX2()` currently contains cost data
for only a handful of (stride, vector type) tuples. For now, at least add tests for a few more.

I'm guessing that we care about how well these patterns codegen, since
their presumed cost is what drives vectorization decisions,
so I've added codegen tests too.

There's one easily-missed caveat for these codegen tests: in the
interleaved-load tests, we have to ensure that each deinterleaved vector
escapes separately, so the strided shuffles stay live rather than getting
folded together. The same goes for the interleaved-store tests.
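
For illustration, here is the shape the stride-2 load tests take (this is
essentially the body of @vf2 from the added stride-2 CodeGen test, using the
same value names): a single wide load is deinterleaved by two shufflevectors,
and each result is stored through its own output pointer.

  ; load the interleaved data as one wide vector
  %wide.vec = load <4 x i16>, <4 x i16>* %in.vec, align 32
  ; deinterleave into the even and odd elements
  %strided.vec0 = shufflevector <4 x i16> %wide.vec, <4 x i16> poison, <2 x i32> <i32 0, i32 2>
  %strided.vec1 = shufflevector <4 x i16> %wide.vec, <4 x i16> poison, <2 x i32> <i32 1, i32 3>
  ; escape each deinterleaved vector via a separate store
  store <2 x i16> %strided.vec0, <2 x i16>* %out.vec0, align 32
  store <2 x i16> %strided.vec1, <2 x i16>* %out.vec1, align 32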

Added: 
    llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-2.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-3.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-4.ll
    llvm/test/Analysis/CostModel/X86/interleaved-store-i16-stride-2.ll
    llvm/test/Analysis/CostModel/X86/interleaved-store-i16-stride-3.ll
    llvm/test/Analysis/CostModel/X86/interleaved-store-i16-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-2.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-2.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-2.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-2.ll
new file mode 100644
index 000000000000..e3625caa51b8
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-2.ll
@@ -0,0 +1,44 @@
+; RUN: opt -loop-vectorize -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = global [1024 x i16] zeroinitializer, align 128
+@B = global [1024 x i16] zeroinitializer, align 128
+
+; CHECK: LV: Checking a loop in "test"
+; CHECK: LV: Found an estimated cost of 1 for VF 1 For instruction:   %v0 = load i16, i16* %in0, align 2
+; CHECK: LV: Found an estimated cost of 9 for VF 2 For instruction:   %v0 = load i16, i16* %in0, align 2
+; CHECK: LV: Found an estimated cost of 17 for VF 4 For instruction:   %v0 = load i16, i16* %in0, align 2
+; CHECK: LV: Found an estimated cost of 41 for VF 8 For instruction:   %v0 = load i16, i16* %in0, align 2
+; CHECK: LV: Found an estimated cost of 114 for VF 16 For instruction:   %v0 = load i16, i16* %in0, align 2
+
+define void @test() {
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+
+  %iv.0 = add nuw nsw i64 %iv, 0
+  %iv.1 = add nuw nsw i64 %iv, 1
+
+  %in0 = getelementptr inbounds [1024 x i16], [1024 x i16]* @A, i64 0, i64 %iv.0
+  %in1 = getelementptr inbounds [1024 x i16], [1024 x i16]* @A, i64 0, i64 %iv.1
+
+  %v0 = load i16, i16* %in0
+  %v1 = load i16, i16* %in1
+
+  %reduce.add.0 = add i16 %v0, %v1
+
+  %out = getelementptr inbounds [1024 x i16], [1024 x i16]* @B, i64 0, i64 %iv.0
+  store i16 %reduce.add.0, i16* %out
+
+  %iv.next = add nuw nsw i64 %iv.0, 2
+  %cmp = icmp ult i64 %iv.next, 1024
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+  ret void
+}

diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-3.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-3.ll
new file mode 100644
index 000000000000..d462a21a3cb1
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-3.ll
@@ -0,0 +1,48 @@
+; RUN: opt -loop-vectorize -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = global [1024 x i16] zeroinitializer, align 128
+@B = global [1024 x i16] zeroinitializer, align 128
+
+; CHECK: LV: Checking a loop in "test"
+; CHECK: LV: Found an estimated cost of 1 for VF 1 For instruction:   %v0 = load i16, i16* %in0, align 2
+; CHECK: LV: Found an estimated cost of 14 for VF 2 For instruction:   %v0 = load i16, i16* %in0, align 2
+; CHECK: LV: Found an estimated cost of 30 for VF 4 For instruction:   %v0 = load i16, i16* %in0, align 2
+; CHECK: LV: Found an estimated cost of 58 for VF 8 For instruction:   %v0 = load i16, i16* %in0, align 2
+; CHECK: LV: Found an estimated cost of 171 for VF 16 For instruction:   %v0 = load i16, i16* %in0, align 2
+
+define void @test() {
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+
+  %iv.0 = add nuw nsw i64 %iv, 0
+  %iv.1 = add nuw nsw i64 %iv, 1
+  %iv.2 = add nuw nsw i64 %iv, 2
+
+  %in0 = getelementptr inbounds [1024 x i16], [1024 x i16]* @A, i64 0, i64 %iv.0
+  %in1 = getelementptr inbounds [1024 x i16], [1024 x i16]* @A, i64 0, i64 %iv.1
+  %in2 = getelementptr inbounds [1024 x i16], [1024 x i16]* @A, i64 0, i64 %iv.2
+
+  %v0 = load i16, i16* %in0
+  %v1 = load i16, i16* %in1
+  %v2 = load i16, i16* %in2
+
+  %reduce.add.0 = add i16 %v0, %v1
+  %reduce.add.1 = add i16 %reduce.add.0, %v2
+
+  %out = getelementptr inbounds [1024 x i16], [1024 x i16]* @B, i64 0, i64 %iv.0
+  store i16 %reduce.add.1, i16* %out
+
+  %iv.next = add nuw nsw i64 %iv.0, 3
+  %cmp = icmp ult i64 %iv.next, 1024
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+  ret void
+}

diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-4.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-4.ll
new file mode 100644
index 000000000000..4be191016207
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-4.ll
@@ -0,0 +1,52 @@
+; RUN: opt -loop-vectorize -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = global [1024 x i16] zeroinitializer, align 128
+@B = global [1024 x i16] zeroinitializer, align 128
+
+; CHECK: LV: Checking a loop in "test"
+; CHECK: LV: Found an estimated cost of 1 for VF 1 For instruction:   %v0 = load i16, i16* %in0, align 2
+; CHECK: LV: Found an estimated cost of 17 for VF 2 For instruction:   %v0 = load i16, i16* %in0, align 2
+; CHECK: LV: Found an estimated cost of 41 for VF 4 For instruction:   %v0 = load i16, i16* %in0, align 2
+; CHECK: LV: Found an estimated cost of 82 for VF 8 For instruction:   %v0 = load i16, i16* %in0, align 2
+; CHECK: LV: Found an estimated cost of 228 for VF 16 For instruction:   %v0 = load i16, i16* %in0, align 2
+
+define void @test() {
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+
+  %iv.0 = add nuw nsw i64 %iv, 0
+  %iv.1 = add nuw nsw i64 %iv, 1
+  %iv.2 = add nuw nsw i64 %iv, 2
+  %iv.3 = add nuw nsw i64 %iv, 3
+
+  %in0 = getelementptr inbounds [1024 x i16], [1024 x i16]* @A, i64 0, i64 %iv.0
+  %in1 = getelementptr inbounds [1024 x i16], [1024 x i16]* @A, i64 0, i64 %iv.1
+  %in2 = getelementptr inbounds [1024 x i16], [1024 x i16]* @A, i64 0, i64 %iv.2
+  %in3 = getelementptr inbounds [1024 x i16], [1024 x i16]* @A, i64 0, i64 %iv.3
+
+  %v0 = load i16, i16* %in0
+  %v1 = load i16, i16* %in1
+  %v2 = load i16, i16* %in2
+  %v3 = load i16, i16* %in3
+
+  %reduce.add.0 = add i16 %v0, %v1
+  %reduce.add.1 = add i16 %reduce.add.0, %v2
+  %reduce.add.2 = add i16 %reduce.add.1, %v3
+
+  %out = getelementptr inbounds [1024 x i16], [1024 x i16]* @B, i64 0, i64 %iv.0
+  store i16 %reduce.add.2, i16* %out
+
+  %iv.next = add nuw nsw i64 %iv.0, 4
+  %cmp = icmp ult i64 %iv.next, 1024
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+  ret void
+}

diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-store-i16-stride-2.ll b/llvm/test/Analysis/CostModel/X86/interleaved-store-i16-stride-2.ll
new file mode 100644
index 000000000000..3f1347c64c41
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-store-i16-stride-2.ll
@@ -0,0 +1,45 @@
+; RUN: opt -loop-vectorize -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = global [1024 x i16] zeroinitializer, align 128
+@B = global [1024 x i16] zeroinitializer, align 128
+
+; CHECK: LV: Checking a loop in "test"
+; CHECK: LV: Found an estimated cost of 1 for VF 1 For instruction:   store i16 %v1, i16* %out1, align 2
+; CHECK: LV: Found an estimated cost of 9 for VF 2 For instruction:   store i16 %v1, i16* %out1, align 2
+; CHECK: LV: Found an estimated cost of 17 for VF 4 For instruction:   store i16 %v1, i16* %out1, align 2
+; CHECK: LV: Found an estimated cost of 49 for VF 8 For instruction:   store i16 %v1, i16* %out1, align 2
+; CHECK: LV: Found an estimated cost of 114 for VF 16 For instruction:   store i16 %v1, i16* %out1, align 2
+
+define void @test() {
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+
+  %iv.0 = add nuw nsw i64 %iv, 0
+  %iv.1 = add nuw nsw i64 %iv, 1
+
+  %in = getelementptr inbounds [1024 x i16], [1024 x i16]* @A, i64 0, i64 %iv.0
+  %v = load i16, i16* %in
+
+  %v0 = add i16 %v, 0
+  %v1 = add i16 %v, 1
+
+  %out0 = getelementptr inbounds [1024 x i16], [1024 x i16]* @B, i64 0, i64 %iv.0
+  %out1 = getelementptr inbounds [1024 x i16], [1024 x i16]* @B, i64 0, i64 %iv.1
+
+  store i16 %v0, i16* %out0
+  store i16 %v1, i16* %out1
+
+  %iv.next = add nuw nsw i64 %iv.0, 2
+  %cmp = icmp ult i64 %iv.next, 1024
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+  ret void
+}

diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-store-i16-stride-3.ll b/llvm/test/Analysis/CostModel/X86/interleaved-store-i16-stride-3.ll
new file mode 100644
index 000000000000..b3bdca84ca33
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-store-i16-stride-3.ll
@@ -0,0 +1,49 @@
+; RUN: opt -loop-vectorize -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = global [1024 x i16] zeroinitializer, align 128
+@B = global [1024 x i16] zeroinitializer, align 128
+
+; CHECK: LV: Checking a loop in "test"
+; CHECK: LV: Found an estimated cost of 1 for VF 1 For instruction:   store i16 %v2, i16* %out2, align 2
+; CHECK: LV: Found an estimated cost of 14 for VF 2 For instruction:   store i16 %v2, i16* %out2, align 2
+; CHECK: LV: Found an estimated cost of 34 for VF 4 For instruction:   store i16 %v2, i16* %out2, align 2
+; CHECK: LV: Found an estimated cost of 66 for VF 8 For instruction:   store i16 %v2, i16* %out2, align 2
+; CHECK: LV: Found an estimated cost of 171 for VF 16 For instruction:   store i16 %v2, i16* %out2, align 2
+
+define void @test() {
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+
+  %iv.0 = add nuw nsw i64 %iv, 0
+  %iv.1 = add nuw nsw i64 %iv, 1
+  %iv.2 = add nuw nsw i64 %iv, 2
+
+  %in = getelementptr inbounds [1024 x i16], [1024 x i16]* @A, i64 0, i64 %iv.0
+  %v = load i16, i16* %in
+
+  %v0 = add i16 %v, 0
+  %v1 = add i16 %v, 1
+  %v2 = add i16 %v, 2
+
+  %out0 = getelementptr inbounds [1024 x i16], [1024 x i16]* @B, i64 0, i64 %iv.0
+  %out1 = getelementptr inbounds [1024 x i16], [1024 x i16]* @B, i64 0, i64 %iv.1
+  %out2 = getelementptr inbounds [1024 x i16], [1024 x i16]* @B, i64 0, i64 %iv.2
+
+  store i16 %v0, i16* %out0
+  store i16 %v1, i16* %out1
+  store i16 %v2, i16* %out2
+
+  %iv.next = add nuw nsw i64 %iv.0, 3
+  %cmp = icmp ult i64 %iv.next, 1024
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+  ret void
+}

diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-store-i16-stride-4.ll b/llvm/test/Analysis/CostModel/X86/interleaved-store-i16-stride-4.ll
new file mode 100644
index 000000000000..9e03bde324f9
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-store-i16-stride-4.ll
@@ -0,0 +1,53 @@
+; RUN: opt -loop-vectorize -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = global [1024 x i16] zeroinitializer, align 128
+@B = global [1024 x i16] zeroinitializer, align 128
+
+; CHECK: LV: Checking a loop in "test"
+; CHECK: LV: Found an estimated cost of 1 for VF 1 For instruction:   store i16 %v3, i16* %out3, align 2
+; CHECK: LV: Found an estimated cost of 17 for VF 2 For instruction:   store i16 %v3, i16* %out3, align 2
+; CHECK: LV: Found an estimated cost of 49 for VF 4 For instruction:   store i16 %v3, i16* %out3, align 2
+; CHECK: LV: Found an estimated cost of 98 for VF 8 For instruction:   store i16 %v3, i16* %out3, align 2
+; CHECK: LV: Found an estimated cost of 228 for VF 16 For instruction:   store i16 %v3, i16* %out3, align 2
+
+define void @test() {
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+
+  %iv.0 = add nuw nsw i64 %iv, 0
+  %iv.1 = add nuw nsw i64 %iv, 1
+  %iv.2 = add nuw nsw i64 %iv, 2
+  %iv.3 = add nuw nsw i64 %iv, 3
+
+  %in = getelementptr inbounds [1024 x i16], [1024 x i16]* @A, i64 0, i64 %iv.0
+  %v = load i16, i16* %in
+
+  %v0 = add i16 %v, 0
+  %v1 = add i16 %v, 1
+  %v2 = add i16 %v, 2
+  %v3 = add i16 %v, 3
+
+  %out0 = getelementptr inbounds [1024 x i16], [1024 x i16]* @B, i64 0, i64 %iv.0
+  %out1 = getelementptr inbounds [1024 x i16], [1024 x i16]* @B, i64 0, i64 %iv.1
+  %out2 = getelementptr inbounds [1024 x i16], [1024 x i16]* @B, i64 0, i64 %iv.2
+  %out3 = getelementptr inbounds [1024 x i16], [1024 x i16]* @B, i64 0, i64 %iv.3
+
+  store i16 %v0, i16* %out0
+  store i16 %v1, i16* %out1
+  store i16 %v2, i16* %out2
+  store i16 %v3, i16* %out3
+
+  %iv.next = add nuw nsw i64 %iv.0, 4
+  %cmp = icmp ult i64 %iv.next, 1024
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+  ret void
+}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-2.ll
new file mode 100644
index 000000000000..96a79c4abed8
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-2.ll
@@ -0,0 +1,120 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck --check-prefixes=AVX2,AVX2-SLOW %s
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-shuffle | FileCheck --check-prefixes=AVX2,AVX2-FAST %s
+
+; These patterns are produced by LoopVectorizer for interleaved loads.
+
+define void @vf2(<4 x i16>* %in.vec, <2 x i16>* %out.vec0, <2 x i16>* %out.vec1) nounwind {
+; AVX2-LABEL: vf2:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
+; AVX2-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
+; AVX2-NEXT:    vmovd %xmm1, (%rsi)
+; AVX2-NEXT:    vmovd %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+  %wide.vec = load <4 x i16>, <4 x i16>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <4 x i16> %wide.vec, <4 x i16> poison, <2 x i32> <i32 0, i32 2>
+  %strided.vec1 = shufflevector <4 x i16> %wide.vec, <4 x i16> poison, <2 x i32> <i32 1, i32 3>
+
+  store <2 x i16> %strided.vec0, <2 x i16>* %out.vec0, align 32
+  store <2 x i16> %strided.vec1, <2 x i16>* %out.vec1, align 32
+
+  ret void
+}
+
+define void @vf4(<8 x i16>* %in.vec, <4 x i16>* %out.vec0, <4 x i16>* %out.vec1) nounwind {
+; AVX2-LABEL: vf4:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vmovq %xmm1, (%rsi)
+; AVX2-NEXT:    vmovq %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+  %wide.vec = load <8 x i16>, <8 x i16>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %strided.vec1 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+
+  store <4 x i16> %strided.vec0, <4 x i16>* %out.vec0, align 32
+  store <4 x i16> %strided.vec1, <4 x i16>* %out.vec1, align 32
+
+  ret void
+}
+
+define void @vf8(<16 x i16>* %in.vec, <8 x i16>* %out.vec0, <8 x i16>* %out.vec1) nounwind {
+; AVX2-LABEL: vf8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm3 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
+; AVX2-NEXT:    vpackusdw %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
+; AVX2-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT:    vmovdqa %xmm0, (%rsi)
+; AVX2-NEXT:    vmovdqa %xmm1, (%rdx)
+; AVX2-NEXT:    retq
+  %wide.vec = load <16 x i16>, <16 x i16>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %strided.vec1 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+
+  store <8 x i16> %strided.vec0, <8 x i16>* %out.vec0, align 32
+  store <8 x i16> %strided.vec1, <8 x i16>* %out.vec1, align 32
+
+  ret void
+}
+
+define void @vf16(<32 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.vec1) nounwind {
+; AVX2-SLOW-LABEL: vf16:
+; AVX2-SLOW:       # %bb.0:
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
+; AVX2-SLOW-NEXT:    vshufps {{.*#+}} ymm2 = ymm3[0,2],ymm2[0,2],ymm3[4,6],ymm2[4,6]
+; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-SLOW-NEXT:    vmovaps %ymm2, (%rsi)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm0, (%rdx)
+; AVX2-SLOW-NEXT:    vzeroupper
+; AVX2-SLOW-NEXT:    retq
+;
+; AVX2-FAST-LABEL: vf16:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm3
+; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm3[0,2],ymm2[4,6],ymm3[4,6]
+; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-FAST-NEXT:    vmovaps %ymm2, (%rsi)
+; AVX2-FAST-NEXT:    vmovdqa %ymm0, (%rdx)
+; AVX2-FAST-NEXT:    vzeroupper
+; AVX2-FAST-NEXT:    retq
+  %wide.vec = load <32 x i16>, <32 x i16>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  %strided.vec1 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+
+  store <16 x i16> %strided.vec0, <16 x i16>* %out.vec0, align 32
+  store <16 x i16> %strided.vec1, <16 x i16>* %out.vec1, align 32
+
+  ret void
+}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
new file mode 100644
index 000000000000..277b8a7be778
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
@@ -0,0 +1,176 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck --check-prefixes=AVX2,AVX2-SLOW %s
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-shuffle | FileCheck --check-prefixes=AVX2,AVX2-FAST %s
+
+; These patterns are produced by LoopVectorizer for interleaved loads.
+
+define void @vf2(<6 x i16>* %in.vec, <2 x i16>* %out.vec0, <2 x i16>* %out.vec1, <2 x i16>* %out.vec2) nounwind {
+; AVX2-SLOW-LABEL: vf2:
+; AVX2-SLOW:       # %bb.0:
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,3,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovd %xmm1, (%rsi)
+; AVX2-SLOW-NEXT:    vmovd %xmm2, (%rdx)
+; AVX2-SLOW-NEXT:    vmovd %xmm0, (%rcx)
+; AVX2-SLOW-NEXT:    retq
+;
+; AVX2-FAST-LABEL: vf2:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,3,2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[2,3,8,9,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vmovd %xmm1, (%rsi)
+; AVX2-FAST-NEXT:    vmovd %xmm2, (%rdx)
+; AVX2-FAST-NEXT:    vmovd %xmm0, (%rcx)
+; AVX2-FAST-NEXT:    retq
+  %wide.vec = load <6 x i16>, <6 x i16>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <6 x i16> %wide.vec, <6 x i16> poison, <2 x i32> <i32 0, i32 3>
+  %strided.vec1 = shufflevector <6 x i16> %wide.vec, <6 x i16> poison, <2 x i32> <i32 1, i32 4>
+  %strided.vec2 = shufflevector <6 x i16> %wide.vec, <6 x i16> poison, <2 x i32> <i32 2, i32 5>
+
+  store <2 x i16> %strided.vec0, <2 x i16>* %out.vec0, align 32
+  store <2 x i16> %strided.vec1, <2 x i16>* %out.vec1, align 32
+  store <2 x i16> %strided.vec2, <2 x i16>* %out.vec2, align 32
+
+  ret void
+}
+
+define void @vf4(<12 x i16>* %in.vec, <4 x i16>* %out.vec0, <4 x i16>* %out.vec1, <4 x i16>* %out.vec2) nounwind {
+; AVX2-SLOW-LABEL: vf4:
+; AVX2-SLOW:       # %bb.0:
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,6,7,12,13,2,3,u,u,u,u,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[2,3,8,9,14,15,4,5,u,u,u,u,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-SLOW-NEXT:    vmovq %xmm2, (%rsi)
+; AVX2-SLOW-NEXT:    vmovq %xmm3, (%rdx)
+; AVX2-SLOW-NEXT:    vmovq %xmm0, (%rcx)
+; AVX2-SLOW-NEXT:    retq
+;
+; AVX2-FAST-LABEL: vf4:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-FAST-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,6,7,12,13,2,3,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[2,3,8,9,14,15,4,5,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-FAST-NEXT:    vmovq %xmm2, (%rsi)
+; AVX2-FAST-NEXT:    vmovq %xmm3, (%rdx)
+; AVX2-FAST-NEXT:    vmovq %xmm0, (%rcx)
+; AVX2-FAST-NEXT:    retq
+  %wide.vec = load <12 x i16>, <12 x i16>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+  %strided.vec1 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+  %strided.vec2 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
+
+  store <4 x i16> %strided.vec0, <4 x i16>* %out.vec0, align 32
+  store <4 x i16> %strided.vec1, <4 x i16>* %out.vec1, align 32
+  store <4 x i16> %strided.vec2, <4 x i16>* %out.vec2, align 32
+
+  ret void
+}
+
+define void @vf8(<24 x i16>* %in.vec, <8 x i16>* %out.vec0, <8 x i16>* %out.vec1, <8 x i16>* %out.vec2) nounwind {
+; AVX2-LABEL: vf8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX2-NEXT:    vmovdqa 32(%rdi), %xmm2
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0,1],xmm2[2],xmm0[3,4],xmm2[5],xmm0[6,7]
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm1[1],xmm3[2,3],xmm1[4],xmm3[5,6],xmm1[7]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11]
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm4 = xmm2[0],xmm0[1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm1[2],xmm4[3,4],xmm1[5],xmm4[6,7]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13]
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6],xmm2[7]
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15]
+; AVX2-NEXT:    vmovdqa %xmm3, (%rsi)
+; AVX2-NEXT:    vmovdqa %xmm4, (%rdx)
+; AVX2-NEXT:    vmovdqa %xmm0, (%rcx)
+; AVX2-NEXT:    retq
+  %wide.vec = load <24 x i16>, <24 x i16>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
+  %strided.vec1 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
+  %strided.vec2 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
+
+  store <8 x i16> %strided.vec0, <8 x i16>* %out.vec0, align 32
+  store <8 x i16> %strided.vec1, <8 x i16>* %out.vec1, align 32
+  store <8 x i16> %strided.vec2, <8 x i16>* %out.vec2, align 32
+
+  ret void
+}
+
+define void @vf16(<48 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.vec1, <16 x i16>* %out.vec2) nounwind {
+; AVX2-LABEL: vf16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm2
+; AVX2-NEXT:    vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7],ymm2[8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13,14],ymm3[15]
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
+; AVX2-NEXT:    vmovdqa 80(%rdi), %xmm3
+; AVX2-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm5 = xmm4[0,1],xmm3[2],xmm4[3,4],xmm3[5],xmm4[6,7]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,2,3,8,9,14,15,4,5,10,11]
+; AVX2-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm5 = ymm2[0,1,2],ymm5[3,4,5,6,7],ymm2[8,9,10],ymm5[11,12,13,14,15]
+; AVX2-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm5 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
+; AVX2-NEXT:    vpblendvb %ymm5, %ymm1, %ymm0, %ymm5
+; AVX2-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm6[2],ymm5[3,4],ymm6[5],ymm5[6,7,8,9],ymm6[10],ymm5[11,12],ymm6[13],ymm5[14,15]
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm6 = xmm3[0,1],xmm4[2],xmm3[3,4],xmm4[5],xmm3[6,7]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,4,5,10,11,0,1,6,7,12,13]
+; AVX2-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm6 = ymm5[0,1,2],ymm6[3,4,5,6,7],ymm5[8,9,10],ymm6[11,12,13,14,15]
+; AVX2-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,6,7,4]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
+; AVX2-NEXT:    vpblendvb %ymm6, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,0,1,6,7,12,13,2,3,8,9,14,15]
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; AVX2-NEXT:    vmovdqa %ymm2, (%rsi)
+; AVX2-NEXT:    vmovdqa %ymm5, (%rdx)
+; AVX2-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %wide.vec = load <48 x i16>, <48 x i16>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
+  %strided.vec1 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <16 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46>
+  %strided.vec2 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <16 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47>
+
+  store <16 x i16> %strided.vec0, <16 x i16>* %out.vec0, align 32
+  store <16 x i16> %strided.vec1, <16 x i16>* %out.vec1, align 32
+  store <16 x i16> %strided.vec2, <16 x i16>* %out.vec2, align 32
+
+  ret void
+}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
new file mode 100644
index 000000000000..c1dd21517ae3
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
@@ -0,0 +1,412 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck --check-prefixes=AVX2,AVX2-SLOW %s
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-shuffle | FileCheck --check-prefixes=AVX2,AVX2-FAST %s
+
+; These patterns are produced by LoopVectorizer for interleaved loads.
+
+define void @vf2(<8 x i16>* %in.vec, <2 x i16>* %out.vec0, <2 x i16>* %out.vec1, <2 x i16>* %out.vec2, <2 x i16>* %out.vec3) nounwind {
+; AVX2-LABEL: vf2:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
+; AVX2-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm1[0,2,2,3,4,5,6,7]
+; AVX2-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[1,3,2,3,4,5,6,7]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX2-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[2,0,2,3,4,5,6,7]
+; AVX2-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
+; AVX2-NEXT:    vmovd %xmm2, (%rsi)
+; AVX2-NEXT:    vmovd %xmm1, (%rdx)
+; AVX2-NEXT:    vmovd %xmm3, (%rcx)
+; AVX2-NEXT:    vmovd %xmm0, (%r8)
+; AVX2-NEXT:    retq
+  %wide.vec = load <8 x i16>, <8 x i16>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <2 x i32> <i32 0, i32 4>
+  %strided.vec1 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <2 x i32> <i32 1, i32 5>
+  %strided.vec2 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <2 x i32> <i32 2, i32 6>
+  %strided.vec3 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <2 x i32> <i32 3, i32 7>
+
+  store <2 x i16> %strided.vec0, <2 x i16>* %out.vec0, align 32
+  store <2 x i16> %strided.vec1, <2 x i16>* %out.vec1, align 32
+  store <2 x i16> %strided.vec2, <2 x i16>* %out.vec2, align 32
+  store <2 x i16> %strided.vec3, <2 x i16>* %out.vec3, align 32
+
+  ret void
+}
+
+define void @vf4(<16 x i16>* %in.vec, <4 x i16>* %out.vec0, <4 x i16>* %out.vec1, <4 x i16>* %out.vec2, <4 x i16>* %out.vec3) nounwind {
+; AVX2-SLOW-LABEL: vf4:
+; AVX2-SLOW:       # %bb.0:
+; AVX2-SLOW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm2[0],xmm0[1,2,3],xmm2[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3],xmm1[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vpackusdw %xmm3, %xmm0, %xmm0
+; AVX2-SLOW-NEXT:    vpackusdw %xmm0, %xmm0, %xmm0
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,3,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm2[2,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm1[2,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX2-SLOW-NEXT:    vmovq %xmm0, (%rsi)
+; AVX2-SLOW-NEXT:    vmovq %xmm3, (%rdx)
+; AVX2-SLOW-NEXT:    vmovq %xmm4, (%rcx)
+; AVX2-SLOW-NEXT:    vmovq %xmm1, (%r8)
+; AVX2-SLOW-NEXT:    retq
+;
+; AVX2-FAST-LABEL: vf4:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-FAST-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm2[0],xmm0[1,2,3],xmm2[4],xmm0[5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3],xmm1[4],xmm0[5,6,7]
+; AVX2-FAST-NEXT:    vpackusdw %xmm3, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vpackusdw %xmm0, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm4
+; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm3
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm2[2,0,2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm1[2,0,2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX2-FAST-NEXT:    vmovq %xmm0, (%rsi)
+; AVX2-FAST-NEXT:    vmovq %xmm3, (%rdx)
+; AVX2-FAST-NEXT:    vmovq %xmm4, (%rcx)
+; AVX2-FAST-NEXT:    vmovq %xmm1, (%r8)
+; AVX2-FAST-NEXT:    retq
+  %wide.vec = load <16 x i16>, <16 x i16>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %strided.vec1 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
+  %strided.vec2 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
+  %strided.vec3 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
+
+  store <4 x i16> %strided.vec0, <4 x i16>* %out.vec0, align 32
+  store <4 x i16> %strided.vec1, <4 x i16>* %out.vec1, align 32
+  store <4 x i16> %strided.vec2, <4 x i16>* %out.vec2, align 32
+  store <4 x i16> %strided.vec3, <4 x i16>* %out.vec3, align 32
+
+  ret void
+}
+
+define void @vf8(<32 x i16>* %in.vec, <8 x i16>* %out.vec0, <8 x i16>* %out.vec1, <8 x i16>* %out.vec2, <8 x i16>* %out.vec3) nounwind {
+; AVX2-SLOW-LABEL: vf8:
+; AVX2-SLOW:       # %bb.0:
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm2
+; AVX2-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm3
+; AVX2-SLOW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
+; AVX2-SLOW-NEXT:    vpackusdw %xmm5, %xmm6, %xmm5
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX2-SLOW-NEXT:    vpackusdw %xmm6, %xmm4, %xmm4
+; AVX2-SLOW-NEXT:    vpackusdw %xmm5, %xmm4, %xmm8
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[1,3,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[1,3,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm3[0,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm2[0,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm1[2,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm0[2,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm6[2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
+; AVX2-SLOW-NEXT:    vmovdqa %xmm8, (%rsi)
+; AVX2-SLOW-NEXT:    vmovdqa %xmm5, (%rdx)
+; AVX2-SLOW-NEXT:    vmovdqa %xmm4, (%rcx)
+; AVX2-SLOW-NEXT:    vmovdqa %xmm0, (%r8)
+; AVX2-SLOW-NEXT:    retq
+;
+; AVX2-FAST-LABEL: vf8:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-FAST-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %xmm2
+; AVX2-FAST-NEXT:    vmovdqa 48(%rdi), %xmm3
+; AVX2-FAST-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
+; AVX2-FAST-NEXT:    vpackusdw %xmm5, %xmm6, %xmm5
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX2-FAST-NEXT:    vpackusdw %xmm6, %xmm4, %xmm4
+; AVX2-FAST-NEXT:    vpackusdw %xmm5, %xmm4, %xmm8
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,2,3,2,3,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm6
+; AVX2-FAST-NEXT:    vpshufb %xmm5, %xmm2, %xmm5
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm1, %xmm7
+; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm6
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm3[0,1,2,0,4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm2[0,1,2,0,4,5,6,7]
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm1[2,0,2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm0[2,0,2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm6[2,3]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7]
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
+; AVX2-FAST-NEXT:    vmovdqa %xmm8, (%rsi)
+; AVX2-FAST-NEXT:    vmovdqa %xmm5, (%rdx)
+; AVX2-FAST-NEXT:    vmovdqa %xmm4, (%rcx)
+; AVX2-FAST-NEXT:    vmovdqa %xmm0, (%r8)
+; AVX2-FAST-NEXT:    retq
+  %wide.vec = load <32 x i16>, <32 x i16>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
+  %strided.vec1 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
+  %strided.vec2 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
+  %strided.vec3 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
+
+  store <8 x i16> %strided.vec0, <8 x i16>* %out.vec0, align 32
+  store <8 x i16> %strided.vec1, <8 x i16>* %out.vec1, align 32
+  store <8 x i16> %strided.vec2, <8 x i16>* %out.vec2, align 32
+  store <8 x i16> %strided.vec3, <8 x i16>* %out.vec3, align 32
+
+  ret void
+}
+
+define void @vf16(<64 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.vec1, <16 x i16>* %out.vec2, <16 x i16>* %out.vec3) nounwind {
+; AVX2-SLOW-LABEL: vf16:
+; AVX2-SLOW:       # %bb.0:
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm10
+; AVX2-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm11
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm12
+; AVX2-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm13
+; AVX2-SLOW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm13[0],xmm0[1,2,3],xmm13[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm12[0],xmm0[1,2,3],xmm12[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vpackusdw %xmm2, %xmm5, %xmm2
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm11[0],xmm0[1,2,3],xmm11[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm10[0],xmm0[1,2,3],xmm10[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vpackusdw %xmm5, %xmm6, %xmm5
+; AVX2-SLOW-NEXT:    vpackusdw %xmm2, %xmm5, %xmm8
+; AVX2-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm6
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0],xmm0[1,2,3],xmm6[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm7
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0],xmm0[1,2,3],xmm7[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vpackusdw %xmm2, %xmm5, %xmm2
+; AVX2-SLOW-NEXT:    vpackusdw %xmm2, %xmm2, %xmm2
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm9
+; AVX2-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm2
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm0[1,2,3],xmm2[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm5
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0],xmm0[1,2,3],xmm5[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-SLOW-NEXT:    vpackusdw %xmm0, %xmm0, %xmm0
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm6[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm7[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[1,3,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm13[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm12[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm11[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm10[0,2,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,3,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm6[3,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm15[0,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm7[3,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm8[0,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm2[2,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm5[2,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm4[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[3,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm13[0,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm12[3,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[0,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm11[3,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[2,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm10[3,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm4[2,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm15[0,1,3,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm8[0,1,3,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm5[3,1,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[0,1,3,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm7[0,1,3,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm6[3,1,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm2, (%rsi)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm9, (%rdx)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm1, (%r8)
+; AVX2-SLOW-NEXT:    vzeroupper
+; AVX2-SLOW-NEXT:    retq
+;
+; AVX2-FAST-LABEL: vf16:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vmovdqa 64(%rdi), %ymm8
+; AVX2-FAST-NEXT:    vmovdqa 96(%rdi), %ymm13
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm14
+; AVX2-FAST-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %xmm3
+; AVX2-FAST-NEXT:    vmovdqa 48(%rdi), %xmm5
+; AVX2-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm5[0],xmm2[1,2,3],xmm5[4],xmm2[5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm3[0],xmm2[1,2,3],xmm3[4],xmm2[5,6,7]
+; AVX2-FAST-NEXT:    vpackusdw %xmm7, %xmm4, %xmm4
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm14[0],xmm2[1,2,3],xmm14[4],xmm2[5,6,7]
+; AVX2-FAST-NEXT:    vpackusdw %xmm7, %xmm2, %xmm2
+; AVX2-FAST-NEXT:    vpackusdw %xmm4, %xmm2, %xmm2
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,2,2,3,0,2,4,6]
+; AVX2-FAST-NEXT:    vpermd %ymm13, %ymm4, %ymm7
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29]
+; AVX2-FAST-NEXT:    vpermd %ymm8, %ymm4, %ymm4
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %ymm11, %ymm4, %ymm12
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3,4,5],ymm10[6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm2[0,1,2,3],ymm10[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,2,3,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm6
+; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [2,3,10,11,8,9,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm1, %xmm0
+; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm14, %xmm6
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31>
+; AVX2-FAST-NEXT:    vpshufb %ymm15, %ymm7, %ymm6
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %ymm12, %ymm4, %ymm4
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm6[6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,3,2,3,1,3,5,7]
+; AVX2-FAST-NEXT:    vpermd %ymm13, %ymm0, %ymm4
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29]
+; AVX2-FAST-NEXT:    vpermd %ymm8, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vpshufb %ymm11, %ymm0, %ymm8
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm6[6,7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[0,1,2,0,4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm3[0,1,2,0,4,5,6,7]
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm1[2,0,2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm14[3,1,2,3]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[2,0,2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm11[2,3]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufb %ymm15, %ymm4, %ymm4
+; AVX2-FAST-NEXT:    vpshufb %ymm12, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm4[6,7]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm5[0,1,3,1,4,5,6,7]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm6[3,1,2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqa %ymm10, (%rsi)
+; AVX2-FAST-NEXT:    vmovdqa %ymm9, (%rdx)
+; AVX2-FAST-NEXT:    vmovdqa %ymm2, (%rcx)
+; AVX2-FAST-NEXT:    vmovdqa %ymm0, (%r8)
+; AVX2-FAST-NEXT:    vzeroupper
+; AVX2-FAST-NEXT:    retq
+  %wide.vec = load <64 x i16>, <64 x i16>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
+  %strided.vec1 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
+  %strided.vec2 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
+  %strided.vec3 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63>
+
+  store <16 x i16> %strided.vec0, <16 x i16>* %out.vec0, align 32
+  store <16 x i16> %strided.vec1, <16 x i16>* %out.vec1, align 32
+  store <16 x i16> %strided.vec2, <16 x i16>* %out.vec2, align 32
+  store <16 x i16> %strided.vec3, <16 x i16>* %out.vec3, align 32
+
+  ret void
+}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-2.ll
new file mode 100644
index 000000000000..6ba1e08bb52f
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-2.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck --check-prefixes=AVX2 %s
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-shuffle | FileCheck --check-prefixes=AVX2 %s
+
+; These patterns are produced by LoopVectorizer for interleaved stores.
+
+define void @vf2(<2 x i16>* %in.vecptr0, <2 x i16>* %in.vecptr1, <4 x i16>* %out.vec) nounwind {
+; AVX2-LABEL: vf2:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX2-NEXT:    vmovq %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+  %in.vec0 = load <2 x i16>, <2 x i16>* %in.vecptr0, align 32
+  %in.vec1 = load <2 x i16>, <2 x i16>* %in.vecptr1, align 32
+
+  %concat01 = shufflevector <2 x i16> %in.vec0, <2 x i16> %in.vec1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %interleaved.vec = shufflevector <4 x i16> %concat01, <4 x i16> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+
+  store <4 x i16> %interleaved.vec, <4 x i16>* %out.vec, align 32
+
+  ret void
+}
+
+define void @vf4(<4 x i16>* %in.vecptr0, <4 x i16>* %in.vecptr1, <8 x i16>* %out.vec) nounwind {
+; AVX2-LABEL: vf4:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX2-NEXT:    vmovdqa %xmm0, (%rdx)
+; AVX2-NEXT:    retq
+  %in.vec0 = load <4 x i16>, <4 x i16>* %in.vecptr0, align 32
+  %in.vec1 = load <4 x i16>, <4 x i16>* %in.vecptr1, align 32
+
+  %concat01 = shufflevector <4 x i16> %in.vec0, <4 x i16> %in.vec1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %interleaved.vec = shufflevector <8 x i16> %concat01, <8 x i16> poison, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+
+  store <8 x i16> %interleaved.vec, <8 x i16>* %out.vec, align 32
+
+  ret void
+}
+
+define void @vf8(<8 x i16>* %in.vecptr0, <8 x i16>* %in.vecptr1, <16 x i16>* %out.vec) nounwind {
+; AVX2-LABEL: vf8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,8,9,2,3,10,11,4,5,12,13,6,7,14,15,16,17,24,25,18,19,26,27,20,21,28,29,22,23,30,31]
+; AVX2-NEXT:    vmovdqa %ymm0, (%rdx)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %in.vec0 = load <8 x i16>, <8 x i16>* %in.vecptr0, align 32
+  %in.vec1 = load <8 x i16>, <8 x i16>* %in.vecptr1, align 32
+
+  %concat01 = shufflevector <8 x i16> %in.vec0, <8 x i16> %in.vec1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %interleaved.vec = shufflevector <16 x i16> %concat01, <16 x i16> poison, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+
+  store <16 x i16> %interleaved.vec, <16 x i16>* %out.vec, align 32
+
+  ret void
+}
+
+define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <32 x i16>* %out.vec) nounwind {
+; AVX2-LABEL: vf16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX2-NEXT:    vmovdqa 16(%rsi), %xmm1
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX2-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; AVX2-NEXT:    vmovdqa %xmm1, 32(%rdx)
+; AVX2-NEXT:    vmovdqa %xmm2, 48(%rdx)
+; AVX2-NEXT:    vmovdqa %xmm0, (%rdx)
+; AVX2-NEXT:    vmovdqa %xmm4, 16(%rdx)
+; AVX2-NEXT:    retq
+  %in.vec0 = load <16 x i16>, <16 x i16>* %in.vecptr0, align 32
+  %in.vec1 = load <16 x i16>, <16 x i16>* %in.vecptr1, align 32
+
+  %concat01 = shufflevector <16 x i16> %in.vec0, <16 x i16> %in.vec1, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %interleaved.vec = shufflevector <32 x i16> %concat01, <32 x i16> poison, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+
+  store <32 x i16> %interleaved.vec, <32 x i16>* %out.vec, align 32
+
+  ret void
+}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
new file mode 100644
index 000000000000..ad49282400f9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
@@ -0,0 +1,216 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck --check-prefixes=AVX2,AVX2-SLOW %s
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-shuffle | FileCheck --check-prefixes=AVX2,AVX2-FAST %s
+
+; These patterns are produced by LoopVectorizer for interleaved stores.
+
+define void @vf2(<2 x i16>* %in.vecptr0, <2 x i16>* %in.vecptr1, <2 x i16>* %in.vecptr2, <6 x i16>* %out.vec) nounwind {
+; AVX2-LABEL: vf2:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,2,3,4,5,12,13,6,7,4,5,6,7]
+; AVX2-NEXT:    vpextrd $2, %xmm0, 8(%rcx)
+; AVX2-NEXT:    vmovq %xmm0, (%rcx)
+; AVX2-NEXT:    retq
+  %in.vec0 = load <2 x i16>, <2 x i16>* %in.vecptr0, align 32
+  %in.vec1 = load <2 x i16>, <2 x i16>* %in.vecptr1, align 32
+  %in.vec2 = load <2 x i16>, <2 x i16>* %in.vecptr2, align 32
+
+  %concat01 = shufflevector <2 x i16> %in.vec0, <2 x i16> %in.vec1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %concat2u = shufflevector <2 x i16> %in.vec2, <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+  %concat012u = shufflevector <4 x i16> %concat01, <4 x i16> %concat2u, <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
+  %interleaved.vec = shufflevector <6 x i16> %concat012u, <6 x i16> poison, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
+
+  store <6 x i16> %interleaved.vec, <6 x i16>* %out.vec, align 32
+
+  ret void
+}
+
+define void @vf4(<4 x i16>* %in.vecptr0, <4 x i16>* %in.vecptr1, <4 x i16>* %in.vecptr2, <12 x i16>* %out.vec) nounwind {
+; AVX2-LABEL: vf4:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,1,8,9],zero,zero,ymm0[2,3,10,11],zero,zero,ymm0[4,5,12,13,20,21],zero,zero,zero,zero,ymm0[22,23],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[0,1],zero,zero,zero,zero,ymm0[2,3],zero,zero,zero,zero,zero,zero,ymm0[22,23,30,31],zero,zero,ymm0[u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vmovq %xmm1, 16(%rcx)
+; AVX2-NEXT:    vmovdqa %xmm0, (%rcx)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %in.vec0 = load <4 x i16>, <4 x i16>* %in.vecptr0, align 32
+  %in.vec1 = load <4 x i16>, <4 x i16>* %in.vecptr1, align 32
+  %in.vec2 = load <4 x i16>, <4 x i16>* %in.vecptr2, align 32
+
+  %concat01 = shufflevector <4 x i16> %in.vec0, <4 x i16> %in.vec1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %concat2u = shufflevector <4 x i16> %in.vec2, <4 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+  %concat012u = shufflevector <8 x i16> %concat01, <8 x i16> %concat2u, <12 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
+  %interleaved.vec = shufflevector <12 x i16> %concat012u, <12 x i16> poison, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
+
+  store <12 x i16> %interleaved.vec, <12 x i16>* %out.vec, align 32
+
+  ret void
+}
+
+define void @vf8(<8 x i16>* %in.vecptr0, <8 x i16>* %in.vecptr1, <8 x i16>* %in.vecptr2, <24 x i16>* %out.vec) nounwind {
+; AVX2-SLOW-LABEL: vf8:
+; AVX2-SLOW:       # %bb.0:
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm3
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm3[0,1,u,u,6,7,2,3,u,u,8,9,4,5,u,u,16,17,u,u,22,23,18,19,u,u,24,25,20,21,u,u]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,1,u,u,u,u,2,3,u,u,u,u,4,5,u,u,22,23,u,u,u,u,24,25,u,u,u,u,26,27]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,0,0,u,1,1,u,2>
+; AVX2-SLOW-NEXT:    vpermd %ymm2, %ymm4, %ymm4
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
+; AVX2-SLOW-NEXT:    vmovdqa %xmm0, 32(%rcx)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm3, (%rcx)
+; AVX2-SLOW-NEXT:    vzeroupper
+; AVX2-SLOW-NEXT:    retq
+;
+; AVX2-FAST-LABEL: vf8:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-FAST-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX2-FAST-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm3
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,0,0,u,1,1,u,2>
+; AVX2-FAST-NEXT:    vpermd %ymm2, %ymm4, %ymm4
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,4,1,5,1,5,2,6]
+; AVX2-FAST-NEXT:    vpermd %ymm3, %ymm5, %ymm3
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,4,5,u,u,2,3,6,7,u,u,8,9,12,13,u,u,18,19,22,23,u,u,24,25,28,29,u,u,26,27]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
+; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
+; AVX2-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
+; AVX2-FAST-NEXT:    vmovdqa %xmm0, 32(%rcx)
+; AVX2-FAST-NEXT:    vmovdqa %ymm3, (%rcx)
+; AVX2-FAST-NEXT:    vzeroupper
+; AVX2-FAST-NEXT:    retq
+  %in.vec0 = load <8 x i16>, <8 x i16>* %in.vecptr0, align 32
+  %in.vec1 = load <8 x i16>, <8 x i16>* %in.vecptr1, align 32
+  %in.vec2 = load <8 x i16>, <8 x i16>* %in.vecptr2, align 32
+
+  %concat01 = shufflevector <8 x i16> %in.vec0, <8 x i16> %in.vec1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %concat2u = shufflevector <8 x i16> %in.vec2, <8 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %concat012u = shufflevector <16 x i16> %concat01, <16 x i16> %concat2u, <24 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+  %interleaved.vec = shufflevector <24 x i16> %concat012u, <24 x i16> poison, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
+
+  store <24 x i16> %interleaved.vec, <24 x i16>* %out.vec, align 32
+
+  ret void
+}
+
+define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>* %in.vecptr2, <48 x i16>* %out.vec) nounwind {
+; AVX2-SLOW-LABEL: vf16:
+; AVX2-SLOW:       # %bb.0:
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX2-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[1,1,2,2]
+; AVX2-SLOW-NEXT:    vmovdqa (%rsi), %xmm5
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm5[3,3,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm6[2],xmm4[3,4],xmm6[5],xmm4[6,7]
+; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm2, %ymm2
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,0,0,u,1,1,u,2>
+; AVX2-SLOW-NEXT:    vpermd %ymm1, %ymm4, %ymm4
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm2, %ymm4, %ymm2
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,1,2,2]
+; AVX2-SLOW-NEXT:    vmovdqa 16(%rdx), %xmm6
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3],xmm4[4],xmm7[5,6],xmm4[7]
+; AVX2-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[8,9,4,5,10,11,6,7,8,9,14,15,12,13,12,13]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <2,u,3,3,u,4,4,u>
+; AVX2-SLOW-NEXT:    vpermd %ymm1, %ymm4, %ymm1
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
+; AVX2-SLOW-NEXT:    vpblendvb %ymm4, %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,3,3,u,4,4,u,5>
+; AVX2-SLOW-NEXT:    vpermd (%rdi), %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm3, 64(%rcx)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm2, (%rcx)
+; AVX2-SLOW-NEXT:    vzeroupper
+; AVX2-SLOW-NEXT:    retq
+;
+; AVX2-FAST-LABEL: vf16:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX2-FAST-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX2-FAST-NEXT:    vmovdqa (%rsi), %xmm2
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[u,u,u,u,6,7,u,u,u,u,8,9,u,u,u,u]
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm4
+; AVX2-FAST-NEXT:    vmovdqa 16(%rdi), %xmm5
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm4[1,1,2,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1],xmm3[2],xmm6[3,4],xmm3[5],xmm6[6,7]
+; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,0,0,u,1,1,u,2>
+; AVX2-FAST-NEXT:    vpermd %ymm1, %ymm3, %ymm3
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
+; AVX2-FAST-NEXT:    vpblendvb %ymm4, %ymm2, %ymm3, %ymm2
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[1,1,2,2]
+; AVX2-FAST-NEXT:    vmovdqa 16(%rdx), %xmm6
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm3[1],xmm7[2,3],xmm3[4],xmm7[5,6],xmm3[7]
+; AVX2-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[8,9,4,5,10,11,6,7,8,9,14,15,12,13,12,13]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm3, %ymm3
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX2-FAST-NEXT:    vpblendvb %ymm4, %ymm3, %ymm5, %ymm3
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <2,u,3,3,u,4,4,u>
+; AVX2-FAST-NEXT:    vpermd %ymm1, %ymm5, %ymm1
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[10,11,0,1,12,13,12,13,2,3,2,3,14,15,4,5,26,27,16,17,28,29,28,29,18,19,18,19,30,31,20,21]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
+; AVX2-FAST-NEXT:    vpblendvb %ymm5, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,3,3,u,4,4,u,5>
+; AVX2-FAST-NEXT:    vpermd (%rdi), %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpblendvb %ymm4, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX2-FAST-NEXT:    vmovdqa %ymm3, 64(%rcx)
+; AVX2-FAST-NEXT:    vmovdqa %ymm2, (%rcx)
+; AVX2-FAST-NEXT:    vzeroupper
+; AVX2-FAST-NEXT:    retq
+  %in.vec0 = load <16 x i16>, <16 x i16>* %in.vecptr0, align 32
+  %in.vec1 = load <16 x i16>, <16 x i16>* %in.vecptr1, align 32
+  %in.vec2 = load <16 x i16>, <16 x i16>* %in.vecptr2, align 32
+
+  %concat01 = shufflevector <16 x i16> %in.vec0, <16 x i16> %in.vec1, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %concat2u = shufflevector <16 x i16> %in.vec2, <16 x i16> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %concat012u = shufflevector <32 x i16> %concat01, <32 x i16> %concat2u, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
+  %interleaved.vec = shufflevector <48 x i16> %concat012u, <48 x i16> poison, <48 x i32> <i32 0, i32 16, i32 32, i32 1, i32 17, i32 33, i32 2, i32 18, i32 34, i32 3, i32 19, i32 35, i32 4, i32 20, i32 36, i32 5, i32 21, i32 37, i32 6, i32 22, i32 38, i32 7, i32 23, i32 39, i32 8, i32 24, i32 40, i32 9, i32 25, i32 41, i32 10, i32 26, i32 42, i32 11, i32 27, i32 43, i32 12, i32 28, i32 44, i32 13, i32 29, i32 45, i32 14, i32 30, i32 46, i32 15, i32 31, i32 47>
+
+  store <48 x i16> %interleaved.vec, <48 x i16>* %out.vec, align 32
+
+  ret void
+}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
new file mode 100644
index 000000000000..6dc5bfadcbec
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
@@ -0,0 +1,185 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck --check-prefixes=AVX2,AVX2-SLOW %s
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-shuffle | FileCheck --check-prefixes=AVX2,AVX2-FAST %s
+
+; These patterns are produced by LoopVectorizer for interleaved stores.
+
+define void @vf2(<2 x i16>* %in.vecptr0, <2 x i16>* %in.vecptr1, <2 x i16>* %in.vecptr2, <2 x i16>* %in.vecptr3, <8 x i16>* %out.vec) nounwind {
+; AVX2-LABEL: vf2:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,2,3,10,11,4,5,12,13,6,7,14,15]
+; AVX2-NEXT:    vmovdqa %xmm0, (%r8)
+; AVX2-NEXT:    retq
+  %in.vec0 = load <2 x i16>, <2 x i16>* %in.vecptr0, align 32
+  %in.vec1 = load <2 x i16>, <2 x i16>* %in.vecptr1, align 32
+  %in.vec2 = load <2 x i16>, <2 x i16>* %in.vecptr2, align 32
+  %in.vec3 = load <2 x i16>, <2 x i16>* %in.vecptr3, align 32
+
+  %concat01 = shufflevector <2 x i16> %in.vec0, <2 x i16> %in.vec1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %concat23 = shufflevector <2 x i16> %in.vec2, <2 x i16> %in.vec3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %concat0123 = shufflevector <4 x i16> %concat01, <4 x i16> %concat23, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %interleaved.vec = shufflevector <8 x i16> %concat0123, <8 x i16> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
+
+  store <8 x i16> %interleaved.vec, <8 x i16>* %out.vec, align 32
+
+  ret void
+}
+
+define void @vf4(<4 x i16>* %in.vecptr0, <4 x i16>* %in.vecptr1, <4 x i16>* %in.vecptr2, <4 x i16>* %in.vecptr3, <16 x i16>* %out.vec) nounwind {
+; AVX2-SLOW-LABEL: vf4:
+; AVX2-SLOW:       # %bb.0:
+; AVX2-SLOW-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-SLOW-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-SLOW-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-SLOW-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX2-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,1,8,9,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,22,23,30,31]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,0,1,8,9,u,u,u,u,2,3,10,11,20,21,28,29,u,u,u,u,22,23,30,31,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-SLOW-NEXT:    vmovdqa %ymm0, (%r8)
+; AVX2-SLOW-NEXT:    vzeroupper
+; AVX2-SLOW-NEXT:    retq
+;
+; AVX2-FAST-LABEL: vf4:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-FAST-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-FAST-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-FAST-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,2,4,6,1,3,5,7]
+; AVX2-FAST-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,2,3,6,7,10,11,14,15,16,17,20,21,24,25,28,29,18,19,22,23,26,27,30,31]
+; AVX2-FAST-NEXT:    vmovdqa %ymm0, (%r8)
+; AVX2-FAST-NEXT:    vzeroupper
+; AVX2-FAST-NEXT:    retq
+  %in.vec0 = load <4 x i16>, <4 x i16>* %in.vecptr0, align 32
+  %in.vec1 = load <4 x i16>, <4 x i16>* %in.vecptr1, align 32
+  %in.vec2 = load <4 x i16>, <4 x i16>* %in.vecptr2, align 32
+  %in.vec3 = load <4 x i16>, <4 x i16>* %in.vecptr3, align 32
+
+  %concat01 = shufflevector <4 x i16> %in.vec0, <4 x i16> %in.vec1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %concat23 = shufflevector <4 x i16> %in.vec2, <4 x i16> %in.vec3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %concat0123 = shufflevector <8 x i16> %concat01, <8 x i16> %concat23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %interleaved.vec = shufflevector <16 x i16> %concat0123, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
+
+  store <16 x i16> %interleaved.vec, <16 x i16>* %out.vec, align 32
+
+  ret void
+}
+
+define void @vf8(<8 x i16>* %in.vecptr0, <8 x i16>* %in.vecptr1, <8 x i16>* %in.vecptr2, <8 x i16>* %in.vecptr3, <32 x i16>* %out.vec) nounwind {
+; AVX2-LABEL: vf8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX2-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
+; AVX2-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
+; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[0,2,0,2]
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,0,1,8,9,u,u,u,u,2,3,10,11,u,u,u,u,4,5,12,13,u,u,u,u,6,7,14,15>
+; AVX2-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,2,0,2]
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm5 = <0,1,8,9,u,u,u,u,2,3,10,11,u,u,u,u,4,5,12,13,u,u,u,u,6,7,14,15,u,u,u,u>
+; AVX2-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
+; AVX2-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
+; AVX2-NEXT:    vpshufb %ymm5, %ymm0, %ymm0
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT:    vmovdqa %ymm0, 32(%r8)
+; AVX2-NEXT:    vmovdqa %ymm2, (%r8)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %in.vec0 = load <8 x i16>, <8 x i16>* %in.vecptr0, align 32
+  %in.vec1 = load <8 x i16>, <8 x i16>* %in.vecptr1, align 32
+  %in.vec2 = load <8 x i16>, <8 x i16>* %in.vecptr2, align 32
+  %in.vec3 = load <8 x i16>, <8 x i16>* %in.vecptr3, align 32
+
+  %concat01 = shufflevector <8 x i16> %in.vec0, <8 x i16> %in.vec1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %concat23 = shufflevector <8 x i16> %in.vec2, <8 x i16> %in.vec3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %concat0123 = shufflevector <16 x i16> %concat01, <16 x i16> %concat23, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %interleaved.vec = shufflevector <32 x i16> %concat0123, <32 x i16> poison, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, i32 25, i32 2, i32 10, i32 18, i32 26, i32 3, i32 11, i32 19, i32 27, i32 4, i32 12, i32 20, i32 28, i32 5, i32 13, i32 21, i32 29, i32 6, i32 14, i32 22, i32 30, i32 7, i32 15, i32 23, i32 31>
+
+  store <32 x i16> %interleaved.vec, <32 x i16>* %out.vec, align 32
+
+  ret void
+}
+
+define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>* %in.vecptr2, <16 x i16>* %in.vecptr3, <64 x i16>* %out.vec) nounwind {
+; AVX2-LABEL: vf16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rcx), %xmm5
+; AVX2-NEXT:    vmovdqa 16(%rcx), %xmm8
+; AVX2-NEXT:    vmovdqa (%rdx), %xmm6
+; AVX2-NEXT:    vmovdqa 16(%rdx), %xmm9
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX2-NEXT:    vmovdqa (%rsi), %xmm7
+; AVX2-NEXT:    vmovdqa 16(%rsi), %xmm3
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-NEXT:    vmovdqa 16(%rdi), %xmm4
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm10 = xmm1[0],zero,xmm1[1],zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm10, %ymm1
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm11 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm10 = xmm1[0,0,1,1]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm10, %ymm1
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm10 = xmm2[0],zero,xmm2[1],zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm10, %ymm2
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm5, %ymm2
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm5, %ymm0
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm5, %ymm2
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7]
+; AVX2-NEXT:    vmovdqa %ymm2, 96(%r8)
+; AVX2-NEXT:    vmovdqa %ymm0, (%r8)
+; AVX2-NEXT:    vmovdqa %ymm1, 32(%r8)
+; AVX2-NEXT:    vmovdqa %ymm11, 64(%r8)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %in.vec0 = load <16 x i16>, <16 x i16>* %in.vecptr0, align 32
+  %in.vec1 = load <16 x i16>, <16 x i16>* %in.vecptr1, align 32
+  %in.vec2 = load <16 x i16>, <16 x i16>* %in.vecptr2, align 32
+  %in.vec3 = load <16 x i16>, <16 x i16>* %in.vecptr3, align 32
+
+  %concat01 = shufflevector <16 x i16> %in.vec0, <16 x i16> %in.vec1, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %concat23 = shufflevector <16 x i16> %in.vec2, <16 x i16> %in.vec3, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %concat0123 = shufflevector <32 x i16> %concat01, <32 x i16> %concat23, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+  %interleaved.vec = shufflevector <64 x i16> %concat0123, <64 x i16> poison, <64 x i32> <i32 0, i32 16, i32 32, i32 48, i32 1, i32 17, i32 33, i32 49, i32 2, i32 18, i32 34, i32 50, i32 3, i32 19, i32 35, i32 51, i32 4, i32 20, i32 36, i32 52, i32 5, i32 21, i32 37, i32 53, i32 6, i32 22, i32 38, i32 54, i32 7, i32 23, i32 39, i32 55, i32 8, i32 24, i32 40, i32 56, i32 9, i32 25, i32 41, i32 57, i32 10, i32 26, i32 42, i32 58, i32 11, i32 27, i32 43, i32 59, i32 12, i32 28, i32 44, i32 60, i32 13, i32 29, i32 45, i32 61, i32 14, i32 30, i32 46, i32 62, i32 15, i32 31, i32 47, i32 63>
+
+  store <64 x i16> %interleaved.vec, <64 x i16>* %out.vec, align 32
+
+  ret void
+}
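
For context, the CodeGen patterns above are the shuffle sequences the LoopVectorizer produces when it widens a scalar loop that writes every n-th element of an output array. A minimal stride-2 sketch of such a scalar loop follows, in the same style as the CostModel tests; it is an illustration only, and the global and function names (@C, @Out, @store_stride2_example) are made up rather than taken from the patch:

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

@C = global [1024 x i16] zeroinitializer, align 128
@Out = global [2048 x i16] zeroinitializer, align 128

define void @store_stride2_example() {
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  ; two consecutive output lanes per iteration: 2*iv and 2*iv+1
  %iv.0 = shl nuw nsw i64 %iv, 1
  %iv.1 = or i64 %iv.0, 1

  %src = getelementptr inbounds [1024 x i16], [1024 x i16]* @C, i64 0, i64 %iv
  %v = load i16, i16* %src, align 2

  %out0 = getelementptr inbounds [2048 x i16], [2048 x i16]* @Out, i64 0, i64 %iv.0
  %out1 = getelementptr inbounds [2048 x i16], [2048 x i16]* @Out, i64 0, i64 %iv.1
  store i16 %v, i16* %out0, align 2
  store i16 %v, i16* %out1, align 2

  %iv.next = add nuw nsw i64 %iv, 1
  %cmp = icmp ult i64 %iv.next, 1024
  br i1 %cmp, label %for.body, label %exit

exit:
  ret void
}

With -loop-vectorize and +avx2, the two adjacent i16 stores per iteration should form a single interleave group of factor 2, which is the stride-2 store shape exercised by the vf2/vf4/vf8/vf16 functions above.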
