[llvm] [CodeGen] Increase NumVisited limit to 18 (PR #80627)
via llvm-commits
llvm-commits@lists.llvm.org
Wed Feb 7 17:03:43 PST 2024
https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/80627
From de287268800a4d226df105e5fcca5e7ef1f31c97 Mon Sep 17 00:00:00 2001
From: Rose <83477269+AtariDreams@users.noreply.github.com>
Date: Sun, 4 Feb 2024 20:14:49 -0500
Subject: [PATCH] [CodeGen] Increase NumVisited limit to 18
Now that hardware has progressed, we can raise this arbitrary compile-time limit in TwoAddressInstructionPass from 10 to 18. The comparison also changes from "NumVisited > 10" to "NumVisited >= 18", so each rescheduling attempt now scans at most 18 instructions instead of 11.
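
For illustration, a minimal standalone sketch of how the visit budget behaves before and after this change. This is not the pass code itself: the loop body and the scanWithinBudget name are hypothetical; only the two limit checks are taken from the patch.

#include <cassert>

// Hypothetical stand-in for the scan loops in rescheduleMIBelowKill /
// rescheduleKillAboveMI: count visited instructions until the budget runs out.
static bool scanWithinBudget(int numInstrs, bool newCheck) {
  int NumVisited = 0;
  for (int i = 0; i < numInstrs; ++i) {
    // Old check: bail once NumVisited exceeds 10 (so 11 visits succeed).
    // New check: bail once NumVisited reaches 18 (so 18 visits succeed).
    if (newCheck ? NumVisited >= 18 : NumVisited > 10)
      return false; // limit hit: give up on rescheduling
    ++NumVisited;
  }
  return true; // scanned the whole range within budget
}

int main() {
  assert(scanWithinBudget(11, /*newCheck=*/false));  // old limit admits 11
  assert(!scanWithinBudget(12, /*newCheck=*/false)); // ...but not 12
  assert(scanWithinBudget(18, /*newCheck=*/true));   // new limit admits 18
  assert(!scanWithinBudget(19, /*newCheck=*/true));  // ...but not 19
}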
---
.../lib/CodeGen/TwoAddressInstructionPass.cpp | 4 +-
...streaming-mode-fixed-length-int-extends.ll | 186 +-
...e-streaming-mode-fixed-length-int-to-fp.ll | 66 +-
llvm/test/CodeGen/ARM/copy-by-struct-i32.ll | 34 +-
llvm/test/CodeGen/Thumb2/mve-shuffle.ll | 14 +-
llvm/test/CodeGen/Thumb2/mve-vld3.ll | 78 +-
llvm/test/CodeGen/Thumb2/mve-vldst4.ll | 56 +-
llvm/test/CodeGen/X86/machine-cp.ll | 40 +-
llvm/test/CodeGen/X86/oddshuffles.ll | 74 +-
llvm/test/CodeGen/X86/pmulh.ll | 98 +-
.../vector-interleaved-load-i16-stride-3.ll | 38 +-
.../vector-interleaved-load-i16-stride-5.ll | 145 +-
.../vector-interleaved-load-i16-stride-8.ll | 281 +-
.../vector-interleaved-load-i32-stride-3.ll | 154 +-
.../vector-interleaved-load-i32-stride-6.ll | 191 +-
.../vector-interleaved-load-i8-stride-4.ll | 80 +-
.../vector-interleaved-load-i8-stride-5.ll | 3043 +++++++--------
.../vector-interleaved-load-i8-stride-6.ll | 2876 +++++++-------
.../vector-interleaved-load-i8-stride-7.ll | 3453 +++++++++--------
.../vector-interleaved-store-i16-stride-5.ll | 2063 +++++-----
.../vector-interleaved-store-i16-stride-7.ll | 39 +-
.../vector-interleaved-store-i8-stride-7.ll | 3025 +++++++--------
llvm/test/CodeGen/X86/vselect-minmax.ll | 69 +-
23 files changed, 8035 insertions(+), 8072 deletions(-)
diff --git a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index ebacbc420f8580..759c8519cb8bf3 100644
--- a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -921,7 +921,7 @@ bool TwoAddressInstructionPass::rescheduleMIBelowKill(
// Debug or pseudo instructions cannot be counted against the limit.
if (OtherMI.isDebugOrPseudoInstr())
continue;
- if (NumVisited > 10) // FIXME: Arbitrary limit to reduce compile time cost.
+ if (NumVisited >= 18) // FIXME: Arbitrary limit to reduce compile time cost.
return false;
++NumVisited;
if (OtherMI.hasUnmodeledSideEffects() || OtherMI.isCall() ||
@@ -1094,7 +1094,7 @@ bool TwoAddressInstructionPass::rescheduleKillAboveMI(
// Debug or pseudo instructions cannot be counted against the limit.
if (OtherMI.isDebugOrPseudoInstr())
continue;
- if (NumVisited > 10) // FIXME: Arbitrary limit to reduce compile time cost.
+ if (NumVisited >= 18) // FIXME: Arbitrary limit to reduce compile time cost.
return false;
++NumVisited;
if (OtherMI.hasUnmodeledSideEffects() || OtherMI.isCall() ||
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
index c7a89612d278f2..2b12dd57cd9a91 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
@@ -236,22 +236,20 @@ define void @sext_v16i8_v16i64(<16 x i8> %a, ptr %out) {
; CHECK-NEXT: sunpklo z4.d, z2.s
; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8
; CHECK-NEXT: sunpklo z0.s, z0.h
-; CHECK-NEXT: mov z7.d, z1.d
-; CHECK-NEXT: sunpklo z2.d, z2.s
+; CHECK-NEXT: sunpklo z7.d, z1.s
+; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8
; CHECK-NEXT: sunpklo z5.d, z3.s
; CHECK-NEXT: ext z3.b, z3.b, z3.b, #8
-; CHECK-NEXT: ext z7.b, z7.b, z1.b, #8
+; CHECK-NEXT: sunpklo z2.d, z2.s
; CHECK-NEXT: sunpklo z1.d, z1.s
-; CHECK-NEXT: mov z6.d, z0.d
+; CHECK-NEXT: sunpklo z6.d, z0.s
+; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8
; CHECK-NEXT: sunpklo z3.d, z3.s
; CHECK-NEXT: stp q4, q2, [x0]
-; CHECK-NEXT: sunpklo z4.d, z7.s
-; CHECK-NEXT: ext z6.b, z6.b, z0.b, #8
; CHECK-NEXT: sunpklo z0.d, z0.s
+; CHECK-NEXT: stp q7, q1, [x0, #32]
; CHECK-NEXT: stp q5, q3, [x0, #64]
-; CHECK-NEXT: sunpklo z2.d, z6.s
-; CHECK-NEXT: stp q1, q4, [x0, #32]
-; CHECK-NEXT: stp q0, q2, [x0, #96]
+; CHECK-NEXT: stp q6, q0, [x0, #96]
; CHECK-NEXT: ret
%b = sext <16 x i8> %a to <16 x i64>
store <16 x i64> %b, ptr %out
@@ -264,62 +262,59 @@ define void @sext_v32i8_v32i64(ptr %in, ptr %out) {
; CHECK-NEXT: ldp q1, q0, [x0]
; CHECK-NEXT: add z0.b, z0.b, z0.b
; CHECK-NEXT: add z1.b, z1.b, z1.b
-; CHECK-NEXT: mov z2.d, z0.d
+; CHECK-NEXT: sunpklo z2.h, z0.b
+; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT: sunpklo z3.h, z1.b
+; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8
; CHECK-NEXT: sunpklo z0.h, z0.b
-; CHECK-NEXT: mov z3.d, z1.d
-; CHECK-NEXT: sunpklo z1.h, z1.b
+; CHECK-NEXT: sunpklo z4.s, z2.h
+; CHECK-NEXT: sunpklo z5.s, z3.h
; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT: sunpklo z1.h, z1.b
; CHECK-NEXT: ext z3.b, z3.b, z3.b, #8
-; CHECK-NEXT: sunpklo z4.s, z0.h
+; CHECK-NEXT: sunpklo z6.s, z0.h
+; CHECK-NEXT: sunpklo z2.s, z2.h
; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8
-; CHECK-NEXT: sunpklo z5.s, z1.h
-; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8
-; CHECK-NEXT: sunpklo z2.h, z2.b
-; CHECK-NEXT: sunpklo z3.h, z3.b
-; CHECK-NEXT: sunpklo z0.s, z0.h
-; CHECK-NEXT: sunpklo z16.d, z4.s
+; CHECK-NEXT: sunpklo z7.d, z4.s
; CHECK-NEXT: ext z4.b, z4.b, z4.b, #8
-; CHECK-NEXT: sunpklo z1.s, z1.h
; CHECK-NEXT: sunpklo z17.d, z5.s
; CHECK-NEXT: ext z5.b, z5.b, z5.b, #8
-; CHECK-NEXT: sunpklo z6.s, z2.h
-; CHECK-NEXT: sunpklo z7.s, z3.h
-; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT: sunpklo z16.s, z1.h
+; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT: sunpklo z3.s, z3.h
+; CHECK-NEXT: sunpklo z0.s, z0.h
; CHECK-NEXT: sunpklo z4.d, z4.s
-; CHECK-NEXT: ext z3.b, z3.b, z3.b, #8
-; CHECK-NEXT: sunpklo z19.d, z0.s
-; CHECK-NEXT: sunpklo z5.d, z5.s
-; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8
-; CHECK-NEXT: sunpklo z2.s, z2.h
; CHECK-NEXT: sunpklo z18.d, z6.s
; CHECK-NEXT: ext z6.b, z6.b, z6.b, #8
-; CHECK-NEXT: sunpklo z3.s, z3.h
-; CHECK-NEXT: stp q16, q4, [x1, #128]
-; CHECK-NEXT: mov z16.d, z7.d
-; CHECK-NEXT: sunpklo z0.d, z0.s
-; CHECK-NEXT: stp q17, q5, [x1]
-; CHECK-NEXT: sunpklo z5.d, z7.s
-; CHECK-NEXT: sunpklo z4.d, z6.s
-; CHECK-NEXT: mov z6.d, z1.d
-; CHECK-NEXT: ext z16.b, z16.b, z7.b, #8
+; CHECK-NEXT: sunpklo z5.d, z5.s
+; CHECK-NEXT: sunpklo z1.s, z1.h
+; CHECK-NEXT: sunpklo z19.d, z16.s
+; CHECK-NEXT: ext z16.b, z16.b, z16.b, #8
+; CHECK-NEXT: sunpklo z6.d, z6.s
+; CHECK-NEXT: stp q7, q4, [x1, #128]
; CHECK-NEXT: mov z7.d, z2.d
-; CHECK-NEXT: stp q19, q0, [x1, #160]
-; CHECK-NEXT: sunpklo z0.d, z2.s
-; CHECK-NEXT: ext z6.b, z6.b, z1.b, #8
-; CHECK-NEXT: sunpklo z1.d, z1.s
-; CHECK-NEXT: stp q18, q4, [x1, #192]
; CHECK-NEXT: mov z4.d, z3.d
-; CHECK-NEXT: ext z7.b, z7.b, z2.b, #8
+; CHECK-NEXT: stp q17, q5, [x1]
+; CHECK-NEXT: mov z5.d, z0.d
+; CHECK-NEXT: sunpklo z2.d, z2.s
; CHECK-NEXT: sunpklo z16.d, z16.s
-; CHECK-NEXT: sunpklo z6.d, z6.s
+; CHECK-NEXT: ext z7.b, z7.b, z7.b, #8
; CHECK-NEXT: ext z4.b, z4.b, z3.b, #8
-; CHECK-NEXT: sunpklo z2.d, z7.s
+; CHECK-NEXT: stp q18, q6, [x1, #192]
+; CHECK-NEXT: ext z5.b, z5.b, z0.b, #8
+; CHECK-NEXT: sunpklo z6.d, z1.s
+; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8
; CHECK-NEXT: sunpklo z3.d, z3.s
-; CHECK-NEXT: stp q5, q16, [x1, #64]
-; CHECK-NEXT: stp q1, q6, [x1, #32]
-; CHECK-NEXT: sunpklo z1.d, z4.s
+; CHECK-NEXT: sunpklo z0.d, z0.s
+; CHECK-NEXT: sunpklo z7.d, z7.s
+; CHECK-NEXT: sunpklo z4.d, z4.s
+; CHECK-NEXT: stp q19, q16, [x1, #64]
+; CHECK-NEXT: sunpklo z1.d, z1.s
+; CHECK-NEXT: stp q3, q4, [x1, #32]
+; CHECK-NEXT: stp q6, q1, [x1, #96]
+; CHECK-NEXT: stp q2, q7, [x1, #160]
+; CHECK-NEXT: sunpklo z2.d, z5.s
; CHECK-NEXT: stp q0, q2, [x1, #224]
-; CHECK-NEXT: stp q3, q1, [x1, #96]
; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %in
%b = add <32 x i8> %a, %a
@@ -661,22 +656,20 @@ define void @zext_v16i8_v16i64(<16 x i8> %a, ptr %out) {
; CHECK-NEXT: uunpklo z4.d, z2.s
; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8
; CHECK-NEXT: uunpklo z0.s, z0.h
-; CHECK-NEXT: mov z7.d, z1.d
-; CHECK-NEXT: uunpklo z2.d, z2.s
+; CHECK-NEXT: uunpklo z7.d, z1.s
+; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8
; CHECK-NEXT: uunpklo z5.d, z3.s
; CHECK-NEXT: ext z3.b, z3.b, z3.b, #8
-; CHECK-NEXT: ext z7.b, z7.b, z1.b, #8
+; CHECK-NEXT: uunpklo z2.d, z2.s
; CHECK-NEXT: uunpklo z1.d, z1.s
-; CHECK-NEXT: mov z6.d, z0.d
+; CHECK-NEXT: uunpklo z6.d, z0.s
+; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8
; CHECK-NEXT: uunpklo z3.d, z3.s
; CHECK-NEXT: stp q4, q2, [x0]
-; CHECK-NEXT: uunpklo z4.d, z7.s
-; CHECK-NEXT: ext z6.b, z6.b, z0.b, #8
; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: stp q7, q1, [x0, #32]
; CHECK-NEXT: stp q5, q3, [x0, #64]
-; CHECK-NEXT: uunpklo z2.d, z6.s
-; CHECK-NEXT: stp q1, q4, [x0, #32]
-; CHECK-NEXT: stp q0, q2, [x0, #96]
+; CHECK-NEXT: stp q6, q0, [x0, #96]
; CHECK-NEXT: ret
%b = zext <16 x i8> %a to <16 x i64>
store <16 x i64> %b, ptr %out
@@ -689,62 +682,59 @@ define void @zext_v32i8_v32i64(ptr %in, ptr %out) {
; CHECK-NEXT: ldp q1, q0, [x0]
; CHECK-NEXT: add z0.b, z0.b, z0.b
; CHECK-NEXT: add z1.b, z1.b, z1.b
-; CHECK-NEXT: mov z2.d, z0.d
+; CHECK-NEXT: uunpklo z2.h, z0.b
+; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT: uunpklo z3.h, z1.b
+; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8
; CHECK-NEXT: uunpklo z0.h, z0.b
-; CHECK-NEXT: mov z3.d, z1.d
-; CHECK-NEXT: uunpklo z1.h, z1.b
+; CHECK-NEXT: uunpklo z4.s, z2.h
+; CHECK-NEXT: uunpklo z5.s, z3.h
; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT: uunpklo z1.h, z1.b
; CHECK-NEXT: ext z3.b, z3.b, z3.b, #8
-; CHECK-NEXT: uunpklo z4.s, z0.h
+; CHECK-NEXT: uunpklo z6.s, z0.h
+; CHECK-NEXT: uunpklo z2.s, z2.h
; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8
-; CHECK-NEXT: uunpklo z5.s, z1.h
-; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8
-; CHECK-NEXT: uunpklo z2.h, z2.b
-; CHECK-NEXT: uunpklo z3.h, z3.b
-; CHECK-NEXT: uunpklo z0.s, z0.h
-; CHECK-NEXT: uunpklo z16.d, z4.s
+; CHECK-NEXT: uunpklo z7.d, z4.s
; CHECK-NEXT: ext z4.b, z4.b, z4.b, #8
-; CHECK-NEXT: uunpklo z1.s, z1.h
; CHECK-NEXT: uunpklo z17.d, z5.s
; CHECK-NEXT: ext z5.b, z5.b, z5.b, #8
-; CHECK-NEXT: uunpklo z6.s, z2.h
-; CHECK-NEXT: uunpklo z7.s, z3.h
-; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT: uunpklo z16.s, z1.h
+; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT: uunpklo z3.s, z3.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
; CHECK-NEXT: uunpklo z4.d, z4.s
-; CHECK-NEXT: ext z3.b, z3.b, z3.b, #8
-; CHECK-NEXT: uunpklo z19.d, z0.s
-; CHECK-NEXT: uunpklo z5.d, z5.s
-; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8
-; CHECK-NEXT: uunpklo z2.s, z2.h
; CHECK-NEXT: uunpklo z18.d, z6.s
; CHECK-NEXT: ext z6.b, z6.b, z6.b, #8
-; CHECK-NEXT: uunpklo z3.s, z3.h
-; CHECK-NEXT: stp q16, q4, [x1, #128]
-; CHECK-NEXT: mov z16.d, z7.d
-; CHECK-NEXT: uunpklo z0.d, z0.s
-; CHECK-NEXT: stp q17, q5, [x1]
-; CHECK-NEXT: uunpklo z5.d, z7.s
-; CHECK-NEXT: uunpklo z4.d, z6.s
-; CHECK-NEXT: mov z6.d, z1.d
-; CHECK-NEXT: ext z16.b, z16.b, z7.b, #8
+; CHECK-NEXT: uunpklo z5.d, z5.s
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uunpklo z19.d, z16.s
+; CHECK-NEXT: ext z16.b, z16.b, z16.b, #8
+; CHECK-NEXT: uunpklo z6.d, z6.s
+; CHECK-NEXT: stp q7, q4, [x1, #128]
; CHECK-NEXT: mov z7.d, z2.d
-; CHECK-NEXT: stp q19, q0, [x1, #160]
-; CHECK-NEXT: uunpklo z0.d, z2.s
-; CHECK-NEXT: ext z6.b, z6.b, z1.b, #8
-; CHECK-NEXT: uunpklo z1.d, z1.s
-; CHECK-NEXT: stp q18, q4, [x1, #192]
; CHECK-NEXT: mov z4.d, z3.d
-; CHECK-NEXT: ext z7.b, z7.b, z2.b, #8
+; CHECK-NEXT: stp q17, q5, [x1]
+; CHECK-NEXT: mov z5.d, z0.d
+; CHECK-NEXT: uunpklo z2.d, z2.s
; CHECK-NEXT: uunpklo z16.d, z16.s
-; CHECK-NEXT: uunpklo z6.d, z6.s
+; CHECK-NEXT: ext z7.b, z7.b, z7.b, #8
; CHECK-NEXT: ext z4.b, z4.b, z3.b, #8
-; CHECK-NEXT: uunpklo z2.d, z7.s
+; CHECK-NEXT: stp q18, q6, [x1, #192]
+; CHECK-NEXT: ext z5.b, z5.b, z0.b, #8
+; CHECK-NEXT: uunpklo z6.d, z1.s
+; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8
; CHECK-NEXT: uunpklo z3.d, z3.s
-; CHECK-NEXT: stp q5, q16, [x1, #64]
-; CHECK-NEXT: stp q1, q6, [x1, #32]
-; CHECK-NEXT: uunpklo z1.d, z4.s
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: uunpklo z7.d, z7.s
+; CHECK-NEXT: uunpklo z4.d, z4.s
+; CHECK-NEXT: stp q19, q16, [x1, #64]
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: stp q3, q4, [x1, #32]
+; CHECK-NEXT: stp q6, q1, [x1, #96]
+; CHECK-NEXT: stp q2, q7, [x1, #160]
+; CHECK-NEXT: uunpklo z2.d, z5.s
; CHECK-NEXT: stp q0, q2, [x1, #224]
-; CHECK-NEXT: stp q3, q1, [x1, #96]
; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %in
%b = add <32 x i8> %a, %a
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
index c110e89326cc0c..9d84af1c60cdd9 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
@@ -207,36 +207,35 @@ define void @ucvtf_v16i16_v16f64(ptr %a, ptr %b) {
; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8
; CHECK-NEXT: uunpklo z0.s, z0.h
; CHECK-NEXT: uunpklo z1.s, z1.h
-; CHECK-NEXT: mov z4.d, z2.d
+; CHECK-NEXT: uunpklo z4.d, z2.s
+; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8
; CHECK-NEXT: mov z7.d, z3.d
-; CHECK-NEXT: mov z5.d, z0.d
-; CHECK-NEXT: ext z4.b, z4.b, z2.b, #8
+; CHECK-NEXT: uunpklo z5.d, z0.s
+; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8
; CHECK-NEXT: uunpklo z2.d, z2.s
; CHECK-NEXT: mov z6.d, z1.d
+; CHECK-NEXT: ucvtf z4.d, p0/m, z4.d
; CHECK-NEXT: ext z7.b, z7.b, z3.b, #8
; CHECK-NEXT: uunpklo z3.d, z3.s
-; CHECK-NEXT: ext z5.b, z5.b, z0.b, #8
-; CHECK-NEXT: uunpklo z4.d, z4.s
; CHECK-NEXT: uunpklo z0.d, z0.s
; CHECK-NEXT: ext z6.b, z6.b, z1.b, #8
; CHECK-NEXT: uunpklo z1.d, z1.s
; CHECK-NEXT: ucvtf z2.d, p0/m, z2.d
-; CHECK-NEXT: ucvtf z3.d, p0/m, z3.d
+; CHECK-NEXT: ucvtf z5.d, p0/m, z5.d
; CHECK-NEXT: uunpklo z7.d, z7.s
-; CHECK-NEXT: uunpklo z5.d, z5.s
-; CHECK-NEXT: ucvtf z4.d, p0/m, z4.d
; CHECK-NEXT: ucvtf z0.d, p0/m, z0.d
; CHECK-NEXT: uunpklo z6.d, z6.s
; CHECK-NEXT: ucvtf z1.d, p0/m, z1.d
-; CHECK-NEXT: ucvtf z5.d, p0/m, z5.d
-; CHECK-NEXT: stp q2, q4, [x1, #64]
-; CHECK-NEXT: movprfx z2, z6
-; CHECK-NEXT: ucvtf z2.d, p0/m, z6.d
-; CHECK-NEXT: stp q1, q2, [x1, #32]
-; CHECK-NEXT: stp q0, q5, [x1, #96]
-; CHECK-NEXT: movprfx z0, z7
-; CHECK-NEXT: ucvtf z0.d, p0/m, z7.d
-; CHECK-NEXT: stp q3, q0, [x1]
+; CHECK-NEXT: stp q4, q2, [x1, #64]
+; CHECK-NEXT: movprfx z4, z6
+; CHECK-NEXT: ucvtf z4.d, p0/m, z6.d
+; CHECK-NEXT: movprfx z2, z3
+; CHECK-NEXT: ucvtf z2.d, p0/m, z3.d
+; CHECK-NEXT: movprfx z3, z7
+; CHECK-NEXT: ucvtf z3.d, p0/m, z7.d
+; CHECK-NEXT: stp q2, q3, [x1]
+; CHECK-NEXT: stp q5, q0, [x1, #96]
+; CHECK-NEXT: stp q1, q4, [x1, #32]
; CHECK-NEXT: ret
%op1 = load <16 x i16>, ptr %a
%res = uitofp <16 x i16> %op1 to <16 x double>
@@ -780,36 +779,35 @@ define void @scvtf_v16i16_v16f64(ptr %a, ptr %b) {
; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8
; CHECK-NEXT: sunpklo z0.s, z0.h
; CHECK-NEXT: sunpklo z1.s, z1.h
-; CHECK-NEXT: mov z4.d, z2.d
+; CHECK-NEXT: sunpklo z4.d, z2.s
+; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8
; CHECK-NEXT: mov z7.d, z3.d
-; CHECK-NEXT: mov z5.d, z0.d
-; CHECK-NEXT: ext z4.b, z4.b, z2.b, #8
+; CHECK-NEXT: sunpklo z5.d, z0.s
+; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8
; CHECK-NEXT: sunpklo z2.d, z2.s
; CHECK-NEXT: mov z6.d, z1.d
+; CHECK-NEXT: scvtf z4.d, p0/m, z4.d
; CHECK-NEXT: ext z7.b, z7.b, z3.b, #8
; CHECK-NEXT: sunpklo z3.d, z3.s
-; CHECK-NEXT: ext z5.b, z5.b, z0.b, #8
-; CHECK-NEXT: sunpklo z4.d, z4.s
; CHECK-NEXT: sunpklo z0.d, z0.s
; CHECK-NEXT: ext z6.b, z6.b, z1.b, #8
; CHECK-NEXT: sunpklo z1.d, z1.s
; CHECK-NEXT: scvtf z2.d, p0/m, z2.d
-; CHECK-NEXT: scvtf z3.d, p0/m, z3.d
+; CHECK-NEXT: scvtf z5.d, p0/m, z5.d
; CHECK-NEXT: sunpklo z7.d, z7.s
-; CHECK-NEXT: sunpklo z5.d, z5.s
-; CHECK-NEXT: scvtf z4.d, p0/m, z4.d
; CHECK-NEXT: scvtf z0.d, p0/m, z0.d
; CHECK-NEXT: sunpklo z6.d, z6.s
; CHECK-NEXT: scvtf z1.d, p0/m, z1.d
-; CHECK-NEXT: scvtf z5.d, p0/m, z5.d
-; CHECK-NEXT: stp q2, q4, [x1, #64]
-; CHECK-NEXT: movprfx z2, z6
-; CHECK-NEXT: scvtf z2.d, p0/m, z6.d
-; CHECK-NEXT: stp q1, q2, [x1, #32]
-; CHECK-NEXT: stp q0, q5, [x1, #96]
-; CHECK-NEXT: movprfx z0, z7
-; CHECK-NEXT: scvtf z0.d, p0/m, z7.d
-; CHECK-NEXT: stp q3, q0, [x1]
+; CHECK-NEXT: stp q4, q2, [x1, #64]
+; CHECK-NEXT: movprfx z4, z6
+; CHECK-NEXT: scvtf z4.d, p0/m, z6.d
+; CHECK-NEXT: movprfx z2, z3
+; CHECK-NEXT: scvtf z2.d, p0/m, z3.d
+; CHECK-NEXT: movprfx z3, z7
+; CHECK-NEXT: scvtf z3.d, p0/m, z7.d
+; CHECK-NEXT: stp q2, q3, [x1]
+; CHECK-NEXT: stp q5, q0, [x1, #96]
+; CHECK-NEXT: stp q1, q4, [x1, #32]
; CHECK-NEXT: ret
%op1 = load <16 x i16>, ptr %a
%res = sitofp <16 x i16> %op1 to <16 x double>
diff --git a/llvm/test/CodeGen/ARM/copy-by-struct-i32.ll b/llvm/test/CodeGen/ARM/copy-by-struct-i32.ll
index 34aab4c04b1093..8f134e0ac7f18b 100644
--- a/llvm/test/CodeGen/ARM/copy-by-struct-i32.ll
+++ b/llvm/test/CodeGen/ARM/copy-by-struct-i32.ll
@@ -22,23 +22,23 @@ define arm_aapcscc void @s(ptr %q, ptr %p) {
; ASSEMBLY-NEXT: ldr r2, [r1, #8]
; ASSEMBLY-NEXT: ldr r3, [r1, #12]
; ASSEMBLY-NEXT: strd r4, r5, [sp, #128]
-; ASSEMBLY-NEXT: add r5, r1, #16
-; ASSEMBLY-NEXT: mov r4, sp
-; ASSEMBLY-NEXT: vld1.32 {d16}, [r5]!
-; ASSEMBLY-NEXT: vst1.32 {d16}, [r4]!
-; ASSEMBLY-NEXT: vld1.32 {d16}, [r5]!
-; ASSEMBLY-NEXT: vst1.32 {d16}, [r4]!
-; ASSEMBLY-NEXT: vld1.32 {d16}, [r5]!
-; ASSEMBLY-NEXT: vst1.32 {d16}, [r4]!
-; ASSEMBLY-NEXT: vld1.32 {d16}, [r5]!
-; ASSEMBLY-NEXT: vst1.32 {d16}, [r4]!
-; ASSEMBLY-NEXT: vld1.32 {d16}, [r5]!
-; ASSEMBLY-NEXT: vst1.32 {d16}, [r4]!
-; ASSEMBLY-NEXT: vld1.32 {d16}, [r5]!
-; ASSEMBLY-NEXT: vst1.32 {d16}, [r4]!
-; ASSEMBLY-NEXT: vld1.32 {d16}, [r5]!
-; ASSEMBLY-NEXT: vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT: add r4, r1, #16
+; ASSEMBLY-NEXT: mov r5, sp
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT: vst1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT: vst1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT: vst1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT: vst1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT: vst1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT: vst1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r4]!
; ASSEMBLY-NEXT: movw r4, #72
+; ASSEMBLY-NEXT: vst1.32 {d16}, [r5]!
; ASSEMBLY-NEXT: .LBB0_1: @ %entry
; ASSEMBLY-NEXT: @ =>This Inner Loop Header: Depth=1
; ASSEMBLY-NEXT: vld1.32 {d16}, [r1]!
@@ -58,3 +58,5 @@ entry:
}
declare arm_aapcscc void @r(...)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; BEFORE-EXPAND: {{.*}}
diff --git a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
index f4643f8c6c4a1f..a594feda2b2672 100644
--- a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
@@ -355,30 +355,30 @@ define arm_aapcs_vfpcc <8 x i16> @shuffle3step_i16(<32 x i16> %src) {
; CHECK-NEXT: vins.f16 s16, s2
; CHECK-NEXT: vmovx.f16 s2, s5
; CHECK-NEXT: vmov.f32 s17, s4
-; CHECK-NEXT: vmovx.f16 s13, s3
+; CHECK-NEXT: vmovx.f16 s14, s6
; CHECK-NEXT: vins.f16 s17, s2
; CHECK-NEXT: vmov.f32 s18, s7
; CHECK-NEXT: vmovx.f16 s2, s8
-; CHECK-NEXT: vmov.f32 s19, s10
+; CHECK-NEXT: vins.f16 s14, s8
; CHECK-NEXT: vins.f16 s18, s2
; CHECK-NEXT: vmovx.f16 s2, s11
-; CHECK-NEXT: vins.f16 s19, s2
+; CHECK-NEXT: vmovx.f16 s8, s10
+; CHECK-NEXT: vins.f16 s10, s2
; CHECK-NEXT: vmovx.f16 s2, s1
+; CHECK-NEXT: vmovx.f16 s13, s3
; CHECK-NEXT: vins.f16 s0, s2
; CHECK-NEXT: vmovx.f16 s2, s4
; CHECK-NEXT: vins.f16 s3, s2
; CHECK-NEXT: vmovx.f16 s2, s7
-; CHECK-NEXT: vmovx.f16 s4, s10
-; CHECK-NEXT: vmovx.f16 s14, s6
; CHECK-NEXT: vmovx.f16 s15, s9
; CHECK-NEXT: vins.f16 s6, s2
-; CHECK-NEXT: vins.f16 s9, s4
+; CHECK-NEXT: vins.f16 s9, s8
; CHECK-NEXT: vmov.f32 s1, s3
-; CHECK-NEXT: vins.f16 s14, s8
; CHECK-NEXT: vins.f16 s15, s11
; CHECK-NEXT: vins.f16 s13, s5
; CHECK-NEXT: vmov.f32 s2, s6
; CHECK-NEXT: vmov.f32 s3, s9
+; CHECK-NEXT: vmov.f32 s19, s10
; CHECK-NEXT: vadd.i16 q0, q0, q3
; CHECK-NEXT: vadd.i16 q0, q0, q4
; CHECK-NEXT: vpop {d8, d9}
diff --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
index ccdc996d75970e..816ce19bca0bac 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
@@ -388,34 +388,34 @@ define void @vld3_v16i16(ptr %src, ptr %dst) {
; CHECK-LV-NEXT: vins.f16 s6, s8
; CHECK-LV-NEXT: vmov.f32 s22, s15
; CHECK-LV-NEXT: vmovx.f16 s8, s8
+; CHECK-LV-NEXT: vmovx.f16 s7, s9
; CHECK-LV-NEXT: vins.f16 s22, s8
; CHECK-LV-NEXT: vmovx.f16 s8, s11
-; CHECK-LV-NEXT: vmov.f32 s23, s10
-; CHECK-LV-NEXT: vmovx.f16 s4, s16
-; CHECK-LV-NEXT: vins.f16 s23, s8
+; CHECK-LV-NEXT: vins.f16 s7, s11
+; CHECK-LV-NEXT: vmovx.f16 s11, s10
+; CHECK-LV-NEXT: vins.f16 s10, s8
; CHECK-LV-NEXT: vmovx.f16 s8, s17
+; CHECK-LV-NEXT: vmovx.f16 s4, s16
; CHECK-LV-NEXT: vins.f16 s16, s8
; CHECK-LV-NEXT: vmovx.f16 s8, s12
; CHECK-LV-NEXT: vmovx.f16 s5, s19
; CHECK-LV-NEXT: vins.f16 s19, s8
; CHECK-LV-NEXT: vmovx.f16 s8, s15
-; CHECK-LV-NEXT: vmovx.f16 s7, s9
-; CHECK-LV-NEXT: vins.f16 s14, s8
-; CHECK-LV-NEXT: vmovx.f16 s8, s10
; CHECK-LV-NEXT: vins.f16 s4, s18
; CHECK-LV-NEXT: vmov.f32 s20, s17
; CHECK-LV-NEXT: vmovx.f16 s18, s18
-; CHECK-LV-NEXT: vins.f16 s9, s8
+; CHECK-LV-NEXT: vins.f16 s14, s8
+; CHECK-LV-NEXT: vins.f16 s9, s11
; CHECK-LV-NEXT: vins.f16 s5, s13
; CHECK-LV-NEXT: vins.f16 s20, s18
; CHECK-LV-NEXT: vmov.f32 s17, s19
-; CHECK-LV-NEXT: vins.f16 s7, s11
; CHECK-LV-NEXT: vmovx.f16 s13, s13
; CHECK-LV-NEXT: vmov.f32 s21, s12
+; CHECK-LV-NEXT: vstrw.32 q0, [r1, #16]
; CHECK-LV-NEXT: vmov.f32 s18, s14
; CHECK-LV-NEXT: vins.f16 s21, s13
; CHECK-LV-NEXT: vmov.f32 s19, s9
-; CHECK-LV-NEXT: vstrw.32 q0, [r1, #16]
+; CHECK-LV-NEXT: vmov.f32 s23, s10
; CHECK-LV-NEXT: vadd.i16 q1, q4, q1
; CHECK-LV-NEXT: vadd.i16 q1, q1, q5
; CHECK-LV-NEXT: vstrw.32 q1, [r1]
@@ -469,34 +469,34 @@ define void @vld3_v16i16(ptr %src, ptr %dst) {
; CHECK-LIS-NEXT: vmov.f32 s22, s19
; CHECK-LIS-NEXT: vins.f16 s6, s8
; CHECK-LIS-NEXT: vmovx.f16 s8, s8
+; CHECK-LIS-NEXT: vmovx.f16 s7, s9
; CHECK-LIS-NEXT: vins.f16 s22, s8
; CHECK-LIS-NEXT: vmovx.f16 s8, s11
-; CHECK-LIS-NEXT: vmov.f32 s23, s10
-; CHECK-LIS-NEXT: vmovx.f16 s4, s12
-; CHECK-LIS-NEXT: vins.f16 s23, s8
+; CHECK-LIS-NEXT: vins.f16 s7, s11
+; CHECK-LIS-NEXT: vmovx.f16 s11, s10
+; CHECK-LIS-NEXT: vins.f16 s10, s8
; CHECK-LIS-NEXT: vmovx.f16 s8, s13
+; CHECK-LIS-NEXT: vmovx.f16 s4, s12
; CHECK-LIS-NEXT: vins.f16 s12, s8
; CHECK-LIS-NEXT: vmovx.f16 s8, s16
; CHECK-LIS-NEXT: vmovx.f16 s5, s15
-; CHECK-LIS-NEXT: vins.f16 s15, s8
-; CHECK-LIS-NEXT: vmovx.f16 s8, s19
; CHECK-LIS-NEXT: vins.f16 s4, s14
+; CHECK-LIS-NEXT: vins.f16 s15, s8
; CHECK-LIS-NEXT: vmov.f32 s20, s13
; CHECK-LIS-NEXT: vmovx.f16 s14, s14
-; CHECK-LIS-NEXT: vins.f16 s18, s8
-; CHECK-LIS-NEXT: vmovx.f16 s8, s10
-; CHECK-LIS-NEXT: vmovx.f16 s7, s9
+; CHECK-LIS-NEXT: vmovx.f16 s8, s19
; CHECK-LIS-NEXT: vins.f16 s20, s14
; CHECK-LIS-NEXT: vmovx.f16 s14, s17
; CHECK-LIS-NEXT: vmov.f32 s21, s16
-; CHECK-LIS-NEXT: vins.f16 s9, s8
+; CHECK-LIS-NEXT: vins.f16 s18, s8
+; CHECK-LIS-NEXT: vins.f16 s9, s11
; CHECK-LIS-NEXT: vins.f16 s21, s14
; CHECK-LIS-NEXT: vmov.f32 s13, s15
-; CHECK-LIS-NEXT: vins.f16 s7, s11
; CHECK-LIS-NEXT: vins.f16 s5, s17
; CHECK-LIS-NEXT: vmov.f32 s14, s18
-; CHECK-LIS-NEXT: vmov.f32 s15, s9
; CHECK-LIS-NEXT: vstrw.32 q0, [r1, #16]
+; CHECK-LIS-NEXT: vmov.f32 s15, s9
+; CHECK-LIS-NEXT: vmov.f32 s23, s10
; CHECK-LIS-NEXT: vadd.i16 q1, q3, q1
; CHECK-LIS-NEXT: vadd.i16 q1, q1, q5
; CHECK-LIS-NEXT: vstrw.32 q1, [r1]
@@ -1192,27 +1192,27 @@ entry:
define void @vld3_v4f16(ptr %src, ptr %dst) {
; CHECK-LABEL: vld3_v4f16:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: ldrd r2, r3, [r0, #16]
; CHECK-NEXT: vldrw.u32 q1, [r0]
-; CHECK-NEXT: vmov.32 q2[0], r2
-; CHECK-NEXT: vmovx.f16 s12, s4
-; CHECK-NEXT: vmov.32 q2[1], r3
-; CHECK-NEXT: vmovx.f16 s13, s7
-; CHECK-NEXT: vmovx.f16 s0, s9
-; CHECK-NEXT: vmov.f32 s1, s8
-; CHECK-NEXT: vins.f16 s1, s0
-; CHECK-NEXT: vmovx.f16 s0, s5
-; CHECK-NEXT: vins.f16 s4, s0
-; CHECK-NEXT: vmovx.f16 s0, s6
-; CHECK-NEXT: vins.f16 s5, s0
-; CHECK-NEXT: vmovx.f16 s0, s8
-; CHECK-NEXT: vins.f16 s7, s0
-; CHECK-NEXT: vmov.f32 s0, s5
-; CHECK-NEXT: vins.f16 s12, s6
-; CHECK-NEXT: vins.f16 s13, s9
+; CHECK-NEXT: ldrd r2, r3, [r0, #16]
+; CHECK-NEXT: vmov.32 q0[0], r2
+; CHECK-NEXT: vmovx.f16 s8, s4
+; CHECK-NEXT: vmov.32 q0[1], r3
+; CHECK-NEXT: vins.f16 s8, s6
+; CHECK-NEXT: vmovx.f16 s6, s6
+; CHECK-NEXT: vmovx.f16 s10, s5
+; CHECK-NEXT: vins.f16 s5, s6
+; CHECK-NEXT: vmovx.f16 s6, s0
+; CHECK-NEXT: vmovx.f16 s9, s7
+; CHECK-NEXT: vmovx.f16 s2, s1
+; CHECK-NEXT: vins.f16 s7, s6
+; CHECK-NEXT: vins.f16 s0, s2
+; CHECK-NEXT: vmov.f32 s12, s5
+; CHECK-NEXT: vins.f16 s4, s10
+; CHECK-NEXT: vins.f16 s9, s1
; CHECK-NEXT: vmov.f32 s5, s7
-; CHECK-NEXT: vadd.f16 q1, q1, q3
-; CHECK-NEXT: vadd.f16 q0, q1, q0
+; CHECK-NEXT: vadd.f16 q1, q1, q2
+; CHECK-NEXT: vmov.f32 s13, s0
+; CHECK-NEXT: vadd.f16 q0, q1, q3
; CHECK-NEXT: vmov r0, r2, d0
; CHECK-NEXT: strd r0, r2, [r1]
; CHECK-NEXT: bx lr
diff --git a/llvm/test/CodeGen/Thumb2/mve-vldst4.ll b/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
index 219541cffb940f..48ebeab4a6b439 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
@@ -23,58 +23,58 @@ define void @vldst4(ptr nocapture readonly %pIn, ptr nocapture %pOut, i32 %numRo
; CHECK-NEXT: vldrh.u16 q1, [r0, #32]
; CHECK-NEXT: vldrh.u16 q4, [r0, #48]
; CHECK-NEXT: vldrh.u16 q3, [r0], #64
+; CHECK-NEXT: vmovx.f16 s2, s5
+; CHECK-NEXT: vmovx.f16 s8, s7
+; CHECK-NEXT: vldrh.u16 q5, [r0, #-48]
+; CHECK-NEXT: vmovx.f16 s3, s17
+; CHECK-NEXT: vins.f16 s2, s8
+; CHECK-NEXT: vmovx.f16 s8, s19
; CHECK-NEXT: vmovx.f16 s26, s4
; CHECK-NEXT: vins.f16 s4, s6
; CHECK-NEXT: vmovx.f16 s6, s6
-; CHECK-NEXT: vldrh.u16 q5, [r0, #-48]
+; CHECK-NEXT: vmovx.f16 s0, s13
+; CHECK-NEXT: vins.f16 s3, s8
+; CHECK-NEXT: vmovx.f16 s8, s15
; CHECK-NEXT: vmovx.f16 s27, s16
; CHECK-NEXT: vins.f16 s26, s6
; CHECK-NEXT: vmovx.f16 s6, s18
-; CHECK-NEXT: vmovx.f16 s8, s7
-; CHECK-NEXT: vmovx.f16 s10, s5
+; CHECK-NEXT: vmovx.f16 s1, s21
+; CHECK-NEXT: vins.f16 s0, s8
+; CHECK-NEXT: vmovx.f16 s8, s23
; CHECK-NEXT: vmovx.f16 s24, s12
-; CHECK-NEXT: vins.f16 s10, s8
; CHECK-NEXT: vins.f16 s27, s6
; CHECK-NEXT: vmovx.f16 s6, s14
-; CHECK-NEXT: vmovx.f16 s8, s19
-; CHECK-NEXT: vmovx.f16 s11, s17
-; CHECK-NEXT: vmov.f32 s0, s13
-; CHECK-NEXT: vins.f16 s11, s8
+; CHECK-NEXT: vins.f16 s1, s8
; CHECK-NEXT: vmovx.f16 s25, s20
; CHECK-NEXT: vins.f16 s24, s6
; CHECK-NEXT: vmovx.f16 s6, s22
-; CHECK-NEXT: vmovx.f16 s1, s15
-; CHECK-NEXT: vmovx.f16 s8, s13
-; CHECK-NEXT: vins.f16 s20, s22
+; CHECK-NEXT: vins.f16 s13, s15
; CHECK-NEXT: vins.f16 s16, s18
-; CHECK-NEXT: vmov.f32 s2, s5
+; CHECK-NEXT: vins.f16 s20, s22
; CHECK-NEXT: vins.f16 s25, s6
-; CHECK-NEXT: vmov.f32 s3, s17
-; CHECK-NEXT: vins.f16 s0, s15
-; CHECK-NEXT: vmovx.f16 s9, s21
-; CHECK-NEXT: vins.f16 s8, s1
-; CHECK-NEXT: vmovx.f16 s1, s23
-; CHECK-NEXT: vins.f16 s12, s14
+; CHECK-NEXT: vmul.f16 q2, q0, r2
+; CHECK-NEXT: vins.f16 s5, s7
+; CHECK-NEXT: vins.f16 s17, s19
; CHECK-NEXT: vins.f16 s21, s23
+; CHECK-NEXT: vmov.f32 s0, s13
+; CHECK-NEXT: vins.f16 s12, s14
; CHECK-NEXT: vmov.f32 s14, s4
; CHECK-NEXT: vmov.f32 s15, s16
-; CHECK-NEXT: vins.f16 s9, s1
-; CHECK-NEXT: vmov.f32 s13, s20
; CHECK-NEXT: vmul.f16 q6, q6, r2
+; CHECK-NEXT: vmov.f32 s13, s20
+; CHECK-NEXT: vmovx.f16 s6, s24
; CHECK-NEXT: vmul.f16 q3, q3, r2
-; CHECK-NEXT: vins.f16 s2, s7
-; CHECK-NEXT: vins.f16 s3, s19
+; CHECK-NEXT: vmov.f32 s2, s5
+; CHECK-NEXT: vmov.f32 s3, s17
+; CHECK-NEXT: vmovx.f16 s4, s12
; CHECK-NEXT: vmov.f32 s1, s21
+; CHECK-NEXT: vins.f16 s4, s6
; CHECK-NEXT: vmul.f16 q0, q0, r2
-; CHECK-NEXT: vmovx.f16 s4, s12
-; CHECK-NEXT: vmovx.f16 s6, s24
-; CHECK-NEXT: vmul.f16 q2, q2, r2
+; CHECK-NEXT: vmovx.f16 s6, s9
+; CHECK-NEXT: vmovx.f16 s5, s1
; CHECK-NEXT: vmovx.f16 s7, s0
; CHECK-NEXT: vins.f16 s0, s8
; CHECK-NEXT: vmovx.f16 s8, s8
-; CHECK-NEXT: vins.f16 s4, s6
-; CHECK-NEXT: vmovx.f16 s5, s1
-; CHECK-NEXT: vmovx.f16 s6, s9
; CHECK-NEXT: vins.f16 s7, s8
; CHECK-NEXT: vins.f16 s5, s6
; CHECK-NEXT: vmovx.f16 s6, s13
diff --git a/llvm/test/CodeGen/X86/machine-cp.ll b/llvm/test/CodeGen/X86/machine-cp.ll
index f84960485840d8..420856154dbb56 100644
--- a/llvm/test/CodeGen/X86/machine-cp.ll
+++ b/llvm/test/CodeGen/X86/machine-cp.ll
@@ -99,36 +99,35 @@ while.end: ; preds = %while.body, %entry
define <16 x float> @foo(<16 x float> %x) {
; CHECK-LABEL: foo:
; CHECK: ## %bb.0: ## %bb
-; CHECK-NEXT: xorps %xmm5, %xmm5
-; CHECK-NEXT: cvttps2dq %xmm3, %xmm8
; CHECK-NEXT: movaps %xmm3, %xmm4
+; CHECK-NEXT: xorps %xmm5, %xmm5
+; CHECK-NEXT: cvttps2dq %xmm3, %xmm3
; CHECK-NEXT: cmpltps %xmm5, %xmm4
; CHECK-NEXT: movaps {{.*#+}} xmm7 = [13,14,15,16]
; CHECK-NEXT: movaps %xmm4, %xmm6
; CHECK-NEXT: orps %xmm7, %xmm6
-; CHECK-NEXT: cvtdq2ps %xmm8, %xmm3
+; CHECK-NEXT: cvtdq2ps %xmm3, %xmm3
; CHECK-NEXT: andps %xmm7, %xmm3
; CHECK-NEXT: andps %xmm6, %xmm3
; CHECK-NEXT: andnps %xmm4, %xmm6
; CHECK-NEXT: cvttps2dq %xmm2, %xmm4
-; CHECK-NEXT: movaps %xmm2, %xmm7
-; CHECK-NEXT: cmpltps %xmm5, %xmm7
-; CHECK-NEXT: movaps {{.*#+}} xmm8 = [9,10,11,12]
-; CHECK-NEXT: movaps %xmm7, %xmm9
-; CHECK-NEXT: orps %xmm8, %xmm9
-; CHECK-NEXT: cvtdq2ps %xmm4, %xmm2
-; CHECK-NEXT: andps %xmm8, %xmm2
-; CHECK-NEXT: andps %xmm9, %xmm2
-; CHECK-NEXT: andnps %xmm7, %xmm9
-; CHECK-NEXT: cvttps2dq %xmm1, %xmm4
-; CHECK-NEXT: cmpltps %xmm5, %xmm1
-; CHECK-NEXT: movaps {{.*#+}} xmm7 = [5,6,7,8]
-; CHECK-NEXT: movaps %xmm1, %xmm8
+; CHECK-NEXT: cmpltps %xmm5, %xmm2
+; CHECK-NEXT: movaps {{.*#+}} xmm7 = [9,10,11,12]
+; CHECK-NEXT: movaps %xmm2, %xmm8
; CHECK-NEXT: orps %xmm7, %xmm8
; CHECK-NEXT: cvtdq2ps %xmm4, %xmm4
; CHECK-NEXT: andps %xmm7, %xmm4
; CHECK-NEXT: andps %xmm8, %xmm4
-; CHECK-NEXT: andnps %xmm1, %xmm8
+; CHECK-NEXT: andnps %xmm2, %xmm8
+; CHECK-NEXT: cvttps2dq %xmm1, %xmm2
+; CHECK-NEXT: cmpltps %xmm5, %xmm1
+; CHECK-NEXT: movaps {{.*#+}} xmm7 = [5,6,7,8]
+; CHECK-NEXT: movaps %xmm1, %xmm9
+; CHECK-NEXT: orps %xmm7, %xmm9
+; CHECK-NEXT: cvtdq2ps %xmm2, %xmm2
+; CHECK-NEXT: andps %xmm7, %xmm2
+; CHECK-NEXT: andps %xmm9, %xmm2
+; CHECK-NEXT: andnps %xmm1, %xmm9
; CHECK-NEXT: cvttps2dq %xmm0, %xmm1
; CHECK-NEXT: cmpltps %xmm5, %xmm0
; CHECK-NEXT: movaps {{.*#+}} xmm5 = [1,2,3,4]
@@ -141,14 +140,15 @@ define <16 x float> @foo(<16 x float> %x) {
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [1,1,1,1]
; CHECK-NEXT: andps %xmm0, %xmm7
; CHECK-NEXT: orps %xmm7, %xmm1
-; CHECK-NEXT: andps %xmm0, %xmm8
-; CHECK-NEXT: orps %xmm8, %xmm4
; CHECK-NEXT: andps %xmm0, %xmm9
; CHECK-NEXT: orps %xmm9, %xmm2
+; CHECK-NEXT: andps %xmm0, %xmm8
+; CHECK-NEXT: orps %xmm8, %xmm4
; CHECK-NEXT: andps %xmm0, %xmm6
; CHECK-NEXT: orps %xmm6, %xmm3
; CHECK-NEXT: movaps %xmm1, %xmm0
-; CHECK-NEXT: movaps %xmm4, %xmm1
+; CHECK-NEXT: movaps %xmm2, %xmm1
+; CHECK-NEXT: movaps %xmm4, %xmm2
; CHECK-NEXT: retq
bb:
%v3 = icmp slt <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, zeroinitializer
diff --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll
index 5da18ee6ad7c48..b7a98d0e2a7537 100644
--- a/llvm/test/CodeGen/X86/oddshuffles.ll
+++ b/llvm/test/CodeGen/X86/oddshuffles.ll
@@ -931,37 +931,37 @@ define void @interleave_24i16_out(ptr %p, ptr %q1, ptr %q2, ptr %q3) nounwind {
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm2, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: por %xmm5, %xmm6
-; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[2,1,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,4,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,2,3,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,0,0,0]
-; SSE2-NEXT: pand %xmm6, %xmm5
+; SSE2-NEXT: movdqa %xmm4, %xmm6
+; SSE2-NEXT: pandn %xmm3, %xmm6
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: por %xmm5, %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,1,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,2,3,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535,65535,0,0,0]
+; SSE2-NEXT: pand %xmm5, %xmm3
; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm0[0,3,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,0,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,5,6]
-; SSE2-NEXT: movdqa %xmm6, %xmm8
+; SSE2-NEXT: movdqa %xmm5, %xmm8
; SSE2-NEXT: pandn %xmm7, %xmm8
-; SSE2-NEXT: por %xmm5, %xmm8
+; SSE2-NEXT: por %xmm3, %xmm8
; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: pandn %xmm3, %xmm4
-; SSE2-NEXT: por %xmm2, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[3,1,2,0]
+; SSE2-NEXT: por %xmm6, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,1,2,0]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
-; SSE2-NEXT: pand %xmm6, %xmm2
+; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; SSE2-NEXT: pandn %xmm0, %xmm6
-; SSE2-NEXT: por %xmm2, %xmm6
+; SSE2-NEXT: pandn %xmm0, %xmm5
+; SSE2-NEXT: por %xmm2, %xmm5
; SSE2-NEXT: movups %xmm1, (%rsi)
; SSE2-NEXT: movdqu %xmm8, (%rdx)
-; SSE2-NEXT: movdqu %xmm6, (%rcx)
+; SSE2-NEXT: movdqu %xmm5, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i16_out:
@@ -1080,38 +1080,38 @@ define void @interleave_24i16_out_reverse(ptr %p, ptr %q1, ptr %q2, ptr %q3) nou
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: por %xmm5, %xmm6
-; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,3,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,1,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,1,0,3,4,5,6,7]
-; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,0,0,0]
-; SSE2-NEXT: pand %xmm6, %xmm5
+; SSE2-NEXT: movdqa %xmm4, %xmm6
+; SSE2-NEXT: pandn %xmm3, %xmm6
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: por %xmm5, %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,3,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,1,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,1,0,3,4,5,6,7]
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535,65535,0,0,0]
+; SSE2-NEXT: pand %xmm5, %xmm3
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm0[0,1,2,3,4,7,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,2,0]
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,4,7]
-; SSE2-NEXT: movdqa %xmm6, %xmm8
+; SSE2-NEXT: movdqa %xmm5, %xmm8
; SSE2-NEXT: pandn %xmm7, %xmm8
-; SSE2-NEXT: por %xmm5, %xmm8
+; SSE2-NEXT: por %xmm3, %xmm8
; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm4
-; SSE2-NEXT: por %xmm1, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,1,2,0]
+; SSE2-NEXT: por %xmm6, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,0,1,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
-; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
-; SSE2-NEXT: pandn %xmm0, %xmm6
-; SSE2-NEXT: por %xmm1, %xmm6
+; SSE2-NEXT: pandn %xmm0, %xmm5
+; SSE2-NEXT: por %xmm1, %xmm5
; SSE2-NEXT: movups %xmm2, (%rsi)
; SSE2-NEXT: movdqu %xmm8, (%rdx)
-; SSE2-NEXT: movdqu %xmm6, (%rcx)
+; SSE2-NEXT: movdqu %xmm5, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i16_out_reverse:
diff --git a/llvm/test/CodeGen/X86/pmulh.ll b/llvm/test/CodeGen/X86/pmulh.ll
index c2a009f06b89df..de5d3b0d7bb7d5 100644
--- a/llvm/test/CodeGen/X86/pmulh.ll
+++ b/llvm/test/CodeGen/X86/pmulh.ll
@@ -1389,16 +1389,16 @@ define <64 x i32> @zext_mulhuw_v64i16_lshr(<64 x i16> %a, <64 x i16> %b) {
;
; SSE41-LABEL: zext_mulhuw_v64i16_lshr:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm8
+; SSE41-NEXT: movdqa %xmm1, %xmm9
; SSE41-NEXT: movq %rdi, %rax
-; SSE41-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm8
+; SSE41-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm0
; SSE41-NEXT: pxor %xmm11, %xmm11
-; SSE41-NEXT: movdqa %xmm8, %xmm0
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE41-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm11[4],xmm0[5],xmm11[5],xmm0[6],xmm11[6],xmm0[7],xmm11[7]
-; SSE41-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE41-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm9
-; SSE41-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
+; SSE41-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm9
+; SSE41-NEXT: movdqa %xmm9, %xmm8
+; SSE41-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
; SSE41-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm10
; SSE41-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm11[4],xmm10[5],xmm11[5],xmm10[6],xmm11[6],xmm10[7],xmm11[7]
@@ -1415,32 +1415,31 @@ define <64 x i32> @zext_mulhuw_v64i16_lshr(<64 x i16> %a, <64 x i16> %b) {
; SSE41-NEXT: movdqa %xmm6, %xmm15
; SSE41-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm11[4],xmm15[5],xmm11[5],xmm15[6],xmm11[6],xmm15[7],xmm11[7]
; SSE41-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm7
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
; SSE41-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
; SSE41-NEXT: movdqa %xmm7, 240(%rdi)
-; SSE41-NEXT: movdqa %xmm0, 224(%rdi)
+; SSE41-NEXT: movdqa %xmm1, 224(%rdi)
; SSE41-NEXT: movdqa %xmm15, 208(%rdi)
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; SSE41-NEXT: movdqa %xmm0, 192(%rdi)
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; SSE41-NEXT: movdqa %xmm1, 192(%rdi)
; SSE41-NEXT: movdqa %xmm14, 176(%rdi)
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
-; SSE41-NEXT: movdqa %xmm0, 160(%rdi)
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
+; SSE41-NEXT: movdqa %xmm1, 160(%rdi)
; SSE41-NEXT: movdqa %xmm13, 144(%rdi)
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; SSE41-NEXT: movdqa %xmm0, 128(%rdi)
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; SSE41-NEXT: movdqa %xmm1, 128(%rdi)
; SSE41-NEXT: movdqa %xmm12, 112(%rdi)
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE41-NEXT: movdqa %xmm0, 96(%rdi)
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; SSE41-NEXT: movdqa %xmm1, 96(%rdi)
; SSE41-NEXT: movdqa %xmm10, 80(%rdi)
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; SSE41-NEXT: movdqa %xmm0, 64(%rdi)
-; SSE41-NEXT: movdqa %xmm9, 48(%rdi)
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: movdqa %xmm0, 32(%rdi)
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE41-NEXT: movdqa %xmm1, 64(%rdi)
+; SSE41-NEXT: movdqa %xmm8, 48(%rdi)
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero
+; SSE41-NEXT: movdqa %xmm1, 32(%rdi)
+; SSE41-NEXT: movdqa %xmm0, 16(%rdi)
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE41-NEXT: movaps %xmm0, 16(%rdi)
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero
-; SSE41-NEXT: movdqa %xmm0, (%rdi)
+; SSE41-NEXT: movaps %xmm0, (%rdi)
; SSE41-NEXT: retq
;
; AVX2-LABEL: zext_mulhuw_v64i16_lshr:
@@ -1570,16 +1569,16 @@ define <64 x i32> @mulhsw_v64i16_lshr(<64 x i16> %a, <64 x i16> %b) {
;
; SSE41-LABEL: mulhsw_v64i16_lshr:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm8
+; SSE41-NEXT: movdqa %xmm1, %xmm9
; SSE41-NEXT: movq %rdi, %rax
-; SSE41-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm8
+; SSE41-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm0
; SSE41-NEXT: pxor %xmm11, %xmm11
-; SSE41-NEXT: movdqa %xmm8, %xmm0
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE41-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm11[4],xmm0[5],xmm11[5],xmm0[6],xmm11[6],xmm0[7],xmm11[7]
-; SSE41-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE41-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm9
-; SSE41-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
+; SSE41-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm9
+; SSE41-NEXT: movdqa %xmm9, %xmm8
+; SSE41-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
; SSE41-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm10
; SSE41-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm11[4],xmm10[5],xmm11[5],xmm10[6],xmm11[6],xmm10[7],xmm11[7]
@@ -1596,32 +1595,31 @@ define <64 x i32> @mulhsw_v64i16_lshr(<64 x i16> %a, <64 x i16> %b) {
; SSE41-NEXT: movdqa %xmm6, %xmm15
; SSE41-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm11[4],xmm15[5],xmm11[5],xmm15[6],xmm11[6],xmm15[7],xmm11[7]
; SSE41-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm7
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
; SSE41-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
; SSE41-NEXT: movdqa %xmm7, 240(%rdi)
-; SSE41-NEXT: movdqa %xmm0, 224(%rdi)
+; SSE41-NEXT: movdqa %xmm1, 224(%rdi)
; SSE41-NEXT: movdqa %xmm15, 208(%rdi)
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; SSE41-NEXT: movdqa %xmm0, 192(%rdi)
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; SSE41-NEXT: movdqa %xmm1, 192(%rdi)
; SSE41-NEXT: movdqa %xmm14, 176(%rdi)
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
-; SSE41-NEXT: movdqa %xmm0, 160(%rdi)
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
+; SSE41-NEXT: movdqa %xmm1, 160(%rdi)
; SSE41-NEXT: movdqa %xmm13, 144(%rdi)
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; SSE41-NEXT: movdqa %xmm0, 128(%rdi)
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; SSE41-NEXT: movdqa %xmm1, 128(%rdi)
; SSE41-NEXT: movdqa %xmm12, 112(%rdi)
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE41-NEXT: movdqa %xmm0, 96(%rdi)
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; SSE41-NEXT: movdqa %xmm1, 96(%rdi)
; SSE41-NEXT: movdqa %xmm10, 80(%rdi)
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; SSE41-NEXT: movdqa %xmm0, 64(%rdi)
-; SSE41-NEXT: movdqa %xmm9, 48(%rdi)
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: movdqa %xmm0, 32(%rdi)
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE41-NEXT: movdqa %xmm1, 64(%rdi)
+; SSE41-NEXT: movdqa %xmm8, 48(%rdi)
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero
+; SSE41-NEXT: movdqa %xmm1, 32(%rdi)
+; SSE41-NEXT: movdqa %xmm0, 16(%rdi)
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE41-NEXT: movaps %xmm0, 16(%rdi)
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero
-; SSE41-NEXT: movdqa %xmm0, (%rdi)
+; SSE41-NEXT: movaps %xmm0, (%rdi)
; SSE41-NEXT: retq
;
; AVX2-LABEL: mulhsw_v64i16_lshr:
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
index 1436922f9dd114..2b228cb21851a8 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
@@ -439,37 +439,37 @@ define void @load_i16_stride3_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: pandn %xmm2, %xmm5
-; SSE-NEXT: movdqa %xmm3, %xmm6
-; SSE-NEXT: pand %xmm4, %xmm6
-; SSE-NEXT: por %xmm5, %xmm6
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[2,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,4,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,2,3,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,0,0,0]
-; SSE-NEXT: pand %xmm6, %xmm5
+; SSE-NEXT: movdqa %xmm4, %xmm6
+; SSE-NEXT: pandn %xmm3, %xmm6
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: por %xmm5, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,2,3,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535,65535,0,0,0]
+; SSE-NEXT: pand %xmm5, %xmm3
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm0[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,5,6]
-; SSE-NEXT: movdqa %xmm6, %xmm8
+; SSE-NEXT: movdqa %xmm5, %xmm8
; SSE-NEXT: pandn %xmm7, %xmm8
-; SSE-NEXT: por %xmm5, %xmm8
+; SSE-NEXT: por %xmm3, %xmm8
; SSE-NEXT: pand %xmm4, %xmm2
-; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: por %xmm2, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[3,1,2,0]
+; SSE-NEXT: por %xmm6, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
-; SSE-NEXT: pand %xmm6, %xmm2
+; SSE-NEXT: pand %xmm5, %xmm2
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: por %xmm2, %xmm6
+; SSE-NEXT: pandn %xmm0, %xmm5
+; SSE-NEXT: por %xmm2, %xmm5
; SSE-NEXT: movaps %xmm1, (%rsi)
; SSE-NEXT: movdqa %xmm8, (%rdx)
-; SSE-NEXT: movdqa %xmm6, (%rcx)
+; SSE-NEXT: movdqa %xmm5, (%rcx)
; SSE-NEXT: retq
;
; AVX-LABEL: load_i16_stride3_vf8:
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
index 8e55cb48cf7a2e..fb4ecba1bf8524 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
@@ -715,34 +715,34 @@ define void @load_i16_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-LABEL: load_i16_stride5_vf8:
; SSE: # %bb.0:
; SSE-NEXT: movdqa 64(%rdi), %xmm6
-; SSE-NEXT: movdqa (%rdi), %xmm4
-; SSE-NEXT: movdqa 16(%rdi), %xmm3
+; SSE-NEXT: movdqa (%rdi), %xmm5
+; SSE-NEXT: movdqa 16(%rdi), %xmm4
; SSE-NEXT: movdqa 32(%rdi), %xmm0
-; SSE-NEXT: movdqa 48(%rdi), %xmm5
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,0,65535,65535,65535]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm5[0,1,0,3]
-; SSE-NEXT: pand %xmm1, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: por %xmm2, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,2,2,3]
+; SSE-NEXT: movdqa 48(%rdi), %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,0,65535,65535,65535]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,0,3]
+; SSE-NEXT: pand %xmm2, %xmm3
+; SSE-NEXT: pandn %xmm0, %xmm2
+; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[3,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,3,2,3,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,3]
-; SSE-NEXT: movaps {{.*#+}} xmm1 = [65535,65535,65535,65535,65535,65535,65535,0]
-; SSE-NEXT: andps %xmm1, %xmm7
+; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm2[2,3]
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [65535,65535,65535,65535,65535,65535,65535,0]
+; SSE-NEXT: andps %xmm2, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm6[0,1,0,1]
-; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: pandn %xmm8, %xmm2
-; SSE-NEXT: por %xmm7, %xmm2
-; SSE-NEXT: movdqa %xmm3, %xmm7
+; SSE-NEXT: movaps %xmm2, %xmm3
+; SSE-NEXT: pandn %xmm8, %xmm3
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: movdqa %xmm4, %xmm7
; SSE-NEXT: psrlq $48, %xmm7
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm4[0,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm5[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[1,2,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; SSE-NEXT: movdqa {{.*#+}} xmm7 = [0,0,0,65535,65535,65535,65535,65535]
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm5[1,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm1[1,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm10[0,1,2,3,7,5,6,7]
@@ -752,65 +752,64 @@ define void @load_i16_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pand %xmm7, %xmm9
; SSE-NEXT: pandn %xmm8, %xmm7
; SSE-NEXT: por %xmm9, %xmm7
-; SSE-NEXT: pand %xmm1, %xmm7
+; SSE-NEXT: pand %xmm2, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm6[0,1,2,0]
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,1,0,3]
; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,1,1,3]
; SSE-NEXT: psllq $48, %xmm6
-; SSE-NEXT: pandn %xmm6, %xmm1
-; SSE-NEXT: por %xmm7, %xmm1
-; SSE-NEXT: movdqa %xmm5, %xmm7
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,2,2,3]
-; SSE-NEXT: movdqa %xmm5, %xmm12
-; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[1,0],xmm0[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[2,0],xmm0[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,0,1,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [65535,65535,65535,0,0,0,65535,65535]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,1,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; SSE-NEXT: pand %xmm13, %xmm5
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm4[1,1,1,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm3[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm11[0],xmm14[1],xmm11[1]
-; SSE-NEXT: movdqa %xmm13, %xmm15
-; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm14[0,3,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[1,0,3,3,4,5,6,7]
-; SSE-NEXT: pand %xmm13, %xmm11
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0,2]
-; SSE-NEXT: movdqa %xmm13, %xmm4
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,1,3,3,4,5,6,7]
-; SSE-NEXT: pand %xmm13, %xmm3
-; SSE-NEXT: pandn %xmm12, %xmm13
-; SSE-NEXT: por %xmm13, %xmm5
-; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pandn %xmm6, %xmm2
+; SSE-NEXT: por %xmm7, %xmm2
+; SSE-NEXT: movdqa %xmm1, %xmm11
+; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm0[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm0[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,0,1,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,65535,65535,0,0,0,65535,65535]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,1,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm4[2],xmm6[3],xmm4[3]
+; SSE-NEXT: pand %xmm12, %xmm6
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm5[1,1,1,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm4[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm7[0],xmm13[1],xmm7[1]
+; SSE-NEXT: movdqa %xmm12, %xmm14
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm13[0,3,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,0,3,3,4,5,6,7]
+; SSE-NEXT: pand %xmm12, %xmm7
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm4[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0,2]
+; SSE-NEXT: movdqa %xmm12, %xmm5
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[2,1,3,3,4,5,6,7]
+; SSE-NEXT: pand %xmm12, %xmm4
+; SSE-NEXT: pandn %xmm11, %xmm12
+; SSE-NEXT: por %xmm12, %xmm6
+; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,6,5]
-; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,1],xmm12[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm8[2,0]
+; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,1],xmm11[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm8[2,0]
; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm0[2,2,2,2,4,5,6,7]
-; SSE-NEXT: pandn %xmm8, %xmm15
-; SSE-NEXT: por %xmm15, %xmm11
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm0[3,0]
-; SSE-NEXT: pandn %xmm0, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm7[0,2]
+; SSE-NEXT: pandn %xmm8, %xmm14
+; SSE-NEXT: por %xmm14, %xmm7
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm1[0,2,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
+; SSE-NEXT: pandn %xmm0, %xmm5
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,4,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm9[0,1,2,3,4,5,5,6]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,1],xmm0[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,1],xmm7[2,0]
-; SSE-NEXT: por %xmm4, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm6[0,1,0,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm10[0,1,2,3,4,5,4,7]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1],xmm0[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,0]
-; SSE-NEXT: movdqa %xmm2, (%rsi)
-; SSE-NEXT: movdqa %xmm1, (%rdx)
-; SSE-NEXT: movaps %xmm5, (%rcx)
-; SSE-NEXT: movaps %xmm11, (%r8)
-; SSE-NEXT: movaps %xmm3, (%r9)
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm9[0,1,2,3,4,5,5,6]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
+; SSE-NEXT: por %xmm5, %xmm4
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[0,1,0,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm10[0,1,2,3,4,5,4,7]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm1[2,0]
+; SSE-NEXT: movdqa %xmm3, (%rsi)
+; SSE-NEXT: movdqa %xmm2, (%rdx)
+; SSE-NEXT: movaps %xmm6, (%rcx)
+; SSE-NEXT: movaps %xmm7, (%r8)
+; SSE-NEXT: movaps %xmm4, (%r9)
; SSE-NEXT: retq
;
; AVX-LABEL: load_i16_stride5_vf8:
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
index 1b637cd203c8f9..d8bb1865354b5e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
@@ -1530,197 +1530,190 @@ define void @load_i16_stride8_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
define void @load_i16_stride8_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6, ptr %out.vec7) nounwind {
; SSE-LABEL: load_i16_stride8_vf16:
; SSE: # %bb.0:
-; SSE-NEXT: subq $168, %rsp
-; SSE-NEXT: movdqa 112(%rdi), %xmm6
-; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 96(%rdi), %xmm14
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: subq $184, %rsp
+; SSE-NEXT: movdqa 112(%rdi), %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 96(%rdi), %xmm13
+; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 208(%rdi), %xmm0
-; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT: movdqa 192(%rdi), %xmm5
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 192(%rdi), %xmm3
; SSE-NEXT: movdqa 240(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 224(%rdi), %xmm12
-; SSE-NEXT: movdqa 144(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 128(%rdi), %xmm10
-; SSE-NEXT: movdqa 176(%rdi), %xmm2
+; SSE-NEXT: movdqa 224(%rdi), %xmm10
+; SSE-NEXT: movdqa %xmm10, (%rsp) # 16-byte Spill
+; SSE-NEXT: movdqa 144(%rdi), %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 160(%rdi), %xmm11
-; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm2[0],xmm11[1],xmm2[1],xmm11[2],xmm2[2],xmm11[3],xmm2[3]
-; SSE-NEXT: movdqa %xmm10, %xmm2
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE-NEXT: movdqa %xmm12, %xmm4
-; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,0,0,0]
-; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: movdqa 128(%rdi), %xmm6
+; SSE-NEXT: movdqa 176(%rdi), %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 160(%rdi), %xmm12
+; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3]
+; SSE-NEXT: movdqa %xmm6, %xmm4
+; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1],xmm10[2],xmm1[2],xmm10[3],xmm1[3]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[0,0,0,0]
+; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,0,0]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
-; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE-NEXT: movdqa %xmm4, %xmm2
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3]
; SSE-NEXT: movdqa 80(%rdi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 64(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm7
+; SSE-NEXT: movdqa 64(%rdi), %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm7
; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm14[0,0,0,0]
-; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm7[0,0,0,0]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm15 = xmm15[2],xmm6[2],xmm15[3],xmm6[3]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm13[0,0,0,0]
+; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm7[0,0,0,0]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm14 = xmm14[2],xmm5[2],xmm14[3],xmm5[3]
; SSE-NEXT: movdqa 32(%rdi), %xmm9
; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 48(%rdi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE-NEXT: movdqa (%rdi), %xmm6
+; SSE-NEXT: movdqa (%rdi), %xmm5
; SSE-NEXT: movdqa 16(%rdi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm6, %xmm8
+; SSE-NEXT: movdqa %xmm5, %xmm8
; SSE-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
; SSE-NEXT: movdqa %xmm8, %xmm0
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm15[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm14[2,3]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,1,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm11[1,1,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1]
-; SSE-NEXT: movdqa %xmm1, %xmm15
-; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm15[2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm12[1,1,1,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1]
+; SSE-NEXT: movdqa %xmm1, %xmm14
+; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm14[2,3]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm9[1,1,1,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm9[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1]
-; SSE-NEXT: movdqa %xmm7, %xmm15
-; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm15[2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1]
+; SSE-NEXT: movdqa %xmm7, %xmm14
+; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm14[2,3]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm4[2,2,2,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm10[2,2,2,2]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,2,2,2]
; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm15[2],xmm0[3],xmm15[3]
-; SSE-NEXT: movdqa %xmm2, %xmm15
-; SSE-NEXT: punpckhdq {{.*#+}} xmm15 = xmm15[2],xmm11[2],xmm15[3],xmm11[3]
-; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm15[0],xmm0[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm4[3,3,3,3]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm12[2],xmm4[3],xmm12[3]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm14[2,2,2,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm13[2,2,2,2]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,2,2,2]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm15[2],xmm0[3],xmm15[3]
-; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm8[3,3,3,3]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm8[3,3,3,3]
; SSE-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm0[2,3]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[3,3,3,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3]
-; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm14[2],xmm7[3],xmm14[3]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm10[2],xmm1[3],xmm10[3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[3,3,3,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1]
+; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,1],xmm1[2,3]
+; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm13[2],xmm7[3],xmm13[3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[3,3,3,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
-; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,1],xmm7[2,3]
-; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; SSE-NEXT: # xmm13 = xmm13[4],mem[4],xmm13[5],mem[5],xmm13[6],mem[6],xmm13[7],mem[7]
-; SSE-NEXT: movdqa %xmm10, %xmm15
+; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1]
+; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,1],xmm7[2,3]
+; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; SSE-NEXT: # xmm11 = xmm11[4],mem[4],xmm11[5],mem[5],xmm11[6],mem[6],xmm11[7],mem[7]
+; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT: # xmm6 = xmm6[4],mem[4],xmm6[5],mem[5],xmm6[6],mem[6],xmm6[7],mem[7]
+; SSE-NEXT: movdqa (%rsp), %xmm15 # 16-byte Reload
; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
; SSE-NEXT: # xmm15 = xmm15[4],mem[4],xmm15[5],mem[5],xmm15[6],mem[6],xmm15[7],mem[7]
-; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; SSE-NEXT: # xmm12 = xmm12[4],mem[4],xmm12[5],mem[5],xmm12[6],mem[6],xmm12[7],mem[7]
-; SSE-NEXT: movdqa %xmm5, %xmm2
-; SSE-NEXT: punpckhwd (%rsp), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: # xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[0,0,0,0]
+; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm0[0,0,0,0]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm14 = xmm14[2],xmm1[2],xmm14[3],xmm1[3]
+; SSE-NEXT: movdqa %xmm6, %xmm1
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
+; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm1[0],xmm14[1]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; SSE-NEXT: # xmm4 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7]
+; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[0,0,0,0]
-; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm2[0,0,0,0]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm1[2],xmm12[3],xmm1[3]
-; SSE-NEXT: movdqa %xmm15, %xmm1
-; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1]
-; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm1[0],xmm12[1]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; SSE-NEXT: # xmm10 = xmm10[4],mem[4],xmm10[5],mem[5],xmm10[6],mem[6],xmm10[7],mem[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,0,0,0]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,0,0,0]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: # xmm3 = xmm3[4],mem[4],xmm3[5],mem[5],xmm3[6],mem[6],xmm3[7],mem[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[0,0,0,0]
-; SSE-NEXT: movdqa %xmm10, %xmm14
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,0,0]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
; SSE-NEXT: # xmm5 = xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
-; SSE-NEXT: punpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; SSE-NEXT: # xmm6 = xmm6[4],mem[4],xmm6[5],mem[5],xmm6[6],mem[6],xmm6[7],mem[7]
-; SSE-NEXT: movdqa %xmm6, %xmm1
-; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm15[1,1,1,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm13[1,1,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
-; SSE-NEXT: movdqa %xmm2, %xmm7
-; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm7[2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm5[1,1,1,1]
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm7[2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm11[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
-; SSE-NEXT: movdqa %xmm3, %xmm9
-; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
+; SSE-NEXT: movdqa %xmm0, %xmm9
+; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1]
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm9[2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm0, %xmm10
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm2[2,2,2,2]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm11[2],xmm9[3],xmm11[3]
-; SSE-NEXT: movdqa %xmm15, %xmm11
-; SSE-NEXT: punpckhdq {{.*#+}} xmm11 = xmm11[2],xmm13[2],xmm11[3],xmm13[3]
-; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm11[0],xmm9[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm14[2,2,2,2]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,2,2]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm11[2],xmm0[3],xmm11[3]
-; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm6[3,3,3,3]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[2,3]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm10[2],xmm2[3],xmm10[3]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[3,3,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm13[3,3,3,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
-; SSE-NEXT: punpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT: # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[3,3,3,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm5[0],xmm14[1],xmm5[1]
-; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,1],xmm3[2,3]
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movaps %xmm2, (%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movaps %xmm3, 16(%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movaps %xmm2, (%rdx)
+; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm3[1,1,1,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm5[1,1,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
+; SSE-NEXT: movdqa %xmm2, %xmm10
+; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1]
+; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm10[2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm15[2,2,2,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm0[2,2,2,2]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm10 = xmm10[2],xmm12[2],xmm10[3],xmm12[3]
+; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm6[3,3,3,3]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm11[2],xmm6[3],xmm11[3]
+; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm6[0],xmm10[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,2,2,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm2[2,2,2,2]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm6[2],xmm12[3],xmm6[3]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[3,3,3,3]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm12[2,3]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm15[2],xmm0[3],xmm15[3]
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm11[3,3,3,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
+; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,1],xmm0[2,3]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm2[2,3]
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, (%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movaps %xmm2, 16(%rdx)
+; SSE-NEXT: movaps %xmm2, 16(%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, (%rdx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, 16(%rdx)
; SSE-NEXT: movaps %xmm8, (%rcx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movaps %xmm2, 16(%rcx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movaps %xmm2, (%r8)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movaps %xmm2, 16(%r8)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, 16(%rcx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, (%r8)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, 16(%r8)
; SSE-NEXT: movaps %xmm1, (%r9)
-; SSE-NEXT: movapd %xmm12, 16(%r9)
+; SSE-NEXT: movapd %xmm14, 16(%r9)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT: movaps %xmm7, (%rax)
-; SSE-NEXT: movaps %xmm4, 16(%rax)
+; SSE-NEXT: movaps %xmm9, (%rax)
+; SSE-NEXT: movaps %xmm7, 16(%rax)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT: movaps %xmm6, (%rax)
-; SSE-NEXT: movapd %xmm9, 16(%rax)
+; SSE-NEXT: movaps %xmm5, (%rax)
+; SSE-NEXT: movapd %xmm10, 16(%rax)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT: movaps %xmm0, 16(%rax)
-; SSE-NEXT: movaps %xmm14, (%rax)
-; SSE-NEXT: addq $168, %rsp
+; SSE-NEXT: movaps %xmm13, 16(%rax)
+; SSE-NEXT: movaps %xmm6, (%rax)
+; SSE-NEXT: addq $184, %rsp
; SSE-NEXT: retq
;
; AVX-LABEL: load_i16_stride8_vf16:
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
index a0ea6ddeca7dfd..8b0dede0bf33bc 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
@@ -745,99 +745,91 @@ define void @load_i32_stride3_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
define void @load_i32_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
; SSE-LABEL: load_i32_stride3_vf16:
; SSE: # %bb.0:
-; SSE-NEXT: movaps 96(%rdi), %xmm6
-; SSE-NEXT: movaps 128(%rdi), %xmm1
-; SSE-NEXT: movaps 112(%rdi), %xmm13
-; SSE-NEXT: movaps 144(%rdi), %xmm11
-; SSE-NEXT: movaps 176(%rdi), %xmm10
-; SSE-NEXT: movaps 160(%rdi), %xmm9
-; SSE-NEXT: movaps (%rdi), %xmm7
-; SSE-NEXT: movaps 16(%rdi), %xmm8
-; SSE-NEXT: movaps 32(%rdi), %xmm3
-; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movaps 48(%rdi), %xmm15
-; SSE-NEXT: movaps 80(%rdi), %xmm14
-; SSE-NEXT: movaps 64(%rdi), %xmm2
-; SSE-NEXT: movaps %xmm2, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm14[1,0]
-; SSE-NEXT: movaps %xmm15, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm0[0,2]
-; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movaps %xmm8, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm3[1,0]
-; SSE-NEXT: movaps %xmm7, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm0[0,2]
-; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movaps %xmm9, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm10[1,0]
+; SSE-NEXT: movaps 96(%rdi), %xmm15
+; SSE-NEXT: movaps 128(%rdi), %xmm11
+; SSE-NEXT: movaps 112(%rdi), %xmm14
+; SSE-NEXT: movaps 144(%rdi), %xmm12
+; SSE-NEXT: movaps 176(%rdi), %xmm2
+; SSE-NEXT: movaps 160(%rdi), %xmm8
+; SSE-NEXT: movaps (%rdi), %xmm9
+; SSE-NEXT: movaps 16(%rdi), %xmm6
+; SSE-NEXT: movaps 32(%rdi), %xmm10
; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movaps %xmm11, %xmm3
-; SSE-NEXT: movaps %xmm11, %xmm4
-; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm0[0,2]
-; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movaps %xmm13, %xmm0
+; SSE-NEXT: movaps 48(%rdi), %xmm5
+; SSE-NEXT: movaps 80(%rdi), %xmm1
+; SSE-NEXT: movaps 64(%rdi), %xmm3
+; SSE-NEXT: movaps %xmm3, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,0]
-; SSE-NEXT: movaps %xmm1, %xmm12
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movaps %xmm6, %xmm5
-; SSE-NEXT: movaps %xmm6, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm0[0,2]
-; SSE-NEXT: movaps %xmm15, %xmm11
-; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm2[0,0]
-; SSE-NEXT: movaps %xmm2, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm14[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,2],xmm0[0,2]
-; SSE-NEXT: movaps %xmm4, %xmm6
-; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,0],xmm9[0,0]
-; SSE-NEXT: movaps %xmm9, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm10[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm0[0,2]
-; SSE-NEXT: movaps %xmm3, %xmm1
-; SSE-NEXT: movaps %xmm3, %xmm10
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm13[0,0]
-; SSE-NEXT: movaps %xmm13, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm12[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm8[0,0]
-; SSE-NEXT: movaps %xmm8, %xmm12
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[3,1],xmm4[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm12[0,2]
-; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm2[1,1,1,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm15[2,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm14[0,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[1,1,1,1]
+; SSE-NEXT: movaps %xmm5, %xmm7
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,3],xmm0[0,2]
+; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movaps %xmm6, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm10[1,0]
+; SSE-NEXT: movaps %xmm9, %xmm7
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,3],xmm0[0,2]
+; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movaps %xmm8, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[1,0]
+; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movaps %xmm12, %xmm10
+; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,3],xmm0[0,2]
+; SSE-NEXT: movaps %xmm14, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm11[1,0]
+; SSE-NEXT: movaps %xmm11, %xmm13
+; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movaps %xmm15, %xmm7
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,3],xmm0[0,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm5[2,3,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm3[0,0]
+; SSE-NEXT: movaps %xmm3, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm0[0,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm12[2,3,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[1,0],xmm8[0,0]
+; SSE-NEXT: movaps %xmm8, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm2[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,2],xmm0[0,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,3,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[1,0],xmm14[0,0]
+; SSE-NEXT: movaps %xmm14, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm13[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,2],xmm0[0,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,3,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,0],xmm6[0,0]
+; SSE-NEXT: movaps %xmm6, %xmm13
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[3,1],xmm2[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm13[0,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm3[0],xmm11[1],xmm3[1]
+; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; SSE-NEXT: # xmm11 = xmm11[0,1],mem[0,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm4[0,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm9[1,1,1,1]
-; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
-; SSE-NEXT: # xmm8 = mem[2,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
-; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
-; SSE-NEXT: # xmm8 = xmm8[0,1],mem[0,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[1,1,1,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm10[2,3,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = xmm4[0,1],mem[0,3]
-; SSE-NEXT: movaps %xmm5, 32(%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movaps %xmm3, 48(%rsi)
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm14[1,1,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: # xmm1 = xmm1[0,1],mem[0,3]
+; SSE-NEXT: movaps %xmm7, 32(%rsi)
+; SSE-NEXT: movaps %xmm10, 48(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, (%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, 16(%rsi)
-; SSE-NEXT: movaps %xmm1, 32(%rdx)
-; SSE-NEXT: movaps %xmm6, 48(%rdx)
-; SSE-NEXT: movaps %xmm7, (%rdx)
-; SSE-NEXT: movaps %xmm11, 16(%rdx)
-; SSE-NEXT: movaps %xmm4, 32(%rcx)
-; SSE-NEXT: movaps %xmm8, 48(%rcx)
+; SSE-NEXT: movaps %xmm15, 32(%rdx)
+; SSE-NEXT: movaps %xmm12, 48(%rdx)
+; SSE-NEXT: movaps %xmm9, (%rdx)
+; SSE-NEXT: movaps %xmm5, 16(%rdx)
+; SSE-NEXT: movaps %xmm1, 32(%rcx)
+; SSE-NEXT: movaps %xmm4, 48(%rcx)
; SSE-NEXT: movaps %xmm0, (%rcx)
-; SSE-NEXT: movaps %xmm2, 16(%rcx)
+; SSE-NEXT: movaps %xmm11, 16(%rcx)
; SSE-NEXT: retq
;
; AVX-LABEL: load_i32_stride3_vf16:
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
index aae4d9fa15e246..fc378b37d149a7 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
@@ -880,122 +880,117 @@ define void @load_i32_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
; SSE-LABEL: load_i32_stride6_vf8:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa 144(%rdi), %xmm4
-; SSE-NEXT: movdqa 160(%rdi), %xmm2
-; SSE-NEXT: movdqa 96(%rdi), %xmm6
-; SSE-NEXT: movdqa 112(%rdi), %xmm3
-; SSE-NEXT: movdqa 64(%rdi), %xmm5
-; SSE-NEXT: movdqa (%rdi), %xmm10
-; SSE-NEXT: movdqa 16(%rdi), %xmm0
+; SSE-NEXT: movdqa 144(%rdi), %xmm12
+; SSE-NEXT: movdqa 160(%rdi), %xmm4
+; SSE-NEXT: movdqa 96(%rdi), %xmm7
+; SSE-NEXT: movdqa 112(%rdi), %xmm5
+; SSE-NEXT: movdqa 64(%rdi), %xmm3
+; SSE-NEXT: movdqa (%rdi), %xmm9
+; SSE-NEXT: movdqa 16(%rdi), %xmm6
; SSE-NEXT: movdqa 48(%rdi), %xmm8
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE-NEXT: movdqa %xmm0, %xmm11
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,1,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm10[2,3,2,3]
-; SSE-NEXT: movdqa %xmm10, %xmm7
-; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,2,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm8[0,0,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1]
-; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm7[0],xmm9[1]
-; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm6[1,1,1,1]
-; SSE-NEXT: movdqa %xmm6, %xmm9
-; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,0,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
-; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm9[0],xmm7[1]
-; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[3,3,3,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,1,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm9[2,3,2,3]
+; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,2,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm8[0,0,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1]
+; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm2[0],xmm10[1]
+; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
-; SSE-NEXT: movdqa %xmm5, %xmm9
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm8[2,2,3,3]
-; SSE-NEXT: movdqa %xmm8, %xmm11
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[1,1,1,1]
+; SSE-NEXT: movdqa %xmm7, %xmm10
+; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,2,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm12[0,0,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1]
-; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm0[0],xmm11[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[3,3,3,3]
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE-NEXT: movdqa %xmm4, %xmm12
+; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm10[0],xmm11[1]
+; SSE-NEXT: movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[3,3,3,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
+; SSE-NEXT: movdqa %xmm3, %xmm11
+; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm8[2,2,3,3]
+; SSE-NEXT: movdqa %xmm8, %xmm3
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
+; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[3,3,3,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
+; SSE-NEXT: movdqa %xmm12, %xmm1
; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1]
; SSE-NEXT: movdqa 80(%rdi), %xmm14
-; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm15[0],xmm12[1]
+; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm2[0],xmm12[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[0,0,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
-; SSE-NEXT: movdqa 32(%rdi), %xmm7
-; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm7[0],xmm13[1],xmm7[1]
-; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm13[0],xmm5[1]
-; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm4[2,2,3,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
+; SSE-NEXT: movdqa 32(%rdi), %xmm10
+; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm10[0],xmm13[1],xmm10[1]
+; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm13[0],xmm15[1]
+; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm1[2,2,3,3]
; SSE-NEXT: movdqa 176(%rdi), %xmm15
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[0,0,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
-; SSE-NEXT: movdqa 128(%rdi), %xmm5
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
+; SSE-NEXT: movdqa %xmm7, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
+; SSE-NEXT: movdqa 128(%rdi), %xmm7
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm0[0],xmm13[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[3,3,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm7[1,1,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[3,3,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm10[1,1,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1]
; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm0[0],xmm8[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[3,3,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm15[0],xmm4[1],xmm15[1]
-; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm3, %xmm10
-; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm9[0,0,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
-; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm10[0],xmm6[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm5[2,3,2,3]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,2,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm2[0,0,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1]
-; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm0[0],xmm10[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[3,3,3,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[2,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[3,3,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm7[1,1,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm1[2,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1]
; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm0[0],xmm9[1]
-; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: # xmm0 = mem[1,1,1,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[3,3,3,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, 16(%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, (%rsi)
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[2,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,0,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm6[0],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm15[2,2,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm10[3,3,3,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm14[2,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm3[0],xmm11[1],xmm3[1]
+; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm2[0],xmm11[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[3,3,3,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm15[2,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm6[0],xmm4[1]
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, 16(%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, (%rsi)
; SSE-NEXT: movapd %xmm12, 16(%rdx)
-; SSE-NEXT: movapd %xmm11, (%rdx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, (%rdx)
; SSE-NEXT: movapd %xmm13, 16(%rcx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, (%rcx)
-; SSE-NEXT: movapd %xmm4, 16(%r8)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, (%rcx)
+; SSE-NEXT: movapd %xmm9, 16(%r8)
; SSE-NEXT: movapd %xmm8, (%r8)
-; SSE-NEXT: movapd %xmm10, 16(%r9)
-; SSE-NEXT: movapd %xmm6, (%r9)
+; SSE-NEXT: movapd %xmm0, 16(%r9)
+; SSE-NEXT: movapd %xmm1, (%r9)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT: movapd %xmm2, 16(%rax)
-; SSE-NEXT: movapd %xmm9, (%rax)
+; SSE-NEXT: movapd %xmm4, 16(%rax)
+; SSE-NEXT: movapd %xmm11, (%rax)
; SSE-NEXT: retq
;
; AVX-LABEL: load_i32_stride6_vf8:
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
index 15f6ef4006fddf..0f6056a6ca6b2f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
@@ -409,61 +409,61 @@ define void @load_i8_stride4_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
define void @load_i8_stride4_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
; SSE-LABEL: load_i8_stride4_vf8:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa (%rdi), %xmm3
-; SSE-NEXT: movdqa 16(%rdi), %xmm4
+; SSE-NEXT: movdqa (%rdi), %xmm1
+; SSE-NEXT: movdqa 16(%rdi), %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0]
-; SSE-NEXT: movdqa %xmm4, %xmm1
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm4[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pand %xmm0, %xmm4
-; SSE-NEXT: pand %xmm3, %xmm0
-; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: pand %xmm0, %xmm3
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm3, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pxor %xmm6, %xmm6
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,2,2,3]
+; SSE-NEXT: pxor %xmm5, %xmm5
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm2[3,1,2,3,4,5,6,7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm3[3,1,2,3,4,5,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm6[8],xmm3[9],xmm6[9],xmm3[10],xmm6[10],xmm3[11],xmm6[11],xmm3[12],xmm6[12],xmm3[13],xmm6[13],xmm3[14],xmm6[14],xmm3[15],xmm6[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm3[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[1,3,2,3,4,5,6,7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,3,2,3,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1]
-; SSE-NEXT: packuswb %xmm7, %xmm6
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,3,2,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,5,6,7]
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm4[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[1,3,2,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm1[3,1,2,3,4,5,6,7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,3,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
+; SSE-NEXT: packuswb %xmm7, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,5,6,7]
; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm7, %xmm5
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,5,4]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pand %xmm7, %xmm6
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm9[0,1,2,3,7,5,6,7]
; SSE-NEXT: pand %xmm7, %xmm8
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,0,3,2,4,5,6,7]
-; SSE-NEXT: packuswb %xmm5, %xmm7
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm7[0,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,1,4,5,6,7]
+; SSE-NEXT: packuswb %xmm6, %xmm7
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm7[0,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[3,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[3,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSE-NEXT: packuswb %xmm2, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE-NEXT: packuswb %xmm2, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
; SSE-NEXT: movq %xmm0, (%rsi)
-; SSE-NEXT: movq %xmm6, (%rdx)
-; SSE-NEXT: movq %xmm5, (%rcx)
+; SSE-NEXT: movq %xmm5, (%rdx)
+; SSE-NEXT: movq %xmm6, (%rcx)
; SSE-NEXT: movq %xmm1, (%r8)
; SSE-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
index e05b5ab9ebe025..8707e1851ffdfa 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
@@ -1117,277 +1117,288 @@ define void @load_i8_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind {
; SSE-LABEL: load_i8_stride5_vf16:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa 64(%rdi), %xmm9
-; SSE-NEXT: movdqa (%rdi), %xmm1
+; SSE-NEXT: movdqa 64(%rdi), %xmm7
+; SSE-NEXT: movdqa (%rdi), %xmm2
; SSE-NEXT: movdqa 16(%rdi), %xmm6
-; SSE-NEXT: movdqa 32(%rdi), %xmm10
-; SSE-NEXT: movdqa 48(%rdi), %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255]
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: pandn %xmm10, %xmm0
+; SSE-NEXT: movdqa 32(%rdi), %xmm1
+; SSE-NEXT: movdqa 48(%rdi), %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255]
+; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm13
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm3, %xmm1
+; SSE-NEXT: movdqa %xmm3, %xmm14
+; SSE-NEXT: pand %xmm9, %xmm1
+; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: pxor %xmm8, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255]
+; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: pandn %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm6, %xmm11
; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: pand %xmm5, %xmm4
; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: pxor %xmm8, %xmm8
; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,1,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,7]
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [0,65535,65535,65535,0,0,65535,65535]
; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm8[8],xmm4[9],xmm8[9],xmm4[10],xmm8[10],xmm4[11],xmm8[11],xmm4[12],xmm8[12],xmm4[13],xmm8[13],xmm4[14],xmm8[14],xmm4[15],xmm8[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,4,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; SSE-NEXT: packuswb %xmm4, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255]
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; SSE-NEXT: movdqa %xmm4, %xmm5
-; SSE-NEXT: pandn %xmm6, %xmm5
-; SSE-NEXT: movdqa %xmm6, %xmm15
-; SSE-NEXT: movdqa %xmm1, %xmm6
-; SSE-NEXT: movdqa %xmm1, %xmm13
-; SSE-NEXT: pand %xmm4, %xmm6
-; SSE-NEXT: por %xmm5, %xmm6
-; SSE-NEXT: movdqa %xmm6, %xmm5
-; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3],xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = [0,65535,65535,65535,0,0,65535,65535]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
-; SSE-NEXT: pand %xmm7, %xmm6
-; SSE-NEXT: pandn %xmm5, %xmm7
-; SSE-NEXT: por %xmm6, %xmm7
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm7[0,2,1,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,5,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,1,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm5[0,1,2,3,4,6,5,7]
-; SSE-NEXT: packuswb %xmm7, %xmm7
-; SSE-NEXT: pand %xmm11, %xmm7
-; SSE-NEXT: movdqa %xmm11, %xmm5
-; SSE-NEXT: pandn %xmm0, %xmm5
-; SSE-NEXT: por %xmm5, %xmm7
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
-; SSE-NEXT: pand %xmm6, %xmm7
-; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm5
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm9[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm9[2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,4]
+; SSE-NEXT: pand %xmm6, %xmm4
+; SSE-NEXT: pandn %xmm0, %xmm6
+; SSE-NEXT: por %xmm4, %xmm6
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm6[0,2,1,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm6, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: por %xmm7, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm4, %xmm7
-; SSE-NEXT: pandn %xmm10, %xmm7
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: por %xmm7, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm12
-; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm8[8],xmm12[9],xmm8[9],xmm12[10],xmm8[10],xmm12[11],xmm8[11],xmm12[12],xmm8[12],xmm12[13],xmm8[13],xmm12[14],xmm8[14],xmm12[15],xmm8[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
+; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: movdqa %xmm7, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm6
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm7[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm7[2,3]
+; SSE-NEXT: movaps %xmm7, %xmm8
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,4]
+; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: pandn %xmm13, %xmm1
+; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: movdqa %xmm14, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE-NEXT: pxor %xmm13, %xmm13
; SSE-NEXT: movdqa %xmm0, %xmm7
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm12[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm12[2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm14 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT: movdqa %xmm14, %xmm12
-; SSE-NEXT: movdqa %xmm13, %xmm1
-; SSE-NEXT: pandn %xmm13, %xmm12
-; SSE-NEXT: movdqa %xmm15, %xmm13
-; SSE-NEXT: movdqa %xmm15, %xmm2
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm14, %xmm13
-; SSE-NEXT: por %xmm12, %xmm13
-; SSE-NEXT: movdqa %xmm13, %xmm12
-; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm8[8],xmm12[9],xmm8[9],xmm12[10],xmm8[10],xmm12[11],xmm8[11],xmm12[12],xmm8[12],xmm12[13],xmm8[13],xmm12[14],xmm8[14],xmm12[15],xmm8[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3],xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
-; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,65535,0,0,65535,65535,65535,0]
-; SSE-NEXT: pand %xmm15, %xmm13
-; SSE-NEXT: pandn %xmm12, %xmm15
-; SSE-NEXT: por %xmm13, %xmm15
-; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm15[0,2,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,7,6,5]
-; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm12[1,2,3,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,7,4,5,7]
-; SSE-NEXT: packuswb %xmm12, %xmm12
-; SSE-NEXT: pand %xmm11, %xmm12
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm1[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm1[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,2,3,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm7[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
; SSE-NEXT: psllq $48, %xmm0
-; SSE-NEXT: packuswb %xmm7, %xmm0
-; SSE-NEXT: movdqa %xmm5, %xmm7
-; SSE-NEXT: pandn %xmm0, %xmm11
-; SSE-NEXT: por %xmm11, %xmm12
-; SSE-NEXT: pand %xmm6, %xmm12
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm9[3,0]
-; SSE-NEXT: movaps %xmm9, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm7[0,2]
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm14 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm14, %xmm1
+; SSE-NEXT: pandn %xmm3, %xmm1
+; SSE-NEXT: movdqa %xmm11, %xmm2
+; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm11, %xmm7
+; SSE-NEXT: pand %xmm14, %xmm7
+; SSE-NEXT: por %xmm1, %xmm7
+; SSE-NEXT: movdqa %xmm7, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm13[8],xmm1[9],xmm13[9],xmm1[10],xmm13[10],xmm1[11],xmm13[11],xmm1[12],xmm13[12],xmm1[13],xmm13[13],xmm1[14],xmm13[14],xmm1[15],xmm13[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,0,0,65535,65535,65535,0]
+; SSE-NEXT: pand %xmm11, %xmm7
+; SSE-NEXT: pandn %xmm1, %xmm11
+; SSE-NEXT: por %xmm7, %xmm11
+; SSE-NEXT: movdqa %xmm6, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm11[0,2,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,3,2,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,2,3,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,4,5,7]
+; SSE-NEXT: packuswb %xmm7, %xmm7
+; SSE-NEXT: pand %xmm12, %xmm7
+; SSE-NEXT: pandn %xmm0, %xmm12
+; SSE-NEXT: por %xmm7, %xmm12
+; SSE-NEXT: pand %xmm10, %xmm12
+; SSE-NEXT: movaps %xmm8, %xmm0
+; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm8[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm6, %xmm5
-; SSE-NEXT: pandn %xmm0, %xmm5
-; SSE-NEXT: por %xmm12, %xmm5
-; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: por %xmm12, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm5
-; SSE-NEXT: pand %xmm12, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
+; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: pand %xmm12, %xmm1
+; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; SSE-NEXT: movdqa {{.*#+}} xmm13 = [65535,65535,65535,0,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm13, %xmm15
-; SSE-NEXT: pandn %xmm0, %xmm15
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
-; SSE-NEXT: pand %xmm13, %xmm2
-; SSE-NEXT: por %xmm15, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[2,1,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm13, %xmm7
+; SSE-NEXT: pandn %xmm0, %xmm7
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE-NEXT: pand %xmm13, %xmm1
+; SSE-NEXT: por %xmm7, %xmm1
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm15 = [0,0,0,65535,65535,65535,65535,65535]
-; SSE-NEXT: pandn %xmm0, %xmm15
-; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; SSE-NEXT: pandn %xmm11, %xmm0
-; SSE-NEXT: movdqa %xmm11, %xmm7
+; SSE-NEXT: movdqa %xmm15, %xmm6
+; SSE-NEXT: pandn %xmm0, %xmm6
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: pandn %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm4, %xmm7
+; SSE-NEXT: movdqa %xmm4, %xmm11
; SSE-NEXT: pand %xmm14, %xmm7
-; SSE-NEXT: pandn %xmm10, %xmm14
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: pandn %xmm0, %xmm14
; SSE-NEXT: pand %xmm12, %xmm11
-; SSE-NEXT: pandn %xmm10, %xmm12
-; SSE-NEXT: pand %xmm4, %xmm10
-; SSE-NEXT: por %xmm0, %xmm10
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3],xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[0,1,2,0]
-; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[2,0],xmm0[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm10[0,2]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pandn %xmm0, %xmm12
+; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,2,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,5]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5,6,5]
; SSE-NEXT: packuswb %xmm0, %xmm1
-; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE-NEXT: por %xmm15, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,1,1]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm2[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[0,3,2,3,4,5,6,7]
+; SSE-NEXT: pand %xmm15, %xmm1
+; SSE-NEXT: por %xmm6, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm6[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm6, %xmm10
-; SSE-NEXT: pandn %xmm0, %xmm10
-; SSE-NEXT: pand %xmm6, %xmm1
-; SSE-NEXT: por %xmm1, %xmm10
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: pand %xmm3, %xmm0
-; SSE-NEXT: pandn %xmm5, %xmm3
-; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,0]
-; SSE-NEXT: por %xmm7, %xmm14
-; SSE-NEXT: movdqa %xmm14, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm1[8],xmm14[9],xmm1[9],xmm14[10],xmm1[10],xmm14[11],xmm1[11],xmm14[12],xmm1[12],xmm14[13],xmm1[13],xmm14[14],xmm1[14],xmm14[15],xmm1[15]
-; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[3,1],xmm0[2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm14[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,0,1,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
-; SSE-NEXT: packuswb %xmm1, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,0,0,65535,65535,65535,65535,65535]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm3[0,1,2,3,4,5,6,5]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[3,1,2,0]
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,0,1,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,4,6,7]
-; SSE-NEXT: packuswb %xmm7, %xmm7
-; SSE-NEXT: pandn %xmm7, %xmm1
-; SSE-NEXT: movaps %xmm9, %xmm7
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm2[3,0]
+; SSE-NEXT: packuswb %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: pand %xmm10, %xmm1
; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movaps %xmm2, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm7[0,2]
-; SSE-NEXT: pand %xmm6, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: pand %xmm9, %xmm1
+; SSE-NEXT: pandn %xmm8, %xmm9
+; SSE-NEXT: por %xmm1, %xmm9
+; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm2[0],xmm9[1],xmm2[1],xmm9[2],xmm2[2],xmm9[3],xmm2[3],xmm9[4],xmm2[4],xmm9[5],xmm2[5],xmm9[6],xmm2[6],xmm9[7],xmm2[7]
+; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[3,1],xmm1[2,0]
+; SSE-NEXT: por %xmm7, %xmm14
+; SSE-NEXT: movdqa %xmm14, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm2[8],xmm14[9],xmm2[9],xmm14[10],xmm2[10],xmm14[11],xmm2[11],xmm14[12],xmm2[12],xmm14[13],xmm2[13],xmm14[14],xmm2[14],xmm14[15],xmm2[15]
+; SSE-NEXT: pxor %xmm7, %xmm7
+; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[3,1],xmm1[2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm14[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,0,1,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,6]
+; SSE-NEXT: packuswb %xmm2, %xmm1
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm9[0,1,2,3,4,5,6,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,1,2,0]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,0,1,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,4,6,7]
+; SSE-NEXT: packuswb %xmm2, %xmm2
+; SSE-NEXT: pand %xmm15, %xmm1
+; SSE-NEXT: pandn %xmm2, %xmm15
+; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm9
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm6[3,0]
+; SSE-NEXT: por %xmm1, %xmm15
+; SSE-NEXT: movaps %xmm6, %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,2]
+; SSE-NEXT: pand %xmm10, %xmm15
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,5]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pandn %xmm1, %xmm6
-; SSE-NEXT: por %xmm0, %xmm6
+; SSE-NEXT: pandn %xmm1, %xmm10
+; SSE-NEXT: por %xmm15, %xmm10
; SSE-NEXT: por %xmm11, %xmm12
; SSE-NEXT: movdqa %xmm12, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3],xmm12[4],xmm8[4],xmm12[5],xmm8[5],xmm12[6],xmm8[6],xmm12[7],xmm8[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[0,1,1,3]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm7[8],xmm1[9],xmm7[9],xmm1[10],xmm7[10],xmm1[11],xmm7[11],xmm1[12],xmm7[12],xmm1[13],xmm7[13],xmm1[14],xmm7[14],xmm1[15],xmm7[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3],xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm12[0,1,1,3]
; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,3],xmm1[1,2]
-; SSE-NEXT: movdqa %xmm15, %xmm1
-; SSE-NEXT: pand %xmm4, %xmm1
-; SSE-NEXT: pandn %xmm5, %xmm4
-; SSE-NEXT: por %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm8[8],xmm4[9],xmm8[9],xmm4[10],xmm8[10],xmm4[11],xmm8[11],xmm4[12],xmm8[12],xmm4[13],xmm8[13],xmm4[14],xmm8[14],xmm4[15],xmm8[15]
-; SSE-NEXT: pand %xmm13, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm4
+; SSE-NEXT: pandn %xmm8, %xmm5
+; SSE-NEXT: por %xmm4, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm7[8],xmm5[9],xmm7[9],xmm5[10],xmm7[10],xmm5[11],xmm7[11],xmm5[12],xmm7[12],xmm5[13],xmm7[13],xmm5[14],xmm7[14],xmm5[15],xmm7[15]
+; SSE-NEXT: pand %xmm13, %xmm5
; SSE-NEXT: pandn %xmm1, %xmm13
; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,2,3,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm12[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,3,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,7]
-; SSE-NEXT: packuswb %xmm1, %xmm0
-; SSE-NEXT: por %xmm4, %xmm13
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,0,0,0,65535,65535]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm13[3,1,0,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6,4]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,2,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,6,7]
-; SSE-NEXT: packuswb %xmm4, %xmm4
-; SSE-NEXT: pand %xmm3, %xmm4
-; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,3,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
; SSE-NEXT: packuswb %xmm1, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm2[2,1]
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, (%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, (%rdx)
-; SSE-NEXT: movdqa %xmm10, (%rcx)
-; SSE-NEXT: movdqa %xmm6, (%r8)
-; SSE-NEXT: movaps %xmm4, (%r9)
+; SSE-NEXT: por %xmm5, %xmm13
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,0,0,0,65535,65535]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm13[3,1,0,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,6,4]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,1,2,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,6,7]
+; SSE-NEXT: packuswb %xmm5, %xmm5
+; SSE-NEXT: pand %xmm3, %xmm5
+; SSE-NEXT: pandn %xmm2, %xmm3
+; SSE-NEXT: por %xmm5, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm6[3,1,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm9[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,1,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,0,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
+; SSE-NEXT: packuswb %xmm1, %xmm5
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm5[2,1]
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: movaps %xmm1, (%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: movaps %xmm1, (%rdx)
+; SSE-NEXT: movdqa %xmm0, (%rcx)
+; SSE-NEXT: movdqa %xmm10, (%r8)
+; SSE-NEXT: movaps %xmm3, (%r9)
; SSE-NEXT: retq
;
; AVX-LABEL: load_i8_stride5_vf16:
@@ -2115,137 +2126,131 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE: # %bb.0:
; SSE-NEXT: subq $184, %rsp
; SSE-NEXT: movdqa (%rdi), %xmm9
-; SSE-NEXT: movdqa 16(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 16(%rdi), %xmm11
; SSE-NEXT: movdqa 32(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 48(%rdi), %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 48(%rdi), %xmm5
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255]
; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm4, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: pxor %xmm5, %xmm5
+; SSE-NEXT: pxor %xmm6, %xmm6
; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255]
-; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255]
+; SSE-NEXT: movdqa %xmm8, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm15 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; SSE-NEXT: movdqa %xmm15, %xmm1
-; SSE-NEXT: pandn %xmm3, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT: pandn %xmm9, %xmm11
-; SSE-NEXT: movdqa {{.*#+}} xmm14 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; SSE-NEXT: movdqa %xmm14, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: pandn %xmm11, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: pandn %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
+; SSE-NEXT: movdqa %xmm13, %xmm2
; SSE-NEXT: pandn %xmm9, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: pandn %xmm9, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, %xmm2
+; SSE-NEXT: movdqa %xmm10, %xmm2
; SSE-NEXT: pandn %xmm9, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm15, %xmm9
+; SSE-NEXT: pand %xmm10, %xmm9
; SSE-NEXT: por %xmm1, %xmm9
; SSE-NEXT: movdqa %xmm9, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,0,0,65535,65535]
-; SSE-NEXT: movdqa %xmm1, %xmm6
-; SSE-NEXT: pandn %xmm2, %xmm6
-; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15]
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm2, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm6[8],xmm9[9],xmm6[9],xmm9[10],xmm6[10],xmm9[11],xmm6[11],xmm9[12],xmm6[12],xmm9[13],xmm6[13],xmm9[14],xmm6[14],xmm9[15],xmm6[15]
; SSE-NEXT: pand %xmm1, %xmm9
-; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: por %xmm3, %xmm9
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm9[0,2,1,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,1,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,7]
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa 64(%rdi), %xmm6
-; SSE-NEXT: movdqa %xmm6, %xmm3
+; SSE-NEXT: movdqa 64(%rdi), %xmm7
+; SSE-NEXT: movdqa %xmm7, %xmm15
; SSE-NEXT: pxor %xmm0, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: movdqa %xmm6, %xmm8
-; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm3[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm3[2,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
+; SSE-NEXT: movdqa %xmm7, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm15[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm15[2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,4]
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
-; SSE-NEXT: movdqa %xmm9, %xmm6
-; SSE-NEXT: pandn %xmm0, %xmm6
+; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: pandn %xmm0, %xmm3
; SSE-NEXT: pand %xmm9, %xmm2
-; SSE-NEXT: por %xmm2, %xmm6
-; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 112(%rdi), %xmm10
+; SSE-NEXT: por %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 112(%rdi), %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: pandn %xmm10, %xmm0
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 128(%rdi), %xmm7
-; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: movdqa 128(%rdi), %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[0,1,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,1,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm6[8],xmm2[9],xmm6[9],xmm2[10],xmm6[10],xmm2[11],xmm6[11],xmm2[12],xmm6[12],xmm2[13],xmm6[13],xmm2[14],xmm6[14],xmm2[15],xmm6[15]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSE-NEXT: packuswb %xmm2, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,3]
-; SSE-NEXT: movdqa %xmm13, %xmm2
-; SSE-NEXT: movdqa %xmm13, %xmm3
+; SSE-NEXT: movdqa %xmm8, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: movdqa 96(%rdi), %xmm4
-; SSE-NEXT: movdqa %xmm4, (%rsp) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: pandn %xmm4, %xmm0
-; SSE-NEXT: movdqa 80(%rdi), %xmm6
-; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm15, %xmm6
-; SSE-NEXT: por %xmm0, %xmm6
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
-; SSE-NEXT: pand %xmm1, %xmm6
+; SSE-NEXT: movdqa 96(%rdi), %xmm12
+; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: pandn %xmm12, %xmm0
+; SSE-NEXT: movdqa 80(%rdi), %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm10, %xmm3
+; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm6[8],xmm3[9],xmm6[9],xmm3[10],xmm6[10],xmm3[11],xmm6[11],xmm3[12],xmm6[12],xmm3[13],xmm6[13],xmm3[14],xmm6[14],xmm3[15],xmm6[15]
+; SSE-NEXT: pand %xmm1, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: por %xmm6, %xmm1
+; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,1,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm13, %xmm0
+; SSE-NEXT: pand %xmm8, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa 144(%rdi), %xmm12
-; SSE-NEXT: movdqa %xmm12, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE-NEXT: movdqa 144(%rdi), %xmm4
+; SSE-NEXT: movdqa %xmm4, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm5[8],xmm12[9],xmm5[9],xmm12[10],xmm5[10],xmm12[11],xmm5[11],xmm12[12],xmm5[12],xmm12[13],xmm5[13],xmm12[14],xmm5[14],xmm12[15],xmm5[15]
-; SSE-NEXT: movdqa %xmm12, %xmm1
-; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm4, (%rsp) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
@@ -2257,15 +2262,14 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pand %xmm9, %xmm0
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm0
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pand %xmm15, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pand %xmm10, %xmm5
+; SSE-NEXT: por %xmm0, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; SSE-NEXT: movdqa %xmm5, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm0[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
@@ -2273,37 +2277,37 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
-; SSE-NEXT: psllq $48, %xmm1
-; SSE-NEXT: packuswb %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm13, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm4, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT: pand %xmm13, %xmm1
-; SSE-NEXT: por %xmm11, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm6
-; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
+; SSE-NEXT: psllq $48, %xmm5
+; SSE-NEXT: packuswb %xmm0, %xmm5
+; SSE-NEXT: movdqa %xmm8, %xmm2
+; SSE-NEXT: pandn %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm11, %xmm1
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm6[8],xmm3[9],xmm6[9],xmm3[10],xmm6[10],xmm3[11],xmm6[11],xmm3[12],xmm6[12],xmm3[13],xmm6[13],xmm3[14],xmm6[14],xmm3[15],xmm6[15]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,0,65535,65535,65535,0]
-; SSE-NEXT: movdqa %xmm0, %xmm11
-; SSE-NEXT: pandn %xmm6, %xmm11
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm5
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: por %xmm11, %xmm1
+; SSE-NEXT: por %xmm5, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,4,5,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm3, %xmm11
+; SSE-NEXT: pand %xmm8, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm8, %xmm2
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm6[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm2[0,2]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm6[0,1,2,3,4,7,6,7]
+; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm7, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm15[3,0]
+; SSE-NEXT: movaps %xmm15, %xmm3
+; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm2[0,2]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,4]
; SSE-NEXT: packuswb %xmm2, %xmm2
@@ -2312,70 +2316,71 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pand %xmm9, %xmm1
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, %xmm2
-; SSE-NEXT: pandn %xmm10, %xmm2
-; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: pand %xmm15, %xmm1
+; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: pand %xmm10, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE-NEXT: movdqa %xmm1, %xmm6
-; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,0],xmm2[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm2[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm6[0,1,2,3,6,5,6,7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm6[8],xmm2[9],xmm6[9],xmm2[10],xmm6[10],xmm2[11],xmm6[11],xmm2[12],xmm6[12],xmm2[13],xmm6[13],xmm2[14],xmm6[14],xmm2[15],xmm6[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm2[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm2[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
; SSE-NEXT: psllq $48, %xmm1
; SSE-NEXT: packuswb %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm13, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: pandn %xmm7, %xmm2
-; SSE-NEXT: movdqa (%rsp), %xmm8 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm8, %xmm6
-; SSE-NEXT: pand %xmm13, %xmm6
-; SSE-NEXT: por %xmm2, %xmm6
-; SSE-NEXT: movdqa %xmm6, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; SSE-NEXT: pand %xmm0, %xmm6
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm8, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: pandn %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm12, %xmm3
+; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: por %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm6[8],xmm2[9],xmm6[9],xmm2[10],xmm6[10],xmm2[11],xmm6[11],xmm2[12],xmm6[12],xmm2[13],xmm6[13],xmm2[14],xmm6[14],xmm2[15],xmm6[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
+; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: por %xmm6, %xmm0
+; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,4,5,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm11, %xmm2
-; SSE-NEXT: pand %xmm11, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[2,0],xmm2[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm12[0,2]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,5,4]
-; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: pand %xmm9, %xmm0
; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm14, %xmm4
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,0,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm3, %xmm6
-; SSE-NEXT: pandn %xmm2, %xmm6
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; SSE-NEXT: pand %xmm3, %xmm4
-; SSE-NEXT: por %xmm6, %xmm4
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[2,1,2,3,4,5,6,7]
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm1[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0,2]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,7,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
+; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: pand %xmm9, %xmm2
+; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm13, %xmm11
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm11, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,65535,0,0,65535,65535,65535]
+; SSE-NEXT: movdqa %xmm8, %xmm2
+; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3],xmm11[4],xmm6[4],xmm11[5],xmm6[5],xmm11[6],xmm6[6],xmm11[7],xmm6[7]
+; SSE-NEXT: pand %xmm8, %xmm11
+; SSE-NEXT: por %xmm2, %xmm11
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
@@ -2383,40 +2388,35 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,0,0,65535,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm1, %xmm10
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm13, %xmm6
-; SSE-NEXT: movdqa %xmm13, %xmm12
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; SSE-NEXT: pandn %xmm11, %xmm6
-; SSE-NEXT: movdqa %xmm14, %xmm4
-; SSE-NEXT: pandn %xmm11, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm15, %xmm11
-; SSE-NEXT: movdqa %xmm15, %xmm4
-; SSE-NEXT: por %xmm0, %xmm11
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm5[0],xmm11[1],xmm5[1],xmm11[2],xmm5[2],xmm11[3],xmm5[3],xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm11[0,1,2,0]
-; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm0[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm11[0,2]
+; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: pandn %xmm4, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm14 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: pandn %xmm3, %xmm14
+; SSE-NEXT: movdqa %xmm13, %xmm11
+; SSE-NEXT: pandn %xmm3, %xmm11
+; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm10, %xmm3
+; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm3[0,1,2,0]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm0[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0,2]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
-; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm13[0,1,2,3,4,5,6,5]
-; SSE-NEXT: packuswb %xmm0, %xmm11
-; SSE-NEXT: pand %xmm10, %xmm11
-; SSE-NEXT: por %xmm2, %xmm11
-; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: # xmm0 = mem[1,1,1,1]
-; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: # xmm2 = mem[0,2,2,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm11[0,1,2,3,4,5,6,5]
+; SSE-NEXT: packuswb %xmm0, %xmm3
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: por %xmm2, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[1,1,1,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
@@ -2424,264 +2424,260 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: movdqa %xmm9, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pand %xmm9, %xmm11
-; SSE-NEXT: por %xmm11, %xmm2
+; SSE-NEXT: pand %xmm9, %xmm3
+; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm14, %xmm0
-; SSE-NEXT: pandn %xmm7, %xmm0
-; SSE-NEXT: movdqa %xmm8, %xmm15
-; SSE-NEXT: movdqa %xmm8, %xmm2
-; SSE-NEXT: pand %xmm14, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
-; SSE-NEXT: movdqa %xmm3, %xmm11
-; SSE-NEXT: pandn %xmm0, %xmm11
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; SSE-NEXT: pand %xmm3, %xmm2
-; SSE-NEXT: por %xmm11, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[2,1,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: pandn %xmm5, %xmm0
+; SSE-NEXT: pand %xmm13, %xmm12
+; SSE-NEXT: por %xmm0, %xmm12
+; SSE-NEXT: movdqa %xmm12, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15]
+; SSE-NEXT: movdqa %xmm8, %xmm3
+; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm6[0],xmm12[1],xmm6[1],xmm12[2],xmm6[2],xmm12[3],xmm6[3],xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
+; SSE-NEXT: pand %xmm8, %xmm12
+; SSE-NEXT: por %xmm3, %xmm12
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm12[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm10, %xmm13
-; SSE-NEXT: pandn %xmm0, %xmm13
-; SSE-NEXT: movdqa %xmm4, %xmm11
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pandn %xmm2, %xmm11
-; SSE-NEXT: movdqa %xmm1, %xmm5
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm12, %xmm1
-; SSE-NEXT: pand %xmm12, %xmm0
-; SSE-NEXT: movdqa %xmm2, %xmm7
-; SSE-NEXT: pand %xmm12, %xmm7
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: pandn %xmm8, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm14, %xmm5
-; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm14, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pandn %xmm8, %xmm14
-; SSE-NEXT: pand %xmm4, %xmm8
-; SSE-NEXT: por %xmm11, %xmm8
-; SSE-NEXT: movdqa %xmm8, %xmm11
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm1[8],xmm11[9],xmm1[9],xmm11[10],xmm1[10],xmm11[11],xmm1[11],xmm11[12],xmm1[12],xmm11[13],xmm1[13],xmm11[14],xmm1[14],xmm11[15],xmm1[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3],xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[0,1,2,0]
-; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm11[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,1],xmm8[0,2]
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm11[2,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,3,2,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,6,6,6,6]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,5]
-; SSE-NEXT: packuswb %xmm8, %xmm1
-; SSE-NEXT: pand %xmm10, %xmm1
-; SSE-NEXT: por %xmm13, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm4[1,1,1,1]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm5[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm8[0],xmm11[1],xmm8[1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm11[0,3,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,7,6,5]
-; SSE-NEXT: packuswb %xmm8, %xmm11
-; SSE-NEXT: movdqa %xmm9, %xmm12
-; SSE-NEXT: pandn %xmm11, %xmm12
-; SSE-NEXT: pand %xmm9, %xmm1
-; SSE-NEXT: por %xmm1, %xmm12
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255]
-; SSE-NEXT: pand %xmm13, %xmm1
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm1, %xmm11
-; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm2[8],xmm11[9],xmm2[9],xmm11[10],xmm2[10],xmm11[11],xmm2[11],xmm11[12],xmm2[12],xmm11[13],xmm2[13],xmm11[14],xmm2[14],xmm11[15],xmm2[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm11[2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,5]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,0]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,0,1,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,4,6,7]
-; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm0, %xmm3
; SSE-NEXT: movdqa %xmm10, %xmm11
-; SSE-NEXT: pandn %xmm1, %xmm11
-; SSE-NEXT: por %xmm6, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,0,1,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,6]
-; SSE-NEXT: packuswb %xmm0, %xmm1
-; SSE-NEXT: pand %xmm10, %xmm1
-; SSE-NEXT: por %xmm11, %xmm1
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT: movaps %xmm10, %xmm0
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm6[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[0,2]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,4,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,7,5]
-; SSE-NEXT: packuswb %xmm0, %xmm6
-; SSE-NEXT: movdqa %xmm9, %xmm8
-; SSE-NEXT: pandn %xmm6, %xmm8
-; SSE-NEXT: pand %xmm9, %xmm1
-; SSE-NEXT: por %xmm1, %xmm8
-; SSE-NEXT: movdqa %xmm13, %xmm0
-; SSE-NEXT: pand %xmm13, %xmm15
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: por %xmm15, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pxor %xmm6, %xmm6
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[2,0]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: por %xmm7, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm6[8],xmm2[9],xmm6[9],xmm2[10],xmm6[10],xmm2[11],xmm6[11],xmm2[12],xmm6[12],xmm2[13],xmm6[13],xmm2[14],xmm6[14],xmm2[15],xmm6[15]
-; SSE-NEXT: pxor %xmm13, %xmm13
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,0,1,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,6]
-; SSE-NEXT: packuswb %xmm2, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,0,0,65535,65535,65535,65535,65535]
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm0[0,1,2,3,4,5,6,5]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[3,1,2,0]
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,0,1,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,4,6,7]
-; SSE-NEXT: packuswb %xmm6, %xmm6
-; SSE-NEXT: pandn %xmm6, %xmm2
-; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: pandn %xmm5, %xmm11
; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: movdqa %xmm4, %xmm15
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm5[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm2[0,2]
-; SSE-NEXT: pand %xmm9, %xmm1
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm5[0,1,2,3,4,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,7,5]
-; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pandn %xmm2, %xmm9
-; SSE-NEXT: por %xmm1, %xmm9
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm13[8],xmm1[9],xmm13[9],xmm1[10],xmm13[10],xmm1[11],xmm13[11],xmm1[12],xmm13[12],xmm1[13],xmm13[13],xmm1[14],xmm13[14],xmm1[15],xmm13[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3],xmm0[4],xmm13[4],xmm0[5],xmm13[5],xmm0[6],xmm13[6],xmm0[7],xmm13[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[1,2]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,3,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,3,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
-; SSE-NEXT: packuswb %xmm1, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,0,0,0,65535,65535]
-; SSE-NEXT: movdqa %xmm4, %xmm6
-; SSE-NEXT: pandn %xmm2, %xmm6
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; SSE-NEXT: pand %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm5, %xmm12
+; SSE-NEXT: pand %xmm4, %xmm12
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: pandn %xmm7, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm13, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pandn %xmm7, %xmm13
+; SSE-NEXT: pand %xmm10, %xmm7
+; SSE-NEXT: por %xmm11, %xmm7
+; SSE-NEXT: movdqa %xmm7, %xmm11
+; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm6[8],xmm11[9],xmm6[9],xmm11[10],xmm6[10],xmm11[11],xmm6[11],xmm11[12],xmm6[12],xmm11[13],xmm6[13],xmm11[14],xmm6[14],xmm11[15],xmm6[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,1,2,0]
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm11[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,1],xmm7[0,2]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm11[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,3,2,1,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,6,6,6]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,5]
+; SSE-NEXT: packuswb %xmm7, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm10[1,1,1,1]
+; SSE-NEXT: movdqa (%rsp), %xmm4 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[0,3,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,5]
+; SSE-NEXT: packuswb %xmm3, %xmm3
+; SSE-NEXT: movdqa %xmm9, %xmm7
+; SSE-NEXT: pandn %xmm3, %xmm7
+; SSE-NEXT: pand %xmm9, %xmm2
+; SSE-NEXT: por %xmm2, %xmm7
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm15, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255]
+; SSE-NEXT: pand %xmm6, %xmm2
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3],xmm0[4],xmm13[4],xmm0[5],xmm13[5],xmm0[6],xmm13[6],xmm0[7],xmm13[7]
-; SSE-NEXT: movdqa %xmm3, %xmm11
-; SSE-NEXT: pandn %xmm0, %xmm11
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
-; SSE-NEXT: pand %xmm3, %xmm2
-; SSE-NEXT: por %xmm11, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,0,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,4]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: pxor %xmm11, %xmm11
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm11[8],xmm3[9],xmm11[9],xmm3[10],xmm11[10],xmm3[11],xmm11[11],xmm3[12],xmm11[12],xmm3[13],xmm11[13],xmm3[14],xmm11[14],xmm3[15],xmm11[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm3[2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,1,2,0]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,0,1,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,4,6,7]
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pand %xmm4, %xmm2
-; SSE-NEXT: por %xmm6, %xmm2
-; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; SSE-NEXT: # xmm6 = mem[3,1,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,2,1,4,5,6,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm10[0,1,0,3,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm6[0],xmm10[1],xmm6[1]
-; SSE-NEXT: packuswb %xmm1, %xmm10
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm10[2,1]
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm14, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm13[8],xmm1[9],xmm13[9],xmm1[10],xmm13[10],xmm1[11],xmm13[11],xmm1[12],xmm13[12],xmm1[13],xmm13[13],xmm1[14],xmm13[14],xmm1[15],xmm13[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm14[0,1,1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,3],xmm1[1,2]
-; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: pand %xmm5, %xmm0
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; SSE-NEXT: por %xmm0, %xmm5
-; SSE-NEXT: movdqa %xmm5, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm13[8],xmm5[9],xmm13[9],xmm5[10],xmm13[10],xmm5[11],xmm13[11],xmm5[12],xmm13[12],xmm5[13],xmm13[13],xmm5[14],xmm13[14],xmm5[15],xmm13[15]
-; SSE-NEXT: pand %xmm3, %xmm5
-; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: por %xmm5, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[3,1,0,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm2, %xmm3
+; SSE-NEXT: por %xmm14, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm11[8],xmm0[9],xmm11[9],xmm0[10],xmm11[10],xmm0[11],xmm11[11],xmm0[12],xmm11[12],xmm0[13],xmm11[13],xmm0[14],xmm11[14],xmm0[15],xmm11[15]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm2[2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,0,1,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,5,5,6]
+; SSE-NEXT: packuswb %xmm0, %xmm5
+; SSE-NEXT: pand %xmm1, %xmm5
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; SSE-NEXT: movaps %xmm14, %xmm0
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[0,2]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,7,5]
+; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: pandn %xmm0, %xmm2
+; SSE-NEXT: pand %xmm9, %xmm5
+; SSE-NEXT: por %xmm5, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: pand %xmm6, %xmm0
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT: por %xmm0, %xmm6
+; SSE-NEXT: movdqa %xmm6, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,1],xmm0[2,0]
+; SSE-NEXT: movaps %xmm6, %xmm5
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: por %xmm12, %xmm6
+; SSE-NEXT: movdqa %xmm6, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15]
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,1],xmm0[2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm6[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,0,1,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
+; SSE-NEXT: packuswb %xmm3, %xmm0
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm5[0,1,2,3,4,5,6,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,2,0]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,0,1,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,4,6,7]
+; SSE-NEXT: packuswb %xmm3, %xmm3
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: pandn %xmm3, %xmm1
+; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm10, %xmm6
+; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm4[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0,2]
+; SSE-NEXT: pand %xmm9, %xmm1
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm4[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,7,5]
+; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: por %xmm1, %xmm9
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm11[8],xmm0[9],xmm11[9],xmm0[10],xmm11[10],xmm0[11],xmm11[11],xmm0[12],xmm11[12],xmm0[13],xmm11[13],xmm0[14],xmm11[14],xmm0[15],xmm11[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm11[0],xmm3[1],xmm11[1],xmm3[2],xmm11[2],xmm3[3],xmm11[3],xmm3[4],xmm11[4],xmm3[5],xmm11[5],xmm3[6],xmm11[6],xmm3[7],xmm11[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,1,1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm0[1,2]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,3,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[2,1,3,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
+; SSE-NEXT: packuswb %xmm3, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,0,0,65535,65535]
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pandn %xmm1, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; SSE-NEXT: pand %xmm10, %xmm15
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm15, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3],xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
+; SSE-NEXT: movdqa %xmm8, %xmm5
+; SSE-NEXT: pandn %xmm1, %xmm5
+; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm11[8],xmm15[9],xmm11[9],xmm15[10],xmm11[10],xmm15[11],xmm11[11],xmm15[12],xmm11[12],xmm15[13],xmm11[13],xmm15[14],xmm11[14],xmm15[15],xmm11[15]
+; SSE-NEXT: pand %xmm8, %xmm15
+; SSE-NEXT: por %xmm5, %xmm15
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm15[3,1,0,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,4]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm4, %xmm1
-; SSE-NEXT: movdqa %xmm4, %xmm7
-; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,2,3,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm14[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,1,3,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm6[0,1,2,3,4,5,4,7]
-; SSE-NEXT: packuswb %xmm3, %xmm4
-; SSE-NEXT: pandn %xmm4, %xmm7
-; SSE-NEXT: por %xmm7, %xmm1
+; SSE-NEXT: pand %xmm0, %xmm1
+; SSE-NEXT: por %xmm4, %xmm1
; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[3,1,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm15[0,2,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm14[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,2,1,4,5,6,7]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,0,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; SSE-NEXT: packuswb %xmm3, %xmm5
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,1]
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, 16(%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, (%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, 16(%rdx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, (%rdx)
-; SSE-NEXT: movdqa %xmm12, 16(%rcx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, (%rcx)
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm13, %xmm4
+; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm11[8],xmm4[9],xmm11[9],xmm4[10],xmm11[10],xmm4[11],xmm11[11],xmm4[12],xmm11[12],xmm4[13],xmm11[13],xmm4[14],xmm11[14],xmm4[15],xmm11[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3],xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[0,1,1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,3],xmm4[1,2]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm10, %xmm5
+; SSE-NEXT: pand %xmm10, %xmm4
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT: por %xmm4, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3],xmm4[4],xmm11[4],xmm4[5],xmm11[5],xmm4[6],xmm11[6],xmm4[7],xmm11[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm11[8],xmm5[9],xmm11[9],xmm5[10],xmm11[10],xmm5[11],xmm11[11],xmm5[12],xmm11[12],xmm5[13],xmm11[13],xmm5[14],xmm11[14],xmm5[15],xmm11[15]
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: pandn %xmm4, %xmm8
+; SSE-NEXT: por %xmm5, %xmm8
+; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,2,3,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm13[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[2,1,3,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,7]
+; SSE-NEXT: packuswb %xmm4, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[3,1,0,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,6,4]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,1,2,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,6,7]
+; SSE-NEXT: packuswb %xmm5, %xmm5
+; SSE-NEXT: pand %xmm0, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: por %xmm5, %xmm0
+; SSE-NEXT: pshufd $231, (%rsp), %xmm3 # 16-byte Folded Reload
+; SSE-NEXT: # xmm3 = mem[3,1,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm6[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,1,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,0,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; SSE-NEXT: packuswb %xmm4, %xmm5
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,1]
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movaps %xmm3, 16(%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movaps %xmm3, (%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movaps %xmm3, 16(%rdx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movaps %xmm3, (%rdx)
+; SSE-NEXT: movdqa %xmm7, 16(%rcx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movaps %xmm3, (%rcx)
; SSE-NEXT: movdqa %xmm9, 16(%r8)
-; SSE-NEXT: movdqa %xmm8, (%r8)
-; SSE-NEXT: movaps %xmm1, 16(%r9)
-; SSE-NEXT: movaps %xmm2, (%r9)
+; SSE-NEXT: movdqa %xmm2, (%r8)
+; SSE-NEXT: movaps %xmm0, 16(%r9)
+; SSE-NEXT: movaps %xmm1, (%r9)
; SSE-NEXT: addq $184, %rsp
; SSE-NEXT: retq
;
@@ -4067,1131 +4063,1140 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind {
; SSE-LABEL: load_i8_stride5_vf64:
; SSE: # %bb.0:
-; SSE-NEXT: subq $552, %rsp # imm = 0x228
-; SSE-NEXT: movdqa 160(%rdi), %xmm9
-; SSE-NEXT: movdqa 176(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: subq $568, %rsp # imm = 0x238
+; SSE-NEXT: movdqa 160(%rdi), %xmm10
+; SSE-NEXT: movdqa 176(%rdi), %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 208(%rdi), %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 192(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255]
-; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm14 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255]
+; SSE-NEXT: movdqa %xmm14, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm4, %xmm1
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm2, %xmm14
+; SSE-NEXT: pand %xmm14, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: pxor %xmm12, %xmm12
+; SSE-NEXT: pxor %xmm9, %xmm9
; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm12[8],xmm1[9],xmm12[9],xmm1[10],xmm12[10],xmm1[11],xmm12[11],xmm1[12],xmm12[12],xmm1[13],xmm12[13],xmm1[14],xmm12[14],xmm1[15],xmm12[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,1,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255]
-; SSE-NEXT: movdqa %xmm11, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm15 = [255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255]
+; SSE-NEXT: movdqa %xmm15, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: pandn %xmm9, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; SSE-NEXT: movdqa %xmm7, %xmm0
+; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm4, %xmm6
+; SSE-NEXT: pandn %xmm10, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm3, %xmm5
+; SSE-NEXT: pandn %xmm10, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm14, %xmm3
+; SSE-NEXT: pandn %xmm10, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm7, %xmm2
; SSE-NEXT: movdqa %xmm7, %xmm3
-; SSE-NEXT: pandn %xmm9, %xmm3
+; SSE-NEXT: pandn %xmm10, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm14, %xmm2
-; SSE-NEXT: pandn %xmm9, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm2
-; SSE-NEXT: pandn %xmm9, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm10, %xmm9
-; SSE-NEXT: por %xmm0, %xmm9
-; SSE-NEXT: movdqa %xmm9, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
-; SSE-NEXT: movdqa {{.*#+}} xmm8 = [0,65535,65535,65535,0,0,65535,65535]
-; SSE-NEXT: movdqa %xmm8, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm12[8],xmm9[9],xmm12[9],xmm9[10],xmm12[10],xmm9[11],xmm12[11],xmm9[12],xmm12[12],xmm9[13],xmm12[13],xmm9[14],xmm12[14],xmm9[15],xmm12[15]
-; SSE-NEXT: pand %xmm8, %xmm9
-; SSE-NEXT: por %xmm2, %xmm9
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm9[0,2,1,3,4,5,6,7]
+; SSE-NEXT: pand %xmm7, %xmm10
+; SSE-NEXT: por %xmm0, %xmm10
+; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [0,65535,65535,65535,0,0,65535,65535]
+; SSE-NEXT: movdqa %xmm7, %xmm3
+; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
+; SSE-NEXT: pand %xmm7, %xmm10
+; SSE-NEXT: por %xmm3, %xmm10
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[0,2,1,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm11, %xmm0
+; SSE-NEXT: pand %xmm15, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movdqa 224(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
+; SSE-NEXT: movdqa 224(%rdi), %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pxor %xmm9, %xmm9
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,3]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm3[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,4]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
-; SSE-NEXT: movdqa %xmm6, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: pand %xmm6, %xmm0
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
+; SSE-NEXT: movdqa %xmm8, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 32(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm14, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa 48(%rdi), %xmm15
-; SSE-NEXT: movdqa %xmm15, %xmm1
+; SSE-NEXT: movdqa 48(%rdi), %xmm1
+; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
; SSE-NEXT: pand %xmm14, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,1,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,3]
-; SSE-NEXT: movdqa %xmm11, %xmm1
+; SSE-NEXT: movdqa %xmm15, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
; SSE-NEXT: movdqa 16(%rdi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: movdqa (%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm7, %xmm4
-; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm14, %xmm4
-; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm4
-; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm10, %xmm3
-; SSE-NEXT: por %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1],xmm2[2],xmm9[2],xmm2[3],xmm9[3],xmm2[4],xmm9[4],xmm2[5],xmm9[5],xmm2[6],xmm9[6],xmm2[7],xmm9[7]
-; SSE-NEXT: movdqa %xmm8, %xmm4
-; SSE-NEXT: pandn %xmm2, %xmm4
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm9[8],xmm3[9],xmm9[9],xmm3[10],xmm9[10],xmm3[11],xmm9[11],xmm3[12],xmm9[12],xmm3[13],xmm9[13],xmm3[14],xmm9[14],xmm3[15],xmm9[15]
-; SSE-NEXT: pand %xmm8, %xmm3
-; SSE-NEXT: por %xmm4, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,2,1,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,1,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,7]
-; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pand %xmm11, %xmm2
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa 64(%rdi), %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: movdqa (%rdi), %xmm4
+; SSE-NEXT: movdqa %xmm6, %xmm0
+; SSE-NEXT: pandn %xmm4, %xmm6
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm5, %xmm6
+; SSE-NEXT: pandn %xmm4, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm14, %xmm5
+; SSE-NEXT: pandn %xmm4, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: pandn %xmm4, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm4, %xmm3
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm7, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm5
+; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm9[8],xmm4[9],xmm9[9],xmm4[10],xmm9[10],xmm4[11],xmm9[11],xmm4[12],xmm9[12],xmm4[13],xmm9[13],xmm4[14],xmm9[14],xmm4[15],xmm9[15]
+; SSE-NEXT: pand %xmm7, %xmm4
+; SSE-NEXT: por %xmm5, %xmm4
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm4[0,2,1,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,5,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,1,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,5,7]
+; SSE-NEXT: packuswb %xmm3, %xmm3
+; SSE-NEXT: pand %xmm15, %xmm3
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa 64(%rdi), %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm3[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm4[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm4[2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,4]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm6, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: pand %xmm6, %xmm2
-; SSE-NEXT: por %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 272(%rdi), %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm14, %xmm1
-; SSE-NEXT: pandn %xmm2, %xmm1
-; SSE-NEXT: movdqa 288(%rdi), %xmm13
-; SSE-NEXT: movdqa %xmm13, %xmm2
-; SSE-NEXT: pand %xmm14, %xmm2
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm9[8],xmm2[9],xmm9[9],xmm2[10],xmm9[10],xmm2[11],xmm9[11],xmm2[12],xmm9[12],xmm2[13],xmm9[13],xmm2[14],xmm9[14],xmm2[15],xmm9[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE-NEXT: packuswb %xmm2, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,3]
-; SSE-NEXT: movdqa %xmm11, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: movdqa 256(%rdi), %xmm1
-; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm4
+; SSE-NEXT: movdqa %xmm8, %xmm4
; SSE-NEXT: pandn %xmm1, %xmm4
-; SSE-NEXT: movdqa 240(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn %xmm3, %xmm1
-; SSE-NEXT: pandn %xmm3, %xmm7
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm14, %xmm7
-; SSE-NEXT: pandn %xmm3, %xmm7
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm7
-; SSE-NEXT: pandn %xmm3, %xmm7
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm10, %xmm3
-; SSE-NEXT: por %xmm4, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
-; SSE-NEXT: movdqa %xmm8, %xmm7
-; SSE-NEXT: pandn %xmm4, %xmm7
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm9[8],xmm3[9],xmm9[9],xmm3[10],xmm9[10],xmm3[11],xmm9[11],xmm3[12],xmm9[12],xmm3[13],xmm9[13],xmm3[14],xmm9[14],xmm3[15],xmm9[15]
; SSE-NEXT: pand %xmm8, %xmm3
-; SSE-NEXT: por %xmm7, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,1,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,5,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,1,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,5,7]
-; SSE-NEXT: packuswb %xmm3, %xmm3
-; SSE-NEXT: pand %xmm11, %xmm3
-; SSE-NEXT: por %xmm2, %xmm3
-; SSE-NEXT: movdqa 304(%rdi), %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm9[8],xmm2[9],xmm9[9],xmm2[10],xmm9[10],xmm2[11],xmm9[11],xmm2[12],xmm9[12],xmm2[13],xmm9[13],xmm2[14],xmm9[14],xmm2[15],xmm9[15]
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm4[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm4[2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,4]
-; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: pandn %xmm2, %xmm4
-; SSE-NEXT: pand %xmm6, %xmm3
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 112(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm14, %xmm2
-; SSE-NEXT: pandn %xmm3, %xmm2
-; SSE-NEXT: movdqa 128(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 272(%rdi), %xmm13
+; SSE-NEXT: movdqa %xmm14, %xmm1
+; SSE-NEXT: pandn %xmm13, %xmm1
+; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 288(%rdi), %xmm10
+; SSE-NEXT: movdqa %xmm10, %xmm3
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm14, %xmm3
-; SSE-NEXT: por %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1],xmm2[2],xmm9[2],xmm2[3],xmm9[3],xmm2[4],xmm9[4],xmm2[5],xmm9[5],xmm2[6],xmm9[6],xmm2[7],xmm9[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,1,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,7]
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,1,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,7]
; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm9[8],xmm3[9],xmm9[9],xmm3[10],xmm9[10],xmm3[11],xmm9[11],xmm3[12],xmm9[12],xmm3[13],xmm9[13],xmm3[14],xmm9[14],xmm3[15],xmm9[15]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,4,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE-NEXT: packuswb %xmm3, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,3]
-; SSE-NEXT: movdqa %xmm11, %xmm3
-; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: movdqa 96(%rdi), %xmm4
+; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; SSE-NEXT: packuswb %xmm3, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,3]
+; SSE-NEXT: movdqa %xmm15, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: movdqa 256(%rdi), %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: pandn %xmm1, %xmm5
+; SSE-NEXT: movdqa 240(%rdi), %xmm11
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pandn %xmm11, %xmm1
+; SSE-NEXT: movdqa %xmm6, %xmm4
+; SSE-NEXT: pandn %xmm11, %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm2
-; SSE-NEXT: pandn %xmm4, %xmm2
-; SSE-NEXT: movdqa 80(%rdi), %xmm4
+; SSE-NEXT: movdqa %xmm14, %xmm4
+; SSE-NEXT: pandn %xmm11, %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm10, %xmm4
-; SSE-NEXT: por %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1],xmm2[2],xmm9[2],xmm2[3],xmm9[3],xmm2[4],xmm9[4],xmm2[5],xmm9[5],xmm2[6],xmm9[6],xmm2[7],xmm9[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm9[8],xmm4[9],xmm9[9],xmm4[10],xmm9[10],xmm4[11],xmm9[11],xmm4[12],xmm9[12],xmm4[13],xmm9[13],xmm4[14],xmm9[14],xmm4[15],xmm9[15]
-; SSE-NEXT: pand %xmm8, %xmm4
-; SSE-NEXT: pandn %xmm2, %xmm8
-; SSE-NEXT: por %xmm4, %xmm8
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[0,2,1,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,1,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,7]
-; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pand %xmm11, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: movdqa 144(%rdi), %xmm12
-; SSE-NEXT: movdqa %xmm12, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pandn %xmm11, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm2, %xmm11
+; SSE-NEXT: por %xmm5, %xmm11
+; SSE-NEXT: movdqa %xmm11, %xmm5
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
+; SSE-NEXT: movdqa %xmm7, %xmm6
+; SSE-NEXT: pandn %xmm5, %xmm6
+; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm9[8],xmm11[9],xmm9[9],xmm11[10],xmm9[10],xmm11[11],xmm9[11],xmm11[12],xmm9[12],xmm11[13],xmm9[13],xmm11[14],xmm9[14],xmm11[15],xmm9[15]
+; SSE-NEXT: pand %xmm7, %xmm11
+; SSE-NEXT: por %xmm6, %xmm11
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm11[0,2,1,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,5,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,1,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,5,7]
+; SSE-NEXT: packuswb %xmm5, %xmm5
+; SSE-NEXT: pand %xmm15, %xmm5
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: movdqa 304(%rdi), %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm9[8],xmm12[9],xmm9[9],xmm12[10],xmm9[10],xmm12[11],xmm9[11],xmm12[12],xmm9[12],xmm12[13],xmm9[13],xmm12[14],xmm9[14],xmm12[15],xmm9[15]
-; SSE-NEXT: movdqa %xmm12, %xmm3
-; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm9[8],xmm3[9],xmm9[9],xmm3[10],xmm9[10],xmm3[11],xmm9[11],xmm3[12],xmm9[12],xmm3[13],xmm9[13],xmm3[14],xmm9[14],xmm3[15],xmm9[15]
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm4[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,4]
; SSE-NEXT: packuswb %xmm3, %xmm3
-; SSE-NEXT: movdqa %xmm6, %xmm14
-; SSE-NEXT: movdqa %xmm6, %xmm4
+; SSE-NEXT: movdqa %xmm8, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: pand %xmm6, %xmm2
-; SSE-NEXT: por %xmm2, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: por %xmm5, %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm2
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: pand %xmm10, %xmm3
-; SSE-NEXT: por %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm9[8],xmm2[9],xmm9[9],xmm2[10],xmm9[10],xmm2[11],xmm9[11],xmm2[12],xmm9[12],xmm2[13],xmm9[13],xmm2[14],xmm9[14],xmm2[15],xmm9[15]
+; SSE-NEXT: movdqa 112(%rdi), %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm14, %xmm3
+; SSE-NEXT: pandn %xmm4, %xmm3
+; SSE-NEXT: movdqa 128(%rdi), %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm14, %xmm5
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,0],xmm2[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm2[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
-; SSE-NEXT: psllq $48, %xmm3
-; SSE-NEXT: packuswb %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm11, %xmm4
-; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,1,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm9[8],xmm5[9],xmm9[9],xmm5[10],xmm9[10],xmm5[11],xmm9[11],xmm5[12],xmm9[12],xmm5[13],xmm9[13],xmm5[14],xmm9[14],xmm5[15],xmm9[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,4,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; SSE-NEXT: packuswb %xmm5, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,2,3]
+; SSE-NEXT: movdqa %xmm15, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm5
+; SSE-NEXT: movdqa 96(%rdi), %xmm14
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: pandn %xmm14, %xmm3
+; SSE-NEXT: movdqa 80(%rdi), %xmm6
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm2, %xmm6
+; SSE-NEXT: por %xmm3, %xmm6
; SSE-NEXT: movdqa %xmm6, %xmm3
-; SSE-NEXT: pand %xmm0, %xmm3
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm3, %xmm7
-; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,0,65535,65535,65535,0]
-; SSE-NEXT: movdqa %xmm2, %xmm8
-; SSE-NEXT: pandn %xmm7, %xmm8
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
-; SSE-NEXT: pand %xmm2, %xmm3
-; SSE-NEXT: por %xmm8, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,5]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm9[8],xmm6[9],xmm9[9],xmm6[10],xmm9[10],xmm6[11],xmm9[11],xmm6[12],xmm9[12],xmm6[13],xmm9[13],xmm6[14],xmm9[14],xmm6[15],xmm9[15]
+; SSE-NEXT: pand %xmm7, %xmm6
+; SSE-NEXT: pandn %xmm3, %xmm7
+; SSE-NEXT: por %xmm6, %xmm7
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[0,2,1,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,2,3,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,4,5,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,1,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,5,7]
; SSE-NEXT: packuswb %xmm3, %xmm3
-; SSE-NEXT: pand %xmm11, %xmm3
-; SSE-NEXT: por %xmm4, %xmm3
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm7[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm4[0,2]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm7[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,5,4]
-; SSE-NEXT: packuswb %xmm4, %xmm4
-; SSE-NEXT: movdqa %xmm14, %xmm7
-; SSE-NEXT: pandn %xmm4, %xmm7
-; SSE-NEXT: pand %xmm14, %xmm3
-; SSE-NEXT: por %xmm3, %xmm7
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm3
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm15, %xmm4
-; SSE-NEXT: pand %xmm10, %xmm4
-; SSE-NEXT: movdqa %xmm10, %xmm5
+; SSE-NEXT: pand %xmm15, %xmm3
+; SSE-NEXT: por %xmm5, %xmm3
+; SSE-NEXT: movdqa 144(%rdi), %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm9[8],xmm5[9],xmm9[9],xmm5[10],xmm9[10],xmm5[11],xmm9[11],xmm5[12],xmm9[12],xmm5[13],xmm9[13],xmm5[14],xmm9[14],xmm5[15],xmm9[15]
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm4[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm4[2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,6,4]
+; SSE-NEXT: packuswb %xmm5, %xmm5
+; SSE-NEXT: movdqa %xmm8, %xmm4
+; SSE-NEXT: pandn %xmm5, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm3
; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm9[8],xmm3[9],xmm9[9],xmm3[10],xmm9[10],xmm3[11],xmm9[11],xmm3[12],xmm9[12],xmm3[13],xmm9[13],xmm3[14],xmm9[14],xmm3[15],xmm9[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
-; SSE-NEXT: movdqa %xmm4, %xmm7
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm3[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm3[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm7[0,1,2,3,6,5,6,7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
+; SSE-NEXT: movdqa %xmm5, %xmm6
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,0],xmm3[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm3[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm6[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,7,7,7]
-; SSE-NEXT: psllq $48, %xmm4
-; SSE-NEXT: packuswb %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm11, %xmm3
-; SSE-NEXT: pandn %xmm4, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm10, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm8
-; SSE-NEXT: pand %xmm0, %xmm4
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm9[8],xmm0[9],xmm9[9],xmm0[10],xmm9[10],xmm0[11],xmm9[11],xmm0[12],xmm9[12],xmm0[13],xmm9[13],xmm0[14],xmm9[14],xmm0[15],xmm9[15]
-; SSE-NEXT: movdqa %xmm2, %xmm7
+; SSE-NEXT: psllq $48, %xmm5
+; SSE-NEXT: packuswb %xmm3, %xmm5
+; SSE-NEXT: movdqa %xmm15, %xmm6
+; SSE-NEXT: pandn %xmm5, %xmm6
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: pand %xmm0, %xmm5
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm5, %xmm7
+; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,0,0,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm3, %xmm11
+; SSE-NEXT: pandn %xmm7, %xmm11
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
+; SSE-NEXT: pand %xmm3, %xmm5
+; SSE-NEXT: por %xmm11, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,2,3,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,4,5,7]
+; SSE-NEXT: packuswb %xmm5, %xmm5
+; SSE-NEXT: pand %xmm15, %xmm5
+; SSE-NEXT: por %xmm6, %xmm5
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm7[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm6[0,2]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm7[0,1,2,3,4,7,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
+; SSE-NEXT: packuswb %xmm6, %xmm6
+; SSE-NEXT: movdqa %xmm8, %xmm7
+; SSE-NEXT: pandn %xmm6, %xmm7
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: por %xmm5, %xmm7
+; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm4, %xmm9
+; SSE-NEXT: movdqa %xmm4, %xmm5
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT: movdqa (%rsp), %xmm6 # 16-byte Reload
+; SSE-NEXT: pand %xmm4, %xmm6
+; SSE-NEXT: por %xmm5, %xmm6
+; SSE-NEXT: movdqa %xmm6, %xmm5
+; SSE-NEXT: pxor %xmm4, %xmm4
+; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3],xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE-NEXT: pxor %xmm12, %xmm12
+; SSE-NEXT: movdqa %xmm6, %xmm7
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm5[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm5[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm7[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,2,3,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7]
+; SSE-NEXT: psllq $48, %xmm6
+; SSE-NEXT: packuswb %xmm5, %xmm6
+; SSE-NEXT: movdqa %xmm15, %xmm5
+; SSE-NEXT: pandn %xmm6, %xmm5
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm4, %xmm6
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pand %xmm0, %xmm6
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm6, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm12[8],xmm0[9],xmm12[9],xmm0[10],xmm12[10],xmm0[11],xmm12[11],xmm0[12],xmm12[12],xmm0[13],xmm12[13],xmm0[14],xmm12[14],xmm0[15],xmm12[15]
+; SSE-NEXT: movdqa %xmm3, %xmm7
; SSE-NEXT: pandn %xmm0, %xmm7
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
-; SSE-NEXT: pand %xmm2, %xmm4
-; SSE-NEXT: por %xmm7, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,2,1,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
+; SSE-NEXT: pand %xmm3, %xmm6
+; SSE-NEXT: por %xmm7, %xmm6
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,2,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,4,5,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm11, %xmm0
-; SSE-NEXT: por %xmm3, %xmm0
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[0,2]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,5,4]
-; SSE-NEXT: packuswb %xmm3, %xmm3
-; SSE-NEXT: movdqa %xmm14, %xmm4
-; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: pand %xmm14, %xmm0
-; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm5, %xmm0
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm13, %xmm3
-; SSE-NEXT: pand %xmm5, %xmm3
-; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm9[8],xmm0[9],xmm9[9],xmm0[10],xmm9[10],xmm0[11],xmm9[11],xmm0[12],xmm9[12],xmm0[13],xmm9[13],xmm0[14],xmm9[14],xmm0[15],xmm9[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,0],xmm0[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm0[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm4[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: por %xmm5, %xmm0
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm6[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm5[0,2]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm6[0,1,2,3,4,7,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,5,4]
+; SSE-NEXT: packuswb %xmm5, %xmm5
+; SSE-NEXT: movdqa %xmm8, %xmm6
+; SSE-NEXT: pandn %xmm5, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: por %xmm0, %xmm6
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: pandn %xmm13, %xmm0
+; SSE-NEXT: pand %xmm9, %xmm10
+; SSE-NEXT: por %xmm0, %xmm10
+; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm12[8],xmm0[9],xmm12[9],xmm0[10],xmm12[10],xmm0[11],xmm12[11],xmm0[12],xmm12[12],xmm0[13],xmm12[13],xmm0[14],xmm12[14],xmm0[15],xmm12[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3],xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7]
+; SSE-NEXT: movdqa %xmm10, %xmm6
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,0],xmm0[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm0[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
-; SSE-NEXT: psllq $48, %xmm3
-; SSE-NEXT: packuswb %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload
-; SSE-NEXT: pand %xmm8, %xmm3
-; SSE-NEXT: movdqa %xmm8, %xmm7
-; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: pandn %xmm1, %xmm4
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
-; SSE-NEXT: pand %xmm2, %xmm3
-; SSE-NEXT: por %xmm4, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,1,3]
+; SSE-NEXT: psllq $48, %xmm10
+; SSE-NEXT: packuswb %xmm0, %xmm10
+; SSE-NEXT: movdqa %xmm15, %xmm0
+; SSE-NEXT: pandn %xmm10, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm13, %xmm5
+; SSE-NEXT: pand %xmm11, %xmm5
+; SSE-NEXT: por %xmm1, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm12[8],xmm1[9],xmm12[9],xmm1[10],xmm12[10],xmm1[11],xmm12[11],xmm1[12],xmm12[12],xmm1[13],xmm12[13],xmm1[14],xmm12[14],xmm1[15],xmm12[15]
+; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: pandn %xmm1, %xmm6
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; SSE-NEXT: pand %xmm3, %xmm5
+; SSE-NEXT: por %xmm6, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,2,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,4,5,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm11, %xmm1
+; SSE-NEXT: pand %xmm15, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm3[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[0,2]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm3[0,1,2,3,4,7,6,7]
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm5[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[0,2]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm14, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: pand %xmm14, %xmm1
-; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: movdqa %xmm8, %xmm5
+; SSE-NEXT: pandn %xmm0, %xmm5
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: por %xmm1, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm9, %xmm1
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: pand %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm5, %xmm8
+; SSE-NEXT: pand %xmm9, %xmm0
+; SSE-NEXT: movdqa %xmm9, %xmm6
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm1[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[2,3]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,6,5,6,7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm12[8],xmm1[9],xmm12[9],xmm1[10],xmm12[10],xmm1[11],xmm12[11],xmm1[12],xmm12[12],xmm1[13],xmm12[13],xmm1[14],xmm12[14],xmm1[15],xmm12[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm1[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm1[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
; SSE-NEXT: psllq $48, %xmm0
; SSE-NEXT: packuswb %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: pandn %xmm5, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: pand %xmm7, %xmm3
-; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
-; SSE-NEXT: pand %xmm2, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,1,3]
+; SSE-NEXT: movdqa %xmm11, %xmm1
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm14, %xmm5
+; SSE-NEXT: pand %xmm11, %xmm5
+; SSE-NEXT: por %xmm1, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm12[8],xmm1[9],xmm12[9],xmm1[10],xmm12[10],xmm1[11],xmm12[11],xmm1[12],xmm12[12],xmm1[13],xmm12[13],xmm1[14],xmm12[14],xmm1[15],xmm12[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; SSE-NEXT: pand %xmm3, %xmm5
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: por %xmm5, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,4,5,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm11, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm11
-; SSE-NEXT: por %xmm11, %xmm1
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[2,0],xmm2[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm12[0,2]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,7,6,7]
+; SSE-NEXT: pand %xmm15, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm15
+; SSE-NEXT: por %xmm1, %xmm15
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm14, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pand %xmm14, %xmm1
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm8, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm15
+; SSE-NEXT: por %xmm15, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
+; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm12[8],xmm1[9],xmm12[9],xmm1[10],xmm12[10],xmm1[11],xmm12[11],xmm1[12],xmm12[12],xmm1[13],xmm12[13],xmm1[14],xmm12[14],xmm1[15],xmm12[15]
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,65535,0,0,65535,65535,65535]
+; SSE-NEXT: movdqa %xmm10, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
+; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: por %xmm3, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,6,5,6,7]
+; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm9 = [0,0,0,65535,65535,65535,65535,65535]
+; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm6, %xmm2
; SSE-NEXT: movdqa %xmm6, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; SSE-NEXT: pand %xmm11, %xmm1
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm9[8],xmm2[9],xmm9[9],xmm2[10],xmm9[10],xmm2[11],xmm9[11],xmm2[12],xmm9[12],xmm2[13],xmm9[13],xmm2[14],xmm9[14],xmm2[15],xmm9[15]
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,0,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm6, %xmm3
-; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
-; SSE-NEXT: pand %xmm6, %xmm1
-; SSE-NEXT: por %xmm3, %xmm1
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: pandn %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: pandn %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm2, %xmm6
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: por %xmm1, %xmm6
+; SSE-NEXT: movdqa %xmm6, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm12[8],xmm1[9],xmm12[9],xmm1[10],xmm12[10],xmm1[11],xmm12[11],xmm1[12],xmm12[12],xmm1[13],xmm12[13],xmm1[14],xmm12[14],xmm1[15],xmm12[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,1,2,0]
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm1[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0,2]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm7[0,1,2,3,4,5,6,5]
+; SSE-NEXT: packuswb %xmm1, %xmm6
+; SSE-NEXT: pand %xmm9, %xmm6
+; SSE-NEXT: por %xmm3, %xmm6
+; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: # xmm1 = mem[1,1,1,1]
+; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE-NEXT: # xmm3 = mem[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[0,3,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
+; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: movdqa %xmm8, %xmm2
+; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: movdqa %xmm8, %xmm15
+; SSE-NEXT: por %xmm6, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm5, %xmm4
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15]
+; SSE-NEXT: movdqa %xmm10, %xmm6
+; SSE-NEXT: pandn %xmm3, %xmm6
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3],xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
+; SSE-NEXT: pand %xmm10, %xmm4
+; SSE-NEXT: por %xmm6, %xmm4
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm4[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [0,0,0,65535,65535,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm12, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm8, %xmm1
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm7, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pandn %xmm4, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm11, %xmm3
-; SSE-NEXT: pandn %xmm4, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm8, %xmm4
-; SSE-NEXT: por %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,1,2,0]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm1[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0,2]
+; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: movdqa (%rsp), %xmm2 # 16-byte Reload
+; SSE-NEXT: pandn %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm11, %xmm8
+; SSE-NEXT: movdqa %xmm11, %xmm4
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: pandn %xmm6, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm5, %xmm4
+; SSE-NEXT: pandn %xmm6, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm6
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: por %xmm1, %xmm6
+; SSE-NEXT: movdqa %xmm6, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm12[8],xmm1[9],xmm12[9],xmm1[10],xmm12[10],xmm1[11],xmm12[11],xmm1[12],xmm12[12],xmm1[13],xmm12[13],xmm1[14],xmm12[14],xmm1[15],xmm12[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,1,2,0]
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm1[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0,2]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm7[0,1,2,3,4,5,6,5]
-; SSE-NEXT: packuswb %xmm1, %xmm4
-; SSE-NEXT: pand %xmm12, %xmm4
-; SSE-NEXT: por %xmm2, %xmm4
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm7[0,1,2,3,4,5,6,5]
+; SSE-NEXT: packuswb %xmm1, %xmm6
+; SSE-NEXT: pand %xmm9, %xmm6
+; SSE-NEXT: por %xmm3, %xmm6
; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[1,1,1,1]
-; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: # xmm2 = mem[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,3,2,3,4,5,6,7]
+; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE-NEXT: # xmm3 = mem[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm14, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: pand %xmm14, %xmm4
-; SSE-NEXT: por %xmm4, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm11, %xmm10
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm10, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm9[8],xmm2[9],xmm9[9],xmm2[10],xmm9[10],xmm2[11],xmm9[11],xmm2[12],xmm9[12],xmm2[13],xmm9[13],xmm2[14],xmm9[14],xmm2[15],xmm9[15]
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: pandn %xmm2, %xmm4
-; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; SSE-NEXT: pand %xmm6, %xmm10
-; SSE-NEXT: por %xmm4, %xmm10
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm10[2,1,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm15, %xmm11
+; SSE-NEXT: movdqa %xmm15, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
+; SSE-NEXT: pand %xmm15, %xmm6
+; SSE-NEXT: por %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm5, %xmm13
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm13, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15]
+; SSE-NEXT: movdqa %xmm10, %xmm6
+; SSE-NEXT: pandn %xmm3, %xmm6
+; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; SSE-NEXT: pand %xmm10, %xmm13
+; SSE-NEXT: por %xmm6, %xmm13
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm13[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm12, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm8, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; SSE-NEXT: pandn %xmm15, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pandn %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm8, %xmm13
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: pandn %xmm6, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm11, %xmm3
-; SSE-NEXT: pandn %xmm4, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm8, %xmm4
-; SSE-NEXT: por %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,1,2,0]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm1[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0,2]
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: pandn %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm4, %xmm6
+; SSE-NEXT: movdqa %xmm4, %xmm8
+; SSE-NEXT: por %xmm1, %xmm6
+; SSE-NEXT: movdqa %xmm6, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm12[8],xmm1[9],xmm12[9],xmm1[10],xmm12[10],xmm1[11],xmm12[11],xmm1[12],xmm12[12],xmm1[13],xmm12[13],xmm1[14],xmm12[14],xmm1[15],xmm12[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,1,2,0]
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm1[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0,2]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm7[0,1,2,3,4,5,6,5]
-; SSE-NEXT: packuswb %xmm1, %xmm4
-; SSE-NEXT: pand %xmm12, %xmm4
-; SSE-NEXT: por %xmm2, %xmm4
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm7[0,1,2,3,4,5,6,5]
+; SSE-NEXT: packuswb %xmm1, %xmm6
+; SSE-NEXT: pand %xmm9, %xmm6
+; SSE-NEXT: por %xmm3, %xmm6
; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[1,1,1,1]
-; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: # xmm2 = mem[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,3,2,3,4,5,6,7]
+; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE-NEXT: # xmm3 = mem[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm14, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: pand %xmm14, %xmm4
-; SSE-NEXT: por %xmm4, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT: pand %xmm11, %xmm1
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm9[8],xmm2[9],xmm9[9],xmm2[10],xmm9[10],xmm2[11],xmm9[11],xmm2[12],xmm9[12],xmm2[13],xmm9[13],xmm2[14],xmm9[14],xmm2[15],xmm9[15]
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: pandn %xmm2, %xmm4
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
-; SSE-NEXT: pand %xmm6, %xmm1
-; SSE-NEXT: por %xmm4, %xmm1
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
+; SSE-NEXT: pand %xmm11, %xmm6
+; SSE-NEXT: por %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm5, %xmm7
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: pand %xmm5, %xmm14
+; SSE-NEXT: por %xmm1, %xmm14
+; SSE-NEXT: movdqa %xmm14, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm12[8],xmm1[9],xmm12[9],xmm1[10],xmm12[10],xmm1[11],xmm12[11],xmm1[12],xmm12[12],xmm1[13],xmm12[13],xmm1[14],xmm12[14],xmm1[15],xmm12[15]
+; SSE-NEXT: movdqa %xmm10, %xmm6
+; SSE-NEXT: pandn %xmm1, %xmm6
+; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm12[0],xmm14[1],xmm12[1],xmm14[2],xmm12[2],xmm14[3],xmm12[3],xmm14[4],xmm12[4],xmm14[5],xmm12[5],xmm14[6],xmm12[6],xmm14[7],xmm12[7]
+; SSE-NEXT: pand %xmm10, %xmm14
+; SSE-NEXT: por %xmm6, %xmm14
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm14[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
-; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm12, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm8, %xmm4
-; SSE-NEXT: pandn %xmm13, %xmm4
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: pandn %xmm7, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm11, %xmm1
-; SSE-NEXT: pandn %xmm7, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm8, %xmm7
-; SSE-NEXT: movdqa %xmm8, %xmm10
-; SSE-NEXT: por %xmm4, %xmm7
-; SSE-NEXT: movdqa %xmm7, %xmm4
-; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm9[8],xmm4[9],xmm9[9],xmm4[10],xmm9[10],xmm4[11],xmm9[11],xmm4[12],xmm9[12],xmm4[13],xmm9[13],xmm4[14],xmm9[14],xmm4[15],xmm9[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,1,2,0]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm4[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm7[0,2]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[2,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,3,2,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,6,6,6]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm8[0,1,2,3,4,5,6,5]
-; SSE-NEXT: packuswb %xmm4, %xmm7
-; SSE-NEXT: pand %xmm12, %xmm7
-; SSE-NEXT: por %xmm2, %xmm7
-; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: # xmm2 = mem[1,1,1,1]
-; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: # xmm4 = mem[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm4[0,3,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5]
-; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: movdqa %xmm14, %xmm1
-; SSE-NEXT: pandn %xmm2, %xmm1
-; SSE-NEXT: pand %xmm14, %xmm7
-; SSE-NEXT: por %xmm7, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm11, %xmm8
-; SSE-NEXT: movdqa %xmm11, %xmm2
-; SSE-NEXT: pandn %xmm5, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pand %xmm11, %xmm4
-; SSE-NEXT: por %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm9[8],xmm2[9],xmm9[9],xmm2[10],xmm9[10],xmm2[11],xmm9[11],xmm2[12],xmm9[12],xmm2[13],xmm9[13],xmm2[14],xmm9[14],xmm2[15],xmm9[15]
-; SSE-NEXT: movdqa %xmm6, %xmm7
-; SSE-NEXT: pandn %xmm2, %xmm7
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
-; SSE-NEXT: pand %xmm6, %xmm4
-; SSE-NEXT: por %xmm7, %xmm4
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm4[2,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,3,2,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,6,5,6,7]
-; SSE-NEXT: packuswb %xmm4, %xmm4
-; SSE-NEXT: movdqa %xmm12, %xmm3
-; SSE-NEXT: pandn %xmm4, %xmm3
-; SSE-NEXT: movdqa %xmm10, %xmm7
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: pandn %xmm5, %xmm7
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,6,5,6,7]
+; SSE-NEXT: packuswb %xmm3, %xmm3
+; SSE-NEXT: movdqa %xmm9, %xmm4
+; SSE-NEXT: pandn %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm8, %xmm6
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: pandn %xmm3, %xmm6
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movdqa %xmm0, %xmm14
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT: pand %xmm1, %xmm14
-; SSE-NEXT: movdqa %xmm15, %xmm11
-; SSE-NEXT: pand %xmm1, %xmm11
-; SSE-NEXT: movdqa %xmm13, %xmm4
-; SSE-NEXT: pand %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm5, %xmm2
-; SSE-NEXT: pand %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pandn %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm13, %xmm5
+; SSE-NEXT: pand %xmm13, %xmm14
+; SSE-NEXT: movdqa %xmm2, %xmm11
+; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: movdqa %xmm15, %xmm8
+; SSE-NEXT: pand %xmm13, %xmm8
+; SSE-NEXT: movdqa %xmm3, %xmm13
+; SSE-NEXT: pand %xmm5, %xmm13
+; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; SSE-NEXT: pandn %xmm13, %xmm5
+; SSE-NEXT: pand %xmm7, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm8, %xmm15
-; SSE-NEXT: pand %xmm8, %xmm13
-; SSE-NEXT: pand %xmm8, %xmm5
-; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm2, %xmm8
-; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm10, %xmm0
-; SSE-NEXT: por %xmm7, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm7
-; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
+; SSE-NEXT: pand %xmm7, %xmm11
+; SSE-NEXT: movdqa %xmm11, (%rsp) # 16-byte Spill
+; SSE-NEXT: pand %xmm7, %xmm15
+; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm7, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: pandn %xmm13, %xmm7
+; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: por %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm12[8],xmm6[9],xmm12[9],xmm6[10],xmm12[10],xmm6[11],xmm12[11],xmm6[12],xmm12[12],xmm6[13],xmm12[13],xmm6[14],xmm12[14],xmm6[15],xmm12[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,2,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm7[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm0[0,2]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm7[2,1,2,3,4,5,6,7]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm6[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[0,2]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm6[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,5]
; SSE-NEXT: packuswb %xmm0, %xmm1
-; SSE-NEXT: pand %xmm12, %xmm1
-; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,1,1]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm8[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,3,2,3,4,5,6,7]
+; SSE-NEXT: pand %xmm9, %xmm1
+; SSE-NEXT: por %xmm4, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[1,1,1,1]
+; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; SSE-NEXT: # xmm4 = mem[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
-; SSE-NEXT: movdqa %xmm10, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pand %xmm10, %xmm1
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
+; SSE-NEXT: movdqa %xmm12, %xmm3
+; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: pand %xmm12, %xmm1
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255]
-; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255]
+; SSE-NEXT: pand %xmm7, %xmm0
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
+; SSE-NEXT: pxor %xmm3, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,0,1,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,4,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm14, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm9[8],xmm14[9],xmm9[9],xmm14[10],xmm9[10],xmm14[11],xmm9[11],xmm14[12],xmm9[12],xmm14[13],xmm9[13],xmm14[14],xmm9[14],xmm14[15],xmm9[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm3[8],xmm14[9],xmm3[9],xmm14[10],xmm3[10],xmm14[11],xmm3[11],xmm14[12],xmm3[12],xmm14[13],xmm3[13],xmm14[14],xmm3[14],xmm14[15],xmm3[15]
; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[3,1],xmm0[2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm14[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,0,1,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm14[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,0,1,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
-; SSE-NEXT: packuswb %xmm2, %xmm0
-; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: pand %xmm9, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[0,2]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,6,6,7]
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm4[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm1[0,2]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,5]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm10, %xmm9
-; SSE-NEXT: movdqa %xmm10, %xmm14
+; SSE-NEXT: movdqa %xmm12, %xmm14
; SSE-NEXT: pandn %xmm1, %xmm14
-; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: pand %xmm12, %xmm0
; SSE-NEXT: por %xmm0, %xmm14
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: pand %xmm7, %xmm0
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,0,1,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,4,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm2[8],xmm11[9],xmm2[9],xmm11[10],xmm2[10],xmm11[11],xmm2[11],xmm11[12],xmm2[12],xmm11[13],xmm2[13],xmm11[14],xmm2[14],xmm11[15],xmm2[15]
-; SSE-NEXT: pxor %xmm10, %xmm10
-; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[3,1],xmm0[2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm11[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,0,1,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm0[2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,0,1,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
-; SSE-NEXT: packuswb %xmm2, %xmm0
-; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: pand %xmm9, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[0,2]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,6,6,7]
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm4[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm1[0,2]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,5]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm9, %xmm11
-; SSE-NEXT: pandn %xmm1, %xmm11
-; SSE-NEXT: pand %xmm9, %xmm0
-; SSE-NEXT: por %xmm0, %xmm11
-; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm12, %xmm13
+; SSE-NEXT: pandn %xmm1, %xmm13
+; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: por %xmm0, %xmm13
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: pand %xmm7, %xmm0
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm10[8],xmm1[9],xmm10[9],xmm1[10],xmm10[10],xmm1[11],xmm10[11],xmm1[12],xmm10[12],xmm1[13],xmm10[13],xmm1[14],xmm10[14],xmm1[15],xmm10[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,0,1,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,4,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm10[8],xmm4[9],xmm10[9],xmm4[10],xmm10[10],xmm4[11],xmm10[11],xmm4[12],xmm10[12],xmm4[13],xmm10[13],xmm4[14],xmm10[14],xmm4[15],xmm10[15]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1],xmm0[2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,0,1,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm3[8],xmm8[9],xmm3[9],xmm8[10],xmm3[10],xmm8[11],xmm3[11],xmm8[12],xmm3[12],xmm8[13],xmm3[13],xmm8[14],xmm3[14],xmm8[15],xmm3[15]
+; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,1],xmm0[2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,0,1,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
-; SSE-NEXT: packuswb %xmm2, %xmm0
-; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: packuswb %xmm3, %xmm0
+; SSE-NEXT: pand %xmm9, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[0,2]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,6,6,7]
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[0,2]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,5]
-; SSE-NEXT: packuswb %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm9, %xmm10
-; SSE-NEXT: pandn %xmm2, %xmm10
-; SSE-NEXT: pand %xmm9, %xmm0
-; SSE-NEXT: por %xmm0, %xmm10
+; SSE-NEXT: packuswb %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: pandn %xmm3, %xmm1
+; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: pand %xmm3, %xmm0
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm0[2,0]
-; SSE-NEXT: movaps %xmm2, %xmm4
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm0[2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,0,1,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
+; SSE-NEXT: movdqa %xmm7, %xmm3
+; SSE-NEXT: pand %xmm7, %xmm0
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,0]
+; SSE-NEXT: movaps %xmm3, %xmm4
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
+; SSE-NEXT: pxor %xmm15, %xmm15
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,1],xmm0[2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm5[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,0,1,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
-; SSE-NEXT: packuswb %xmm2, %xmm0
-; SSE-NEXT: pand %xmm12, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,4,5,6,5]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,1,2,0]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,0,1,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,4,6,7]
-; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pandn %xmm2, %xmm12
-; SSE-NEXT: por %xmm12, %xmm0
-; SSE-NEXT: movdqa %xmm8, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm8[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm5[0,2]
+; SSE-NEXT: packuswb %xmm3, %xmm0
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,4,5,6,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,2,0]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,0,1,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,4,6,7]
+; SSE-NEXT: packuswb %xmm3, %xmm3
; SSE-NEXT: pand %xmm9, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,7,5]
-; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pandn %xmm2, %xmm9
+; SSE-NEXT: pandn %xmm3, %xmm9
; SSE-NEXT: por %xmm0, %xmm9
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm3[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm11[0,2]
+; SSE-NEXT: movdqa %xmm12, %xmm2
+; SSE-NEXT: pand %xmm12, %xmm9
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm3[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,7,5]
+; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: pandn %xmm0, %xmm2
+; SSE-NEXT: por %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm9
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm15[8],xmm0[9],xmm15[9],xmm0[10],xmm15[10],xmm0[11],xmm15[11],xmm0[12],xmm15[12],xmm0[13],xmm15[13],xmm0[14],xmm15[14],xmm0[15],xmm15[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,1,1,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm0[1,2]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,3,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,3,0,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[2,1,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
-; SSE-NEXT: packuswb %xmm0, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,0,0,0,65535,65535]
-; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: packuswb %xmm4, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,0,0,65535,65535]
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: pandn %xmm2, %xmm6
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
+; SSE-NEXT: pand %xmm8, %xmm7
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm7, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3],xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
+; SSE-NEXT: movdqa %xmm10, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; SSE-NEXT: pand %xmm12, %xmm8
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm8, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE-NEXT: movdqa %xmm6, %xmm7
-; SSE-NEXT: pandn %xmm2, %xmm7
-; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm1[8],xmm8[9],xmm1[9],xmm8[10],xmm1[10],xmm8[11],xmm1[11],xmm8[12],xmm1[12],xmm8[13],xmm1[13],xmm8[14],xmm1[14],xmm8[15],xmm1[15]
-; SSE-NEXT: pand %xmm6, %xmm8
-; SSE-NEXT: por %xmm7, %xmm8
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[3,1,0,3,4,5,6,7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm15[8],xmm7[9],xmm15[9],xmm7[10],xmm15[10],xmm7[11],xmm15[11],xmm7[12],xmm15[12],xmm7[13],xmm15[13],xmm7[14],xmm15[14],xmm7[15],xmm15[15]
+; SSE-NEXT: pand %xmm10, %xmm7
+; SSE-NEXT: por %xmm3, %xmm7
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm7[3,1,0,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,4]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
-; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pand %xmm4, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT: # xmm3 = mem[3,1,2,3]
-; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; SSE-NEXT: # xmm7 = mem[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,1,4,5,6,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,0,3,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
-; SSE-NEXT: packuswb %xmm0, %xmm7
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm7[2,1]
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1],xmm15[2],xmm1[2],xmm15[3],xmm1[3],xmm15[4],xmm1[4],xmm15[5],xmm1[5],xmm15[6],xmm1[6],xmm15[7],xmm1[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm15[0,1,1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,3],xmm0[1,2]
-; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,2,3,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm15[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,3,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,7]
-; SSE-NEXT: packuswb %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm4, %xmm7
-; SSE-NEXT: pandn %xmm3, %xmm7
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; SSE-NEXT: pand %xmm12, %xmm15
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm15, %xmm3
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE-NEXT: movdqa %xmm6, %xmm8
-; SSE-NEXT: pandn %xmm3, %xmm8
-; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm1[8],xmm15[9],xmm1[9],xmm15[10],xmm1[10],xmm15[11],xmm1[11],xmm15[12],xmm1[12],xmm15[13],xmm1[13],xmm15[14],xmm1[14],xmm15[15],xmm1[15]
-; SSE-NEXT: pand %xmm6, %xmm15
-; SSE-NEXT: por %xmm8, %xmm15
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm15[3,1,0,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,4]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm3[0,1,2,3,6,5,6,7]
-; SSE-NEXT: packuswb %xmm8, %xmm8
-; SSE-NEXT: pand %xmm4, %xmm8
-; SSE-NEXT: por %xmm7, %xmm8
-; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT: # xmm3 = mem[3,1,2,3]
-; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; SSE-NEXT: # xmm7 = mem[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,1,4,5,6,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,0,3,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
-; SSE-NEXT: packuswb %xmm0, %xmm7
-; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm7[2,1]
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm13, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1],xmm13[2],xmm1[2],xmm13[3],xmm1[3],xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[0,1,1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,3],xmm0[1,2]
-; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[0,2,3,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,3,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,7]
-; SSE-NEXT: packuswb %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm4, %xmm7
-; SSE-NEXT: pandn %xmm3, %xmm7
-; SSE-NEXT: movdqa (%rsp), %xmm13 # 16-byte Reload
-; SSE-NEXT: pand %xmm12, %xmm13
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm13, %xmm3
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE-NEXT: movdqa %xmm6, %xmm5
-; SSE-NEXT: pandn %xmm3, %xmm5
-; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm1[8],xmm13[9],xmm1[9],xmm13[10],xmm1[10],xmm13[11],xmm1[11],xmm13[12],xmm1[12],xmm13[13],xmm1[13],xmm13[14],xmm1[14],xmm13[15],xmm1[15]
-; SSE-NEXT: pand %xmm6, %xmm13
-; SSE-NEXT: por %xmm5, %xmm13
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm13[3,1,0,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,4]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,6,5,6,7]
-; SSE-NEXT: packuswb %xmm5, %xmm5
-; SSE-NEXT: pand %xmm4, %xmm5
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,6,5,6,7]
+; SSE-NEXT: packuswb %xmm3, %xmm3
+; SSE-NEXT: pand %xmm0, %xmm3
+; SSE-NEXT: por %xmm6, %xmm3
+; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT: # xmm2 = mem[3,1,2,3]
+; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT: # xmm6 = mem[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,1,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,0,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
+; SSE-NEXT: packuswb %xmm4, %xmm6
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm6[2,1]
+; SSE-NEXT: movdqa (%rsp), %xmm5 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm15[8],xmm2[9],xmm15[9],xmm2[10],xmm15[10],xmm2[11],xmm15[11],xmm2[12],xmm15[12],xmm2[13],xmm15[13],xmm2[14],xmm15[14],xmm2[15],xmm15[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm15[0],xmm5[1],xmm15[1],xmm5[2],xmm15[2],xmm5[3],xmm15[3],xmm5[4],xmm15[4],xmm5[5],xmm15[5],xmm5[6],xmm15[6],xmm5[7],xmm15[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,1,1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm2[1,2]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2,3,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm5[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,3,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,7]
+; SSE-NEXT: packuswb %xmm2, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: pandn %xmm4, %xmm6
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm5, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm15[0],xmm4[1],xmm15[1],xmm4[2],xmm15[2],xmm4[3],xmm15[3],xmm4[4],xmm15[4],xmm4[5],xmm15[5],xmm4[6],xmm15[6],xmm4[7],xmm15[7]
+; SSE-NEXT: movdqa %xmm10, %xmm7
+; SSE-NEXT: pandn %xmm4, %xmm7
+; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm15[8],xmm5[9],xmm15[9],xmm5[10],xmm15[10],xmm5[11],xmm15[11],xmm5[12],xmm15[12],xmm5[13],xmm15[13],xmm5[14],xmm15[14],xmm5[15],xmm15[15]
+; SSE-NEXT: pand %xmm10, %xmm5
; SSE-NEXT: por %xmm7, %xmm5
-; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT: # xmm3 = mem[3,1,2,3]
-; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; SSE-NEXT: # xmm7 = mem[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,1,4,5,6,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,0,3,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
-; SSE-NEXT: packuswb %xmm0, %xmm7
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm7[2,1]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm7, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3],xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[0,1,1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,3],xmm0[1,2]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: pand %xmm12, %xmm0
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; SSE-NEXT: por %xmm0, %xmm12
-; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm1[8],xmm12[9],xmm1[9],xmm12[10],xmm1[10],xmm12[11],xmm1[11],xmm12[12],xmm1[12],xmm12[13],xmm1[13],xmm12[14],xmm1[14],xmm12[15],xmm1[15]
-; SSE-NEXT: pand %xmm6, %xmm12
-; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: por %xmm12, %xmm6
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm6[3,1,0,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,4]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2,3,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm7[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[2,1,3,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,7]
-; SSE-NEXT: packuswb %xmm6, %xmm3
-; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: por %xmm4, %xmm0
-; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT: # xmm3 = mem[3,1,2,3]
-; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: # xmm4 = mem[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,1,4,5,6,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,0,3,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; SSE-NEXT: packuswb %xmm6, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,1]
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movaps %xmm3, 16(%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movaps %xmm3, 48(%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movaps %xmm3, (%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movaps %xmm3, 32(%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movaps %xmm3, 16(%rdx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movaps %xmm3, 48(%rdx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movaps %xmm3, (%rdx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movaps %xmm3, 32(%rdx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movaps %xmm1, 16(%rcx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movaps %xmm1, 48(%rcx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movaps %xmm1, (%rcx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movaps %xmm1, 32(%rcx)
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm5[3,1,0,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6,4]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,2,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm4[0,1,2,3,6,5,6,7]
+; SSE-NEXT: packuswb %xmm7, %xmm7
+; SSE-NEXT: pand %xmm0, %xmm7
+; SSE-NEXT: por %xmm6, %xmm7
+; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; SSE-NEXT: # xmm4 = mem[3,1,2,3]
+; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT: # xmm6 = mem[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,2,1,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,0,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
+; SSE-NEXT: packuswb %xmm2, %xmm6
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,1]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm15[8],xmm2[9],xmm15[9],xmm2[10],xmm15[10],xmm2[11],xmm15[11],xmm2[12],xmm15[12],xmm2[13],xmm15[13],xmm2[14],xmm15[14],xmm2[15],xmm15[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm15[0],xmm5[1],xmm15[1],xmm5[2],xmm15[2],xmm5[3],xmm15[3],xmm5[4],xmm15[4],xmm5[5],xmm15[5],xmm5[6],xmm15[6],xmm5[7],xmm15[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,1,1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm2[1,2]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2,3,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm5[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,3,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,7]
+; SSE-NEXT: packuswb %xmm2, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: pandn %xmm4, %xmm6
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm5, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm15[0],xmm4[1],xmm15[1],xmm4[2],xmm15[2],xmm4[3],xmm15[3],xmm4[4],xmm15[4],xmm4[5],xmm15[5],xmm4[6],xmm15[6],xmm4[7],xmm15[7]
+; SSE-NEXT: movdqa %xmm10, %xmm11
+; SSE-NEXT: pandn %xmm4, %xmm11
+; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm15[8],xmm5[9],xmm15[9],xmm5[10],xmm15[10],xmm5[11],xmm15[11],xmm5[12],xmm15[12],xmm5[13],xmm15[13],xmm5[14],xmm15[14],xmm5[15],xmm15[15]
+; SSE-NEXT: pand %xmm10, %xmm5
+; SSE-NEXT: por %xmm11, %xmm5
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm5[3,1,0,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6,4]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,2,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,6,7]
+; SSE-NEXT: packuswb %xmm4, %xmm4
+; SSE-NEXT: pand %xmm0, %xmm4
+; SSE-NEXT: por %xmm6, %xmm4
+; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT: # xmm6 = mem[3,1,2,3]
+; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; SSE-NEXT: # xmm11 = mem[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,2,1,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[0,1,0,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm6[0],xmm11[1],xmm6[1]
+; SSE-NEXT: packuswb %xmm2, %xmm11
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm11[2,1]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm15[8],xmm2[9],xmm15[9],xmm2[10],xmm15[10],xmm2[11],xmm15[11],xmm2[12],xmm15[12],xmm2[13],xmm15[13],xmm2[14],xmm15[14],xmm2[15],xmm15[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm15[0],xmm5[1],xmm15[1],xmm5[2],xmm15[2],xmm5[3],xmm15[3],xmm5[4],xmm15[4],xmm5[5],xmm15[5],xmm5[6],xmm15[6],xmm5[7],xmm15[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,1,1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm2[1,2]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; SSE-NEXT: por %xmm2, %xmm8
+; SSE-NEXT: movdqa %xmm8, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3],xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm15[8],xmm8[9],xmm15[9],xmm8[10],xmm15[10],xmm8[11],xmm15[11],xmm8[12],xmm15[12],xmm8[13],xmm15[13],xmm8[14],xmm15[14],xmm8[15],xmm15[15]
+; SSE-NEXT: pand %xmm10, %xmm8
+; SSE-NEXT: pandn %xmm2, %xmm10
+; SSE-NEXT: por %xmm8, %xmm10
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2,3,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm5[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,3,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,4,7]
+; SSE-NEXT: packuswb %xmm2, %xmm6
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm10[3,1,0,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,6,4]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,2,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,1,2,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,6,5,6,7]
+; SSE-NEXT: packuswb %xmm8, %xmm8
+; SSE-NEXT: pand %xmm0, %xmm8
+; SSE-NEXT: pandn %xmm6, %xmm0
+; SSE-NEXT: por %xmm8, %xmm0
+; SSE-NEXT: pshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT: # xmm6 = mem[3,1,2,3]
+; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; SSE-NEXT: # xmm8 = mem[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,2,1,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,1,0,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
+; SSE-NEXT: packuswb %xmm2, %xmm8
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm8[2,1]
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, 16(%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, 48(%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, (%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, 32(%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, 16(%rdx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, 48(%rdx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, (%rdx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, 32(%rdx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, 16(%rcx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, 48(%rcx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, (%rcx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps %xmm2, 32(%rcx)
; SSE-NEXT: movdqa %xmm9, 16(%r8)
-; SSE-NEXT: movdqa %xmm10, 48(%r8)
-; SSE-NEXT: movdqa %xmm11, (%r8)
+; SSE-NEXT: movdqa %xmm1, 48(%r8)
+; SSE-NEXT: movdqa %xmm13, (%r8)
; SSE-NEXT: movdqa %xmm14, 32(%r8)
; SSE-NEXT: movaps %xmm0, 16(%r9)
-; SSE-NEXT: movaps %xmm5, 48(%r9)
-; SSE-NEXT: movaps %xmm8, (%r9)
-; SSE-NEXT: movaps %xmm2, 32(%r9)
-; SSE-NEXT: addq $552, %rsp # imm = 0x228
+; SSE-NEXT: movaps %xmm4, 48(%r9)
+; SSE-NEXT: movaps %xmm7, (%r9)
+; SSE-NEXT: movaps %xmm3, 32(%r9)
+; SSE-NEXT: addq $568, %rsp # imm = 0x238
; SSE-NEXT: retq
;
; AVX-LABEL: load_i8_stride5_vf64:
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
index c77b232fde969a..a8a8494dde6f09 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
@@ -1385,299 +1385,299 @@ define void @load_i8_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
; SSE-LABEL: load_i8_stride6_vf16:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa 64(%rdi), %xmm10
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 64(%rdi), %xmm7
+; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rdi), %xmm5
-; SSE-NEXT: movdqa 16(%rdi), %xmm1
-; SSE-NEXT: movdqa 32(%rdi), %xmm7
-; SSE-NEXT: movdqa 48(%rdi), %xmm6
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: pandn %xmm7, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: pandn %xmm6, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm4, %xmm3
-; SSE-NEXT: pandn %xmm6, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm4, %xmm6
-; SSE-NEXT: por %xmm0, %xmm6
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[0,3,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
-; SSE-NEXT: packuswb %xmm3, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,65535,0,0,0,65535,65535]
-; SSE-NEXT: movdqa %xmm8, %xmm9
-; SSE-NEXT: pandn %xmm0, %xmm9
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm2, %xmm11
-; SSE-NEXT: pandn %xmm1, %xmm11
-; SSE-NEXT: pand %xmm4, %xmm10
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: movdqa 16(%rdi), %xmm0
+; SSE-NEXT: movdqa 32(%rdi), %xmm15
+; SSE-NEXT: movdqa 48(%rdi), %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm6, %xmm1
+; SSE-NEXT: pandn %xmm15, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,0,65535,65535,0,65535,65535,0]
+; SSE-NEXT: movdqa %xmm8, %xmm2
+; SSE-NEXT: pandn %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm6, %xmm2
+; SSE-NEXT: pandn %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm6, %xmm3
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm1
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,3,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,4,5,6,5]
+; SSE-NEXT: packuswb %xmm2, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,0,0,65535,65535]
+; SSE-NEXT: movdqa %xmm1, %xmm9
+; SSE-NEXT: pandn %xmm4, %xmm9
+; SSE-NEXT: movdqa %xmm8, %xmm11
+; SSE-NEXT: pandn %xmm0, %xmm11
+; SSE-NEXT: movdqa %xmm7, %xmm10
+; SSE-NEXT: pand %xmm6, %xmm10
+; SSE-NEXT: movdqa %xmm6, %xmm4
+; SSE-NEXT: pandn %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm7
; SSE-NEXT: movdqa %xmm5, %xmm14
-; SSE-NEXT: pand %xmm4, %xmm14
-; SSE-NEXT: movdqa 80(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm13
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm4, %xmm13
-; SSE-NEXT: movdqa %xmm7, %xmm15
-; SSE-NEXT: pand %xmm4, %xmm7
-; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: pand %xmm6, %xmm14
+; SSE-NEXT: movdqa 80(%rdi), %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm13
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm4, %xmm12
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: pandn %xmm5, %xmm4
-; SSE-NEXT: pand %xmm0, %xmm5
+; SSE-NEXT: pand %xmm6, %xmm13
+; SSE-NEXT: movdqa %xmm15, %xmm0
+; SSE-NEXT: pand %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm6, %xmm7
+; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm6, %xmm7
+; SSE-NEXT: movdqa %xmm6, %xmm12
+; SSE-NEXT: pandn %xmm5, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm5
; SSE-NEXT: por %xmm11, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm5[0,2,1,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm1, %xmm11
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm0, %xmm11
; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[0,3,2,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,4,7,6,7]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm8, %xmm0
-; SSE-NEXT: por %xmm9, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm12
-; SSE-NEXT: por %xmm12, %xmm10
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm10[3,1,2,0]
-; SSE-NEXT: pand %xmm1, %xmm9
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[2,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,3,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,7,6,5]
-; SSE-NEXT: packuswb %xmm9, %xmm9
-; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; SSE-NEXT: movdqa %xmm11, %xmm12
-; SSE-NEXT: pandn %xmm9, %xmm12
-; SSE-NEXT: pand %xmm11, %xmm0
-; SSE-NEXT: por %xmm0, %xmm12
+; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,7,6,7]
+; SSE-NEXT: packuswb %xmm11, %xmm11
+; SSE-NEXT: pand %xmm1, %xmm11
+; SSE-NEXT: por %xmm9, %xmm11
+; SSE-NEXT: pandn %xmm2, %xmm7
+; SSE-NEXT: por %xmm7, %xmm10
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm10[3,1,2,0]
+; SSE-NEXT: pand %xmm0, %xmm7
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,3,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,5]
+; SSE-NEXT: packuswb %xmm7, %xmm9
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; SSE-NEXT: movdqa %xmm7, %xmm0
+; SSE-NEXT: pandn %xmm9, %xmm0
+; SSE-NEXT: pand %xmm7, %xmm11
+; SSE-NEXT: por %xmm11, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pxor %xmm9, %xmm9
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm9[8],xmm0[9],xmm9[9],xmm0[10],xmm9[10],xmm0[11],xmm9[11],xmm0[12],xmm9[12],xmm0[13],xmm9[13],xmm0[14],xmm9[14],xmm0[15],xmm9[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3],xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,2,3,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,7,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; SSE-NEXT: packuswb %xmm6, %xmm1
-; SSE-NEXT: movdqa %xmm5, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm9[8],xmm0[9],xmm9[9],xmm0[10],xmm9[10],xmm0[11],xmm9[11],xmm0[12],xmm9[12],xmm0[13],xmm9[13],xmm0[14],xmm9[14],xmm0[15],xmm9[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,7,6,7]
+; SSE-NEXT: movdqa %xmm3, %xmm11
+; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm9[8],xmm11[9],xmm9[9],xmm11[10],xmm9[10],xmm11[11],xmm9[11],xmm11[12],xmm9[12],xmm11[13],xmm9[13],xmm11[14],xmm9[14],xmm11[15],xmm9[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,3,3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3]
+; SSE-NEXT: psrld $16, %xmm11
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm11[2],xmm3[3],xmm11[3]
+; SSE-NEXT: packuswb %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm9[8],xmm3[9],xmm9[9],xmm3[10],xmm9[10],xmm3[11],xmm9[11],xmm3[12],xmm9[12],xmm3[13],xmm9[13],xmm3[14],xmm9[14],xmm3[15],xmm9[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,7]
; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,3,2,0,4,5,6,7]
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,0,65535,0,0,65535,65535]
-; SSE-NEXT: pand %xmm6, %xmm5
-; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: por %xmm5, %xmm6
-; SSE-NEXT: packuswb %xmm6, %xmm6
-; SSE-NEXT: pand %xmm8, %xmm6
-; SSE-NEXT: pandn %xmm1, %xmm8
-; SSE-NEXT: por %xmm8, %xmm6
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,0,65535,0,0,65535,65535]
+; SSE-NEXT: pand %xmm11, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm11
+; SSE-NEXT: por %xmm5, %xmm11
+; SSE-NEXT: packuswb %xmm11, %xmm11
+; SSE-NEXT: pand %xmm1, %xmm11
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: por %xmm11, %xmm1
; SSE-NEXT: movdqa %xmm10, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm10[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,4]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm10[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,4]
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT: pand %xmm5, %xmm1
+; SSE-NEXT: pand %xmm5, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm5
-; SSE-NEXT: por %xmm1, %xmm5
+; SSE-NEXT: por %xmm3, %xmm5
; SSE-NEXT: packuswb %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm11, %xmm10
+; SSE-NEXT: movdqa %xmm7, %xmm10
; SSE-NEXT: pandn %xmm0, %xmm10
-; SSE-NEXT: pand %xmm11, %xmm6
-; SSE-NEXT: por %xmm6, %xmm10
-; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm15
+; SSE-NEXT: pand %xmm7, %xmm1
+; SSE-NEXT: por %xmm1, %xmm10
+; SSE-NEXT: pand %xmm8, %xmm15
+; SSE-NEXT: movdqa %xmm8, %xmm11
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,3,4,5,6,7]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
-; SSE-NEXT: packuswb %xmm1, %xmm0
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm14[2,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm3, %xmm8
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,4,4,5,6]
+; SSE-NEXT: packuswb %xmm1, %xmm3
+; SSE-NEXT: por %xmm4, %xmm14
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm14[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,7]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm8
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm3, %xmm5
-; SSE-NEXT: pandn %xmm1, %xmm5
-; SSE-NEXT: pand %xmm3, %xmm0
-; SSE-NEXT: por %xmm0, %xmm5
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: pandn %xmm6, %xmm2
-; SSE-NEXT: por %xmm2, %xmm13
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[0,3,2,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pand %xmm8, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm11, %xmm8
-; SSE-NEXT: pandn %xmm0, %xmm8
-; SSE-NEXT: pand %xmm11, %xmm5
-; SSE-NEXT: por %xmm5, %xmm8
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pandn %xmm1, %xmm4
+; SSE-NEXT: pand %xmm0, %xmm3
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
+; SSE-NEXT: por %xmm12, %xmm13
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm13[0,3,2,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,4]
+; SSE-NEXT: packuswb %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm7, %xmm1
+; SSE-NEXT: pandn %xmm2, %xmm1
+; SSE-NEXT: pand %xmm7, %xmm4
+; SSE-NEXT: por %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm15, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1],xmm2[2],xmm9[2],xmm2[3],xmm9[3],xmm2[4],xmm9[4],xmm2[5],xmm9[5],xmm2[6],xmm9[6],xmm2[7],xmm9[7]
; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm9[8],xmm15[9],xmm9[9],xmm15[10],xmm9[10],xmm15[11],xmm9[11],xmm15[12],xmm9[12],xmm15[13],xmm9[13],xmm15[14],xmm9[14],xmm15[15],xmm9[15]
-; SSE-NEXT: movdqa %xmm15, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
-; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm15[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm15[2,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
-; SSE-NEXT: packuswb %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm14, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm9[8],xmm14[9],xmm9[9],xmm14[10],xmm9[10],xmm14[11],xmm9[11],xmm14[12],xmm9[12],xmm14[13],xmm9[13],xmm14[14],xmm9[14],xmm14[15],xmm9[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm14[0,3,2,1]
+; SSE-NEXT: movdqa %xmm15, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm2[3,0]
+; SSE-NEXT: movaps %xmm2, %xmm4
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[0,2]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm15[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm15[2,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
+; SSE-NEXT: packuswb %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm14, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1],xmm2[2],xmm9[2],xmm2[3],xmm9[3],xmm2[4],xmm9[4],xmm2[5],xmm9[5],xmm2[6],xmm9[6],xmm2[7],xmm9[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,1,4,5,6,7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm9[8],xmm14[9],xmm9[9],xmm14[10],xmm9[10],xmm14[11],xmm9[11],xmm14[12],xmm9[12],xmm14[13],xmm9[13],xmm14[14],xmm9[14],xmm14[15],xmm9[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm14[0,3,2,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,7,7,7]
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [0,65535,65535,0,65535,65535,65535,65535]
-; SSE-NEXT: pand %xmm5, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm5
-; SSE-NEXT: por %xmm2, %xmm5
-; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: pand %xmm5, %xmm4
+; SSE-NEXT: pandn %xmm2, %xmm5
+; SSE-NEXT: por %xmm4, %xmm5
+; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: packuswb %xmm5, %xmm5
-; SSE-NEXT: pandn %xmm5, %xmm3
-; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm13, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm9[8],xmm0[9],xmm9[9],xmm0[10],xmm9[10],xmm0[11],xmm9[11],xmm0[12],xmm9[12],xmm0[13],xmm9[13],xmm0[14],xmm9[14],xmm0[15],xmm9[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5]
+; SSE-NEXT: pandn %xmm5, %xmm0
+; SSE-NEXT: por %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm13, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm9[8],xmm2[9],xmm9[9],xmm2[10],xmm9[10],xmm2[11],xmm9[11],xmm2[12],xmm9[12],xmm2[13],xmm9[13],xmm2[14],xmm9[14],xmm2[15],xmm9[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1],xmm13[2],xmm9[2],xmm13[3],xmm9[3],xmm13[4],xmm9[4],xmm13[5],xmm9[5],xmm13[6],xmm9[6],xmm13[7],xmm9[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[0,2,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,7]
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,0,65535,65535,0]
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: pand %xmm11, %xmm3
-; SSE-NEXT: packuswb %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm0, %xmm11
-; SSE-NEXT: por %xmm3, %xmm11
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm7, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[0,2,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535,0,65535,65535,0]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pandn %xmm2, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: pand %xmm7, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm2
+; SSE-NEXT: pandn %xmm2, %xmm7
+; SSE-NEXT: por %xmm0, %xmm7
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; SSE-NEXT: packuswb %xmm1, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,0]
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm12, %xmm2
+; SSE-NEXT: pandn %xmm0, %xmm2
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[3,1,2,0]
+; SSE-NEXT: pand %xmm8, %xmm0
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[2,1,0,3,4,5,6,7]
-; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pand %xmm3, %xmm2
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE-NEXT: movdqa %xmm6, %xmm1
-; SSE-NEXT: pand %xmm13, %xmm1
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[2,1,0,3,4,5,6,7]
+; SSE-NEXT: packuswb %xmm3, %xmm3
+; SSE-NEXT: pand %xmm12, %xmm3
+; SSE-NEXT: por %xmm2, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: pand %xmm11, %xmm2
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,0,0,0]
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: por %xmm1, %xmm13
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[0,2,1,3]
-; SSE-NEXT: pand %xmm5, %xmm1
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,1,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
-; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm6
-; SSE-NEXT: pandn %xmm1, %xmm6
-; SSE-NEXT: por %xmm2, %xmm6
-; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
-; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm1[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm1[2,3]
-; SSE-NEXT: psrlq $48, %xmm1
-; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm7[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,7]
-; SSE-NEXT: packuswb %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm9[8],xmm2[9],xmm9[9],xmm2[10],xmm9[10],xmm2[11],xmm9[11],xmm2[12],xmm9[12],xmm2[13],xmm9[13],xmm2[14],xmm9[14],xmm2[15],xmm9[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,5,5,5,5]
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,0,65535,65535,65535]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,1,2,4,5,6,7]
-; SSE-NEXT: pand %xmm2, %xmm4
-; SSE-NEXT: pandn %xmm5, %xmm2
-; SSE-NEXT: por %xmm4, %xmm2
-; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pand %xmm3, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: pand %xmm0, %xmm3
+; SSE-NEXT: por %xmm2, %xmm11
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm11[0,2,1,3]
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,1,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7]
+; SSE-NEXT: packuswb %xmm2, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pandn %xmm4, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: movdqa %xmm13, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,1,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm9[8],xmm13[9],xmm9[9],xmm13[10],xmm9[10],xmm13[11],xmm9[11],xmm13[12],xmm9[12],xmm13[13],xmm9[13],xmm13[14],xmm9[14],xmm13[15],xmm9[15]
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,65535,0,65535,0,0]
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm13[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,7,4]
-; SSE-NEXT: pandn %xmm4, %xmm3
-; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: packuswb %xmm3, %xmm1
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm12, (%rsi)
+; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm9[8],xmm3[9],xmm9[9],xmm3[10],xmm9[10],xmm3[11],xmm9[11],xmm3[12],xmm9[12],xmm3[13],xmm9[13],xmm3[14],xmm9[14],xmm3[15],xmm9[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
+; SSE-NEXT: movdqa %xmm5, %xmm4
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm3[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[2,3]
+; SSE-NEXT: psrlq $48, %xmm3
+; SSE-NEXT: psrldq {{.*#+}} xmm4 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm5[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,5,7]
+; SSE-NEXT: packuswb %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm6, %xmm4
+; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm9[8],xmm4[9],xmm9[9],xmm4[10],xmm9[10],xmm4[11],xmm9[11],xmm4[12],xmm9[12],xmm4[13],xmm9[13],xmm4[14],xmm9[14],xmm4[15],xmm9[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,65535]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3],xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,1,1,2,4,5,6,7]
+; SSE-NEXT: pand %xmm5, %xmm6
+; SSE-NEXT: pandn %xmm4, %xmm5
+; SSE-NEXT: por %xmm6, %xmm5
+; SSE-NEXT: packuswb %xmm5, %xmm5
+; SSE-NEXT: pand %xmm12, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm12
+; SSE-NEXT: por %xmm5, %xmm12
+; SSE-NEXT: movdqa %xmm11, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,1,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm9[8],xmm11[9],xmm9[9],xmm11[10],xmm9[10],xmm11[11],xmm9[11],xmm11[12],xmm9[12],xmm11[13],xmm9[13],xmm11[14],xmm9[14],xmm11[15],xmm9[15]
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535,0,65535,0,0]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm11[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,7,4]
+; SSE-NEXT: pandn %xmm5, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: pand %xmm0, %xmm12
+; SSE-NEXT: packuswb %xmm4, %xmm3
+; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: por %xmm12, %xmm0
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movaps %xmm3, (%rsi)
; SSE-NEXT: movdqa %xmm10, (%rdx)
-; SSE-NEXT: movdqa %xmm8, (%rcx)
-; SSE-NEXT: movdqa %xmm11, (%r8)
-; SSE-NEXT: movdqa %xmm6, (%r9)
+; SSE-NEXT: movdqa %xmm1, (%rcx)
+; SSE-NEXT: movdqa %xmm7, (%r8)
+; SSE-NEXT: movdqa %xmm2, (%r9)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE-NEXT: movdqa %xmm0, (%rax)
; SSE-NEXT: retq
@@ -2542,229 +2542,229 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
define void @load_i8_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
; SSE-LABEL: load_i8_stride6_vf32:
; SSE: # %bb.0:
-; SSE-NEXT: subq $264, %rsp # imm = 0x108
-; SSE-NEXT: movdqa 64(%rdi), %xmm7
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 80(%rdi), %xmm9
-; SSE-NEXT: movdqa (%rdi), %xmm12
-; SSE-NEXT: movdqa 16(%rdi), %xmm14
+; SSE-NEXT: subq $280, %rsp # imm = 0x118
+; SSE-NEXT: movdqa 64(%rdi), %xmm14
+; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 80(%rdi), %xmm10
+; SSE-NEXT: movdqa (%rdi), %xmm6
+; SSE-NEXT: movdqa 16(%rdi), %xmm13
; SSE-NEXT: movdqa 32(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 48(%rdi), %xmm5
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE-NEXT: movdqa %xmm11, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,0,65535,65535,0,65535,65535,0]
+; SSE-NEXT: movdqa %xmm12, %xmm1
; SSE-NEXT: pandn %xmm5, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: pandn %xmm5, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm5, %xmm15
-; SSE-NEXT: pand %xmm10, %xmm15
+; SSE-NEXT: pand %xmm4, %xmm15
; SSE-NEXT: por %xmm0, %xmm15
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE-NEXT: movdqa %xmm15, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
; SSE-NEXT: packuswb %xmm1, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,0,0,65535,65535]
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,65535,0,0,0,65535,65535]
; SSE-NEXT: movdqa %xmm11, %xmm1
-; SSE-NEXT: pandn %xmm14, %xmm1
-; SSE-NEXT: movdqa %xmm12, %xmm8
-; SSE-NEXT: pand %xmm11, %xmm8
-; SSE-NEXT: por %xmm1, %xmm8
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[0,2,1,3]
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm3, %xmm6
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
-; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm12, %xmm0
+; SSE-NEXT: pandn %xmm13, %xmm0
+; SSE-NEXT: movdqa %xmm6, %xmm9
+; SSE-NEXT: pand %xmm12, %xmm9
+; SSE-NEXT: por %xmm0, %xmm9
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,2,1,3]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
+; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: pand %xmm11, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: pandn %xmm10, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm14
+; SSE-NEXT: por %xmm1, %xmm14
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[3,1,2,0]
; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm2, %xmm5
-; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm9, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm7
-; SSE-NEXT: por %xmm0, %xmm7
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[3,1,2,0]
-; SSE-NEXT: pand %xmm6, %xmm0
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
-; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
+; SSE-NEXT: packuswb %xmm1, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 128(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa 144(%rdi), %xmm1
-; SSE-NEXT: movdqa %xmm11, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: movdqa 144(%rdi), %xmm7
+; SSE-NEXT: movdqa %xmm12, %xmm2
+; SSE-NEXT: pandn %xmm7, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm4, %xmm2
+; SSE-NEXT: pandn %xmm7, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: pand %xmm10, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pand %xmm6, %xmm0
+; SSE-NEXT: pand %xmm4, %xmm7
+; SSE-NEXT: por %xmm0, %xmm7
+; SSE-NEXT: movdqa %xmm7, %xmm0
+; SSE-NEXT: pand %xmm5, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
-; SSE-NEXT: packuswb %xmm3, %xmm0
-; SSE-NEXT: movdqa %xmm5, %xmm6
-; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: movdqa %xmm10, %xmm1
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm12, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 112(%rdi), %xmm0
-; SSE-NEXT: movdqa %xmm11, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: movdqa 160(%rdi), %xmm5
-; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm10, %xmm5
-; SSE-NEXT: movdqa %xmm10, %xmm4
-; SSE-NEXT: pandn %xmm14, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm10, %xmm12
-; SSE-NEXT: movdqa %xmm11, %xmm4
-; SSE-NEXT: pandn %xmm9, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, %xmm11
-; SSE-NEXT: pand %xmm10, %xmm11
-; SSE-NEXT: movdqa %xmm10, %xmm4
-; SSE-NEXT: pandn %xmm0, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 96(%rdi), %xmm13
-; SSE-NEXT: movdqa %xmm13, %xmm4
-; SSE-NEXT: pand %xmm10, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 176(%rdi), %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm10
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm1, %xmm10
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm9, %xmm10
-; SSE-NEXT: pand %xmm1, %xmm9
-; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm1, %xmm14
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm14, %xmm9
-; SSE-NEXT: pand %xmm1, %xmm14
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm1, %xmm14
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
+; SSE-NEXT: packuswb %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm11, %xmm8
+; SSE-NEXT: pandn %xmm0, %xmm8
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: pandn %xmm6, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
-; SSE-NEXT: pandn %xmm13, %xmm1
+; SSE-NEXT: movdqa 112(%rdi), %xmm1
+; SSE-NEXT: movdqa %xmm12, %xmm4
+; SSE-NEXT: pandn %xmm1, %xmm4
+; SSE-NEXT: movdqa 160(%rdi), %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm13, %xmm5
+; SSE-NEXT: pandn %xmm13, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm6
+; SSE-NEXT: movdqa %xmm6, (%rsp) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm12, %xmm2
+; SSE-NEXT: pandn %xmm10, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm10, %xmm12
+; SSE-NEXT: pand %xmm0, %xmm12
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 96(%rdi), %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm6
+; SSE-NEXT: pand %xmm0, %xmm6
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 176(%rdi), %xmm6
+; SSE-NEXT: movdqa %xmm6, %xmm13
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm13
+; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm10, %xmm13
+; SSE-NEXT: pand %xmm0, %xmm10
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm5, %xmm10
+; SSE-NEXT: pand %xmm0, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pandn %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,2,1,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm0, %xmm3
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,3,2,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
-; SSE-NEXT: packuswb %xmm3, %xmm3
-; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
-; SSE-NEXT: por %xmm6, %xmm3
-; SSE-NEXT: pandn %xmm4, %xmm14
-; SSE-NEXT: por %xmm14, %xmm5
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[3,1,2,0]
-; SSE-NEXT: pand %xmm0, %xmm4
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[2,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,5]
+; SSE-NEXT: por %xmm4, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,2,1,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,1,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,3,2,1,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,7]
; SSE-NEXT: packuswb %xmm4, %xmm4
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; SSE-NEXT: movdqa %xmm13, %xmm0
-; SSE-NEXT: pandn %xmm4, %xmm0
-; SSE-NEXT: pand %xmm13, %xmm3
-; SSE-NEXT: por %xmm3, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm11, %xmm4
+; SSE-NEXT: por %xmm8, %xmm4
+; SSE-NEXT: pandn %xmm6, %xmm0
+; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[3,1,2,0]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
+; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: pandn %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: por %xmm4, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pxor %xmm4, %xmm4
-; SSE-NEXT: movdqa %xmm15, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE-NEXT: movdqa %xmm15, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
; SSE-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3],xmm15[4],xmm4[4],xmm15[5],xmm4[5],xmm15[6],xmm4[6],xmm15[7],xmm4[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm15[2,2,3,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm3[0],xmm14[1],xmm3[1],xmm14[2],xmm3[2],xmm14[3],xmm3[3]
-; SSE-NEXT: psrld $16, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm15[2,2,3,3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
+; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm15[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,7,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm15 = xmm15[2],xmm3[2],xmm15[3],xmm3[3]
-; SSE-NEXT: packuswb %xmm15, %xmm14
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,0,0,0,65535,65535]
-; SSE-NEXT: movdqa %xmm6, %xmm3
-; SSE-NEXT: pandn %xmm14, %xmm3
-; SSE-NEXT: movdqa %xmm8, %xmm14
-; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm4[8],xmm14[9],xmm4[9],xmm14[10],xmm4[10],xmm14[11],xmm4[11],xmm14[12],xmm4[12],xmm14[13],xmm4[13],xmm14[14],xmm4[14],xmm14[15],xmm4[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm14[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm14[1,1,1,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,7,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm15 = xmm15[2],xmm0[2],xmm15[3],xmm0[3]
+; SSE-NEXT: packuswb %xmm15, %xmm6
+; SSE-NEXT: movdqa %xmm11, %xmm5
+; SSE-NEXT: pandn %xmm6, %xmm5
+; SSE-NEXT: movdqa %xmm9, %xmm6
+; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,7,6,7]
; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,65535,0,65535,0,0,65535,65535]
; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: pandn %xmm14, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm8[1,3,2,0,4,5,6,7]
-; SSE-NEXT: pand %xmm15, %xmm14
-; SSE-NEXT: por %xmm0, %xmm14
-; SSE-NEXT: packuswb %xmm14, %xmm14
-; SSE-NEXT: pand %xmm6, %xmm14
-; SSE-NEXT: por %xmm3, %xmm14
-; SSE-NEXT: movdqa %xmm7, %xmm0
+; SSE-NEXT: pandn %xmm6, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1],xmm9[2],xmm4[2],xmm9[3],xmm4[3],xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,3,2,0,4,5,6,7]
+; SSE-NEXT: pand %xmm15, %xmm6
+; SSE-NEXT: por %xmm0, %xmm6
+; SSE-NEXT: packuswb %xmm6, %xmm6
+; SSE-NEXT: pand %xmm11, %xmm6
+; SSE-NEXT: por %xmm5, %xmm6
+; SSE-NEXT: movdqa %xmm14, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm8, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm7[3,1,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm8, %xmm5
+; SSE-NEXT: pandn %xmm0, %xmm5
+; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm4[8],xmm14[9],xmm4[9],xmm14[10],xmm4[10],xmm14[11],xmm4[11],xmm14[12],xmm4[12],xmm14[13],xmm4[13],xmm14[14],xmm4[14],xmm14[15],xmm4[15]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm14[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,7,6,4]
; SSE-NEXT: pand %xmm8, %xmm0
-; SSE-NEXT: por %xmm3, %xmm0
+; SSE-NEXT: por %xmm5, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm13, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: pand %xmm13, %xmm14
-; SSE-NEXT: por %xmm14, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: pandn %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm6
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: por %xmm6, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm7, %xmm0
; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,2,3,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3],xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm7[2,2,3,3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,7,6,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE-NEXT: packuswb %xmm2, %xmm3
+; SSE-NEXT: packuswb %xmm2, %xmm5
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
@@ -2778,32 +2778,31 @@ define void @load_i8_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pandn %xmm0, %xmm15
; SSE-NEXT: por %xmm1, %xmm15
; SSE-NEXT: packuswb %xmm15, %xmm15
-; SSE-NEXT: pand %xmm6, %xmm15
-; SSE-NEXT: pandn %xmm3, %xmm6
-; SSE-NEXT: por %xmm6, %xmm15
-; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: pand %xmm11, %xmm15
+; SSE-NEXT: pandn %xmm5, %xmm11
+; SSE-NEXT: por %xmm15, %xmm11
+; SSE-NEXT: movdqa %xmm3, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm5[3,1,2,3,4,5,6,7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,4]
; SSE-NEXT: pand %xmm8, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm8
; SSE-NEXT: por %xmm1, %xmm8
; SSE-NEXT: packuswb %xmm8, %xmm0
-; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: pand %xmm13, %xmm15
-; SSE-NEXT: movdqa %xmm13, %xmm7
-; SSE-NEXT: por %xmm15, %xmm1
+; SSE-NEXT: pand %xmm9, %xmm11
+; SSE-NEXT: por %xmm11, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE-NEXT: pand %xmm5, %xmm10
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm15 = [255,255,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: pand %xmm5, %xmm13
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm8, %xmm0
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,3,4,5,6,7]
@@ -2811,10 +2810,11 @@ define void @load_i8_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,5,6]
; SSE-NEXT: packuswb %xmm1, %xmm2
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm12[2,1,2,3,4,5,6,7]
+; SSE-NEXT: movdqa (%rsp), %xmm6 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm6[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,7]
-; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: pand %xmm8, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,5,5,5,5]
@@ -2824,26 +2824,25 @@ define void @load_i8_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pandn %xmm1, %xmm3
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: por %xmm2, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pandn %xmm14, %xmm1
-; SSE-NEXT: por %xmm1, %xmm11
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm11[0,3,2,3,4,5,6,7]
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: por %xmm1, %xmm12
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm12[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pand %xmm15, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,4]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm13, %xmm2
+; SSE-NEXT: movdqa %xmm9, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: pand %xmm13, %xmm3
+; SSE-NEXT: pand %xmm9, %xmm3
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm5, %xmm9
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: pand %xmm15, %xmm1
+; SSE-NEXT: pand %xmm5, %xmm10
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm1
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,3,4,5,6,7]
@@ -2851,11 +2850,11 @@ define void @load_i8_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,6]
; SSE-NEXT: packuswb %xmm2, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm13[2,1,2,3,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm14[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
-; SSE-NEXT: pand %xmm15, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
@@ -2864,306 +2863,305 @@ define void @load_i8_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: por %xmm1, %xmm8
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm8[0,3,2,3,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; SSE-NEXT: por %xmm1, %xmm11
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm11[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pand %xmm15, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,4]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm7, %xmm2
+; SSE-NEXT: movdqa %xmm9, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: pand %xmm7, %xmm3
+; SSE-NEXT: pand %xmm9, %xmm3
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: movdqa %xmm13, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
-; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm4[8],xmm13[9],xmm4[9],xmm13[10],xmm4[10],xmm13[11],xmm4[11],xmm13[12],xmm4[12],xmm13[13],xmm4[13],xmm13[14],xmm4[14],xmm13[15],xmm4[15]
+; SSE-NEXT: movdqa %xmm13, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[3,0]
; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm2[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm10[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm10[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm13[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm13[2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,7,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: movdqa %xmm6, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[3,1,2,1,4,5,6,7]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,0,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm1, %xmm5
; SSE-NEXT: pandn %xmm3, %xmm5
-; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm4[8],xmm12[9],xmm4[9],xmm12[10],xmm4[10],xmm12[11],xmm4[11],xmm12[12],xmm4[12],xmm12[13],xmm4[13],xmm12[14],xmm4[14],xmm12[15],xmm4[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm12[0,3,2,1]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm6[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,7,7,7]
; SSE-NEXT: pand %xmm1, %xmm3
; SSE-NEXT: por %xmm5, %xmm3
; SSE-NEXT: packuswb %xmm3, %xmm3
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: pandn %xmm3, %xmm5
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: pandn %xmm3, %xmm6
; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: por %xmm2, %xmm5
-; SSE-NEXT: movdqa %xmm11, %xmm2
+; SSE-NEXT: por %xmm2, %xmm6
+; SSE-NEXT: movdqa %xmm12, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,7,5,6,5]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,0,65535,65535,0]
-; SSE-NEXT: movdqa %xmm2, %xmm6
-; SSE-NEXT: pandn %xmm3, %xmm6
-; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm11[0,2,0,3]
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm5
+; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3],xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm12[0,2,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
; SSE-NEXT: pand %xmm2, %xmm3
-; SSE-NEXT: por %xmm6, %xmm3
+; SSE-NEXT: por %xmm5, %xmm3
; SSE-NEXT: packuswb %xmm3, %xmm3
-; SSE-NEXT: movdqa %xmm7, %xmm6
-; SSE-NEXT: pandn %xmm3, %xmm6
-; SSE-NEXT: pand %xmm7, %xmm5
-; SSE-NEXT: por %xmm5, %xmm6
-; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: movdqa %xmm9, %xmm15
+; SSE-NEXT: pandn %xmm3, %xmm15
+; SSE-NEXT: pand %xmm9, %xmm6
+; SSE-NEXT: por %xmm6, %xmm15
+; SSE-NEXT: movdqa %xmm10, %xmm3
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm4[8],xmm9[9],xmm4[9],xmm9[10],xmm4[10],xmm9[11],xmm4[11],xmm9[12],xmm4[12],xmm9[13],xmm4[13],xmm9[14],xmm4[14],xmm9[15],xmm4[15]
-; SSE-NEXT: movdqa %xmm9, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[3,0]
-; SSE-NEXT: movaps %xmm3, %xmm6
-; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm5[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm9[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm9[2,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm6[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,0,2]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
+; SSE-NEXT: movdqa %xmm10, %xmm6
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm3[3,0]
+; SSE-NEXT: movaps %xmm3, %xmm7
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm6[0,2]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm10[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm10[2,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm7[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,2]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,3,4,5,6,7]
-; SSE-NEXT: packuswb %xmm3, %xmm5
-; SSE-NEXT: movdqa %xmm13, %xmm3
+; SSE-NEXT: packuswb %xmm3, %xmm6
+; SSE-NEXT: movdqa %xmm14, %xmm3
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,1,4,5,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm4[8],xmm13[9],xmm4[9],xmm13[10],xmm4[10],xmm13[11],xmm4[11],xmm13[12],xmm4[12],xmm13[13],xmm4[13],xmm13[14],xmm4[14],xmm13[15],xmm4[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm13[0,3,2,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
-; SSE-NEXT: pand %xmm1, %xmm6
+; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm4[8],xmm14[9],xmm4[9],xmm14[10],xmm4[10],xmm14[11],xmm4[11],xmm14[12],xmm4[12],xmm14[13],xmm4[13],xmm14[14],xmm4[14],xmm14[15],xmm4[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm14[0,3,2,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pand %xmm1, %xmm7
; SSE-NEXT: pandn %xmm3, %xmm1
-; SSE-NEXT: por %xmm6, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm5
+; SSE-NEXT: por %xmm7, %xmm1
+; SSE-NEXT: pand %xmm0, %xmm6
; SSE-NEXT: packuswb %xmm1, %xmm1
; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: por %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm8, %xmm1
+; SSE-NEXT: por %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm11, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,5]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[0,2,0,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm11[0,2,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: movdqa %xmm7, %xmm13
-; SSE-NEXT: pand %xmm7, %xmm0
+; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: pand %xmm9, %xmm0
; SSE-NEXT: packuswb %xmm2, %xmm1
-; SSE-NEXT: pandn %xmm1, %xmm13
-; SSE-NEXT: por %xmm0, %xmm13
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm7, %xmm0
-; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: pand %xmm8, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
-; SSE-NEXT: packuswb %xmm1, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[3,1,2,0]
-; SSE-NEXT: pand %xmm15, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[2,1,0,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,2]
+; SSE-NEXT: packuswb %xmm1, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pandn %xmm2, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[3,1,2,0]
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,1,0,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm3, %xmm3
-; SSE-NEXT: pand %xmm2, %xmm3
-; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm14, %xmm11
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE-NEXT: pand %xmm12, %xmm11
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[0,2,1,3]
-; SSE-NEXT: pand %xmm15, %xmm0
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
-; SSE-NEXT: packuswb %xmm0, %xmm5
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,0,0,0]
-; SSE-NEXT: movdqa %xmm0, %xmm8
-; SSE-NEXT: pandn %xmm5, %xmm8
; SSE-NEXT: pand %xmm0, %xmm3
-; SSE-NEXT: por %xmm3, %xmm8
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; SSE-NEXT: movdqa {{.*#+}} xmm13 = [65535,0,65535,65535,0,65535,65535,0]
+; SSE-NEXT: pand %xmm13, %xmm12
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[0,2,1,3]
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,1,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
+; SSE-NEXT: packuswb %xmm1, %xmm6
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,65535,0,0,0]
+; SSE-NEXT: movdqa %xmm1, %xmm5
+; SSE-NEXT: pandn %xmm6, %xmm5
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: por %xmm3, %xmm5
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm14, %xmm3
-; SSE-NEXT: pand %xmm15, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
-; SSE-NEXT: packuswb %xmm5, %xmm3
-; SSE-NEXT: movdqa %xmm2, %xmm5
-; SSE-NEXT: pandn %xmm3, %xmm5
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,1,2,0]
-; SSE-NEXT: pand %xmm15, %xmm3
+; SSE-NEXT: packuswb %xmm6, %xmm3
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: pandn %xmm3, %xmm6
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[3,1,2,0]
+; SSE-NEXT: pand %xmm8, %xmm3
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm3[2,1,0,3,4,5,6,7]
-; SSE-NEXT: packuswb %xmm6, %xmm6
-; SSE-NEXT: pand %xmm2, %xmm6
-; SSE-NEXT: por %xmm5, %xmm6
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm3[2,1,0,3,4,5,6,7]
+; SSE-NEXT: packuswb %xmm7, %xmm7
+; SSE-NEXT: pand %xmm0, %xmm7
+; SSE-NEXT: por %xmm6, %xmm7
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: pand %xmm12, %xmm3
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; SSE-NEXT: por %xmm3, %xmm12
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm12[0,2,1,3]
-; SSE-NEXT: pand %xmm15, %xmm3
+; SSE-NEXT: pand %xmm13, %xmm3
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
+; SSE-NEXT: por %xmm3, %xmm13
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[0,2,1,3]
+; SSE-NEXT: pand %xmm8, %xmm3
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,4,7]
-; SSE-NEXT: packuswb %xmm3, %xmm5
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pandn %xmm5, %xmm3
-; SSE-NEXT: pand %xmm0, %xmm6
-; SSE-NEXT: por %xmm6, %xmm3
-; SSE-NEXT: movdqa %xmm7, %xmm5
-; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3],xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
-; SSE-NEXT: movdqa %xmm7, %xmm6
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm5[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm5[2,3]
-; SSE-NEXT: psrlq $48, %xmm5
-; SSE-NEXT: psrldq {{.*#+}} xmm6 = xmm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm7[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,5,7]
-; SSE-NEXT: packuswb %xmm6, %xmm5
-; SSE-NEXT: movdqa %xmm2, %xmm6
-; SSE-NEXT: pandn %xmm5, %xmm6
-; SSE-NEXT: movdqa %xmm9, %xmm5
-; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm5[0,1,2,3,5,5,5,5]
-; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm5, %xmm10
-; SSE-NEXT: pandn %xmm7, %xmm10
+; SSE-NEXT: packuswb %xmm3, %xmm6
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm6, %xmm3
+; SSE-NEXT: pand %xmm1, %xmm7
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: movdqa %xmm9, %xmm6
+; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1],xmm9[2],xmm4[2],xmm9[3],xmm4[3],xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm9[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm7[3,1,1,2,4,5,6,7]
-; SSE-NEXT: pand %xmm5, %xmm9
-; SSE-NEXT: por %xmm10, %xmm9
-; SSE-NEXT: packuswb %xmm9, %xmm9
-; SSE-NEXT: pand %xmm2, %xmm9
-; SSE-NEXT: por %xmm6, %xmm9
-; SSE-NEXT: movdqa %xmm11, %xmm6
-; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm4[8],xmm11[9],xmm4[9],xmm11[10],xmm4[10],xmm11[11],xmm4[11],xmm11[12],xmm4[12],xmm11[13],xmm4[13],xmm11[14],xmm4[14],xmm11[15],xmm4[15]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm11[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm7[0,1,2,3,5,5,7,4]
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,65535,65535,0,65535,0,0]
-; SSE-NEXT: movdqa %xmm7, %xmm11
-; SSE-NEXT: pandn %xmm10, %xmm11
-; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3],xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,3,1,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,1,3,4,5,6,7]
-; SSE-NEXT: pand %xmm7, %xmm6
-; SSE-NEXT: por %xmm6, %xmm11
-; SSE-NEXT: packuswb %xmm11, %xmm10
-; SSE-NEXT: movdqa %xmm0, %xmm6
-; SSE-NEXT: pandn %xmm10, %xmm6
-; SSE-NEXT: pand %xmm0, %xmm9
-; SSE-NEXT: por %xmm9, %xmm6
-; SSE-NEXT: movdqa %xmm14, %xmm11
-; SSE-NEXT: movdqa %xmm14, %xmm9
-; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm4[8],xmm9[9],xmm4[9],xmm9[10],xmm4[10],xmm9[11],xmm4[11],xmm9[12],xmm4[12],xmm9[13],xmm4[13],xmm9[14],xmm4[14],xmm9[15],xmm4[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
-; SSE-NEXT: movdqa %xmm11, %xmm10
-; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm9[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm9[2,3]
-; SSE-NEXT: psrlq $48, %xmm9
-; SSE-NEXT: psrldq {{.*#+}} xmm10 = xmm10[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm11[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,4,5,7]
-; SSE-NEXT: packuswb %xmm10, %xmm9
-; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: movdqa %xmm9, %xmm7
+; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,0],xmm6[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,0],xmm6[2,3]
+; SSE-NEXT: psrlq $48, %xmm6
+; SSE-NEXT: psrldq {{.*#+}} xmm7 = xmm7[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,5,7]
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: movdqa %xmm0, %xmm7
+; SSE-NEXT: pandn %xmm6, %xmm7
+; SSE-NEXT: movdqa %xmm10, %xmm6
+; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,2,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm6[0,1,2,3,5,5,5,5]
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,0,65535,65535,0,65535,65535,65535]
+; SSE-NEXT: movdqa %xmm6, %xmm11
+; SSE-NEXT: pandn %xmm9, %xmm11
+; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3],xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm10[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm9[3,1,1,2,4,5,6,7]
+; SSE-NEXT: pand %xmm6, %xmm10
+; SSE-NEXT: por %xmm11, %xmm10
+; SSE-NEXT: packuswb %xmm10, %xmm10
+; SSE-NEXT: pand %xmm0, %xmm10
+; SSE-NEXT: por %xmm7, %xmm10
+; SSE-NEXT: movdqa %xmm12, %xmm7
+; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm4[8],xmm12[9],xmm4[9],xmm12[10],xmm4[10],xmm12[11],xmm4[11],xmm12[12],xmm4[12],xmm12[13],xmm4[13],xmm12[14],xmm4[14],xmm12[15],xmm4[15]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm12[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm9[0,1,2,3,5,5,7,4]
+; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,65535,65535,65535,0,65535,0,0]
+; SSE-NEXT: movdqa %xmm9, %xmm12
+; SSE-NEXT: pandn %xmm11, %xmm12
+; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3],xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,3,1,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
+; SSE-NEXT: pand %xmm9, %xmm7
+; SSE-NEXT: por %xmm7, %xmm12
+; SSE-NEXT: packuswb %xmm12, %xmm11
+; SSE-NEXT: movdqa %xmm1, %xmm7
+; SSE-NEXT: pandn %xmm11, %xmm7
+; SSE-NEXT: pand %xmm1, %xmm10
+; SSE-NEXT: por %xmm10, %xmm7
+; SSE-NEXT: movdqa %xmm14, %xmm10
; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,2,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm1[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[3,1,1,2,4,5,6,7]
-; SSE-NEXT: pand %xmm5, %xmm11
-; SSE-NEXT: pandn %xmm10, %xmm5
-; SSE-NEXT: por %xmm11, %xmm5
-; SSE-NEXT: packuswb %xmm5, %xmm5
-; SSE-NEXT: pand %xmm2, %xmm5
-; SSE-NEXT: pandn %xmm9, %xmm2
-; SSE-NEXT: por %xmm2, %xmm5
-; SSE-NEXT: movdqa %xmm12, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm4[0],xmm14[1],xmm4[1],xmm14[2],xmm4[2],xmm14[3],xmm4[3],xmm14[4],xmm4[4],xmm14[5],xmm4[5],xmm14[6],xmm4[6],xmm14[7],xmm4[7]
+; SSE-NEXT: movdqa %xmm14, %xmm11
+; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[1,0],xmm10[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[2,0],xmm10[2,3]
+; SSE-NEXT: psrlq $48, %xmm10
+; SSE-NEXT: psrldq {{.*#+}} xmm11 = xmm11[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm14[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,4,5,7]
+; SSE-NEXT: packuswb %xmm11, %xmm10
+; SSE-NEXT: movdqa %xmm2, %xmm11
+; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm4[8],xmm11[9],xmm4[9],xmm11[10],xmm4[10],xmm11[11],xmm4[11],xmm11[12],xmm4[12],xmm11[13],xmm4[13],xmm11[14],xmm4[14],xmm11[15],xmm4[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[1,1,2,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,5,5,5,5]
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm4[8],xmm12[9],xmm4[9],xmm12[10],xmm4[10],xmm12[11],xmm4[11],xmm12[12],xmm4[12],xmm12[13],xmm4[13],xmm12[14],xmm4[14],xmm12[15],xmm4[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,1,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
-; SSE-NEXT: pand %xmm7, %xmm2
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm12[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,7,4]
-; SSE-NEXT: pandn %xmm4, %xmm7
-; SSE-NEXT: por %xmm2, %xmm7
-; SSE-NEXT: pand %xmm0, %xmm5
-; SSE-NEXT: packuswb %xmm7, %xmm2
-; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: por %xmm5, %xmm0
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movaps %xmm2, 16(%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movaps %xmm2, (%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movaps %xmm1, 16(%rdx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movaps %xmm1, (%rdx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movaps %xmm1, 16(%rcx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movaps %xmm1, (%rcx)
-; SSE-NEXT: movdqa %xmm13, 16(%r8)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movaps %xmm1, (%r8)
+; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm2[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm12[3,1,1,2,4,5,6,7]
+; SSE-NEXT: pand %xmm6, %xmm12
+; SSE-NEXT: pandn %xmm11, %xmm6
+; SSE-NEXT: por %xmm12, %xmm6
+; SSE-NEXT: packuswb %xmm6, %xmm6
+; SSE-NEXT: pand %xmm0, %xmm6
+; SSE-NEXT: pandn %xmm10, %xmm0
+; SSE-NEXT: por %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm13, %xmm6
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3],xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm4[8],xmm13[9],xmm4[9],xmm13[10],xmm4[10],xmm13[11],xmm4[11],xmm13[12],xmm4[12],xmm13[13],xmm4[13],xmm13[14],xmm4[14],xmm13[15],xmm4[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[0,3,1,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
+; SSE-NEXT: pand %xmm9, %xmm4
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm13[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,7,4]
+; SSE-NEXT: pandn %xmm6, %xmm9
+; SSE-NEXT: por %xmm4, %xmm9
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm9, %xmm4
+; SSE-NEXT: pandn %xmm4, %xmm1
+; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, 16(%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, (%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, 16(%rdx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, (%rdx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, 16(%rcx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, (%rcx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, 16(%r8)
+; SSE-NEXT: movdqa %xmm15, (%r8)
; SSE-NEXT: movdqa %xmm3, 16(%r9)
-; SSE-NEXT: movdqa %xmm8, (%r9)
+; SSE-NEXT: movdqa %xmm5, (%r9)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT: movdqa %xmm0, 16(%rax)
-; SSE-NEXT: movdqa %xmm6, (%rax)
-; SSE-NEXT: addq $264, %rsp # imm = 0x108
+; SSE-NEXT: movdqa %xmm1, 16(%rax)
+; SSE-NEXT: movdqa %xmm7, (%rax)
+; SSE-NEXT: addq $280, %rsp # imm = 0x118
; SSE-NEXT: retq
;
; AVX-LABEL: load_i8_stride6_vf32:
@@ -4624,126 +4622,129 @@ define void @load_i8_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
; SSE-LABEL: load_i8_stride6_vf64:
; SSE: # %bb.0:
-; SSE-NEXT: subq $792, %rsp # imm = 0x318
-; SSE-NEXT: movdqa 64(%rdi), %xmm4
+; SSE-NEXT: subq $840, %rsp # imm = 0x348
+; SSE-NEXT: movdqa 64(%rdi), %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 80(%rdi), %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 80(%rdi), %xmm5
+; SSE-NEXT: movdqa (%rdi), %xmm8
+; SSE-NEXT: movdqa 16(%rdi), %xmm5
; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa (%rdi), %xmm7
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 16(%rdi), %xmm6
-; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 32(%rdi), %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 48(%rdi), %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm9, %xmm1
; SSE-NEXT: pandn %xmm2, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm13, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,0,65535,65535,0]
+; SSE-NEXT: movdqa %xmm2, %xmm6
+; SSE-NEXT: pandn %xmm0, %xmm6
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm9, %xmm6
+; SSE-NEXT: pandn %xmm0, %xmm6
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm9, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
; SSE-NEXT: packuswb %xmm1, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,65535,0,0,0,65535,65535]
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: pandn %xmm6, %xmm1
-; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: pand %xmm3, %xmm2
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,1,3]
-; SSE-NEXT: pand %xmm10, %xmm1
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
-; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm8, %xmm1
-; SSE-NEXT: movdqa %xmm8, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: por %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,65535,0,0,0,65535,65535]
+; SSE-NEXT: movdqa %xmm11, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: pandn %xmm5, %xmm0
-; SSE-NEXT: pand %xmm13, %xmm4
-; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,0]
-; SSE-NEXT: pand %xmm10, %xmm0
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
+; SSE-NEXT: movdqa %xmm8, %xmm5
+; SSE-NEXT: movdqa %xmm8, %xmm7
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: por %xmm0, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,2,1,3]
+; SSE-NEXT: pand %xmm6, %xmm0
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: pand %xmm11, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: pandn %xmm4, %xmm1
+; SSE-NEXT: pand %xmm9, %xmm3
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[3,1,2,0]
+; SSE-NEXT: pand %xmm6, %xmm1
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
+; SSE-NEXT: packuswb %xmm1, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pand %xmm4, %xmm1
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 320(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: movdqa %xmm9, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa 336(%rdi), %xmm12
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: pandn %xmm12, %xmm1
+; SSE-NEXT: movdqa 336(%rdi), %xmm10
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: pandn %xmm10, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm1
-; SSE-NEXT: pandn %xmm12, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: pandn %xmm10, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm13, %xmm12
-; SSE-NEXT: por %xmm0, %xmm12
-; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: pand %xmm9, %xmm10
+; SSE-NEXT: por %xmm0, %xmm10
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
; SSE-NEXT: packuswb %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm8, %xmm1
+; SSE-NEXT: movdqa %xmm11, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: movdqa 304(%rdi), %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm3, %xmm7
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: movdqa 288(%rdi), %xmm6
-; SSE-NEXT: movdqa %xmm6, %xmm2
-; SSE-NEXT: pand %xmm3, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,1,3]
-; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: movdqa 304(%rdi), %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: movdqa 288(%rdi), %xmm12
+; SSE-NEXT: movdqa %xmm12, %xmm3
+; SSE-NEXT: pand %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,1,3]
+; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: pand %xmm11, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa 368(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm2
+; SSE-NEXT: movdqa %xmm9, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: movdqa 352(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm13, %xmm3
+; SSE-NEXT: movdqa 352(%rdi), %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pand %xmm9, %xmm3
+; SSE-NEXT: movdqa %xmm9, %xmm1
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,0]
-; SSE-NEXT: pand %xmm10, %xmm2
+; SSE-NEXT: pand %xmm6, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5]
@@ -4754,173 +4755,173 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movdqa %xmm4, %xmm9
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 224(%rdi), %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa 240(%rdi), %xmm11
-; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: pandn %xmm11, %xmm2
+; SSE-NEXT: movdqa 224(%rdi), %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm2
-; SSE-NEXT: pandn %xmm11, %xmm2
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: movdqa 240(%rdi), %xmm14
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: pandn %xmm14, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm13, %xmm11
-; SSE-NEXT: por %xmm0, %xmm11
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pandn %xmm14, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm1, %xmm14
+; SSE-NEXT: por %xmm0, %xmm14
+; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
; SSE-NEXT: packuswb %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm8, %xmm2
+; SSE-NEXT: movdqa %xmm11, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: movdqa 208(%rdi), %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm7, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa 192(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: pand %xmm7, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,1,3]
-; SSE-NEXT: movdqa %xmm10, %xmm1
-; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: movdqa 208(%rdi), %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: pandn %xmm4, %xmm0
+; SSE-NEXT: movdqa 192(%rdi), %xmm8
+; SSE-NEXT: movdqa %xmm8, %xmm5
+; SSE-NEXT: pand %xmm3, %xmm5
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: por %xmm0, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,2,1,3]
+; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm8, %xmm0
-; SSE-NEXT: movdqa %xmm8, %xmm10
+; SSE-NEXT: pand %xmm11, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa 272(%rdi), %xmm14
-; SSE-NEXT: movdqa %xmm13, %xmm2
-; SSE-NEXT: pandn %xmm14, %xmm2
-; SSE-NEXT: movdqa 256(%rdi), %xmm15
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm13, %xmm15
-; SSE-NEXT: por %xmm2, %xmm15
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm15[3,1,2,0]
-; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: movdqa 272(%rdi), %xmm15
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pandn %xmm15, %xmm2
+; SSE-NEXT: movdqa 256(%rdi), %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: por %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,0]
+; SSE-NEXT: pand %xmm6, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5]
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pandn %xmm2, %xmm4
+; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: pand %xmm9, %xmm0
-; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 128(%rdi), %xmm2
; SSE-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: movdqa 144(%rdi), %xmm9
-; SSE-NEXT: movdqa %xmm7, %xmm4
-; SSE-NEXT: pandn %xmm9, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm4
-; SSE-NEXT: pandn %xmm9, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm13, %xmm9
-; SSE-NEXT: por %xmm0, %xmm9
-; SSE-NEXT: movdqa %xmm9, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: movdqa 144(%rdi), %xmm10
+; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: pandn %xmm10, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm10, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm1, %xmm10
+; SSE-NEXT: por %xmm0, %xmm10
+; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,5]
; SSE-NEXT: packuswb %xmm5, %xmm0
-; SSE-NEXT: pandn %xmm0, %xmm10
-; SSE-NEXT: movdqa %xmm13, %xmm0
-; SSE-NEXT: movdqa %xmm13, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pandn %xmm13, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pandn %xmm6, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm6, %xmm5
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pandn %xmm3, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: movdqa 112(%rdi), %xmm6
+; SSE-NEXT: movdqa %xmm11, %xmm13
+; SSE-NEXT: pandn %xmm0, %xmm13
+; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: movdqa %xmm7, %xmm8
-; SSE-NEXT: pandn %xmm6, %xmm8
-; SSE-NEXT: movdqa 160(%rdi), %xmm7
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm0, %xmm7
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pandn %xmm13, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm0, %xmm1
+; SSE-NEXT: pandn %xmm7, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pandn %xmm12, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm12, %xmm3
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pandn %xmm8, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm8, %xmm7
+; SSE-NEXT: movdqa 112(%rdi), %xmm6
+; SSE-NEXT: movdqa %xmm4, %xmm5
+; SSE-NEXT: movdqa %xmm4, %xmm9
+; SSE-NEXT: pandn %xmm6, %xmm9
+; SSE-NEXT: movdqa 160(%rdi), %xmm12
+; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm12
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: pandn %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pandn %xmm8, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm8
+; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: pandn %xmm1, %xmm8
+; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: pand %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm8
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: pandn %xmm3, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pandn %xmm3, %xmm8
+; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm7
+; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pandn %xmm15, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm15
+; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pandn %xmm6, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 96(%rdi), %xmm15
+; SSE-NEXT: movdqa %xmm15, %xmm5
+; SSE-NEXT: pand %xmm0, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 176(%rdi), %xmm7
+; SSE-NEXT: movdqa %xmm7, %xmm8
+; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm8
+; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm0, %xmm5
; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm1, %xmm5
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pandn %xmm14, %xmm5
-; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm0, %xmm14
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pandn %xmm6, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 96(%rdi), %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 176(%rdi), %xmm14
-; SSE-NEXT: movdqa %xmm14, %xmm2
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm0, %xmm13
-; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm0, %xmm1
@@ -4928,38 +4929,37 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pand %xmm0, %xmm6
; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm0, %xmm13
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pandn %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pandn %xmm15, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm4, %xmm3
-; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
-; SSE-NEXT: por %xmm8, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,2,1,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm1, %xmm5
+; SSE-NEXT: movdqa %xmm15, %xmm2
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT: por %xmm9, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,2,1,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm6, %xmm5
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,7]
; SSE-NEXT: packuswb %xmm5, %xmm5
-; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,65535,0,0,0,65535,65535]
-; SSE-NEXT: pand %xmm8, %xmm5
-; SSE-NEXT: por %xmm10, %xmm5
-; SSE-NEXT: pandn %xmm14, %xmm0
-; SSE-NEXT: por %xmm0, %xmm7
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[3,1,2,0]
-; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: pand %xmm11, %xmm5
+; SSE-NEXT: por %xmm13, %xmm5
+; SSE-NEXT: pandn %xmm7, %xmm0
+; SSE-NEXT: por %xmm0, %xmm12
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[3,1,2,0]
+; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; SSE-NEXT: movdqa %xmm7, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: pand %xmm10, %xmm5
+; SSE-NEXT: pand %xmm7, %xmm5
; SSE-NEXT: por %xmm5, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pxor %xmm5, %xmm5
@@ -4970,194 +4970,194 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,2,3,3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm1[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,7,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm14 = xmm14[2],xmm0[2],xmm14[3],xmm0[3]
-; SSE-NEXT: packuswb %xmm14, %xmm4
-; SSE-NEXT: movdqa %xmm8, %xmm1
-; SSE-NEXT: pandn %xmm4, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm1[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,7,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm15 = xmm15[2],xmm0[2],xmm15[3],xmm0[3]
+; SSE-NEXT: packuswb %xmm15, %xmm4
+; SSE-NEXT: movdqa %xmm11, %xmm8
+; SSE-NEXT: pandn %xmm4, %xmm8
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,7,6,7]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,0,0,65535,65535]
-; SSE-NEXT: movdqa %xmm0, %xmm14
-; SSE-NEXT: pandn %xmm4, %xmm14
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,3,2,0,4,5,6,7]
-; SSE-NEXT: pand %xmm0, %xmm4
-; SSE-NEXT: por %xmm14, %xmm4
-; SSE-NEXT: packuswb %xmm4, %xmm4
-; SSE-NEXT: pand %xmm8, %xmm4
-; SSE-NEXT: por %xmm1, %xmm4
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm6, %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm15
+; SSE-NEXT: pandn %xmm4, %xmm15
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm1[0,1,2,3,5,5,5,5]
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: pandn %xmm14, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm6[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm14[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,7,6,4]
-; SSE-NEXT: pand %xmm2, %xmm14
-; SSE-NEXT: por %xmm1, %xmm14
-; SSE-NEXT: packuswb %xmm14, %xmm1
-; SSE-NEXT: movdqa %xmm10, %xmm14
-; SSE-NEXT: pandn %xmm1, %xmm14
-; SSE-NEXT: pand %xmm10, %xmm4
-; SSE-NEXT: por %xmm4, %xmm14
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm12, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3],xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm12[2,2,3,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,5,7,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm1[2],xmm12[3],xmm1[3]
-; SSE-NEXT: packuswb %xmm12, %xmm4
-; SSE-NEXT: movdqa %xmm8, %xmm14
-; SSE-NEXT: movdqa %xmm8, %xmm1
-; SSE-NEXT: pandn %xmm4, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,7,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm12
-; SSE-NEXT: pandn %xmm4, %xmm12
-; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm6[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,3,2,0,4,5,6,7]
; SSE-NEXT: pand %xmm0, %xmm4
-; SSE-NEXT: por %xmm12, %xmm4
+; SSE-NEXT: por %xmm15, %xmm4
; SSE-NEXT: packuswb %xmm4, %xmm4
-; SSE-NEXT: pand %xmm8, %xmm4
-; SSE-NEXT: por %xmm1, %xmm4
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm6, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; SSE-NEXT: movdqa %xmm2, %xmm12
-; SSE-NEXT: pandn %xmm1, %xmm12
-; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,4]
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: por %xmm12, %xmm1
-; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm10, %xmm12
-; SSE-NEXT: pandn %xmm1, %xmm12
-; SSE-NEXT: pand %xmm10, %xmm4
-; SSE-NEXT: por %xmm4, %xmm12
-; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm11, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm5[0],xmm11[1],xmm5[1],xmm11[2],xmm5[2],xmm11[3],xmm5[3],xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm11[2,2,3,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm11[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,7,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm1[2],xmm8[3],xmm1[3]
-; SSE-NEXT: packuswb %xmm8, %xmm4
-; SSE-NEXT: movdqa %xmm14, %xmm1
-; SSE-NEXT: pandn %xmm4, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,7,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm8
-; SSE-NEXT: pandn %xmm4, %xmm8
-; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm6[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,3,2,0,4,5,6,7]
-; SSE-NEXT: pand %xmm0, %xmm4
+; SSE-NEXT: pand %xmm11, %xmm4
; SSE-NEXT: por %xmm8, %xmm4
-; SSE-NEXT: packuswb %xmm4, %xmm4
-; SSE-NEXT: pand %xmm14, %xmm4
-; SSE-NEXT: por %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm15, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; SSE-NEXT: movdqa %xmm2, %xmm8
-; SSE-NEXT: pandn %xmm1, %xmm8
-; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm5[8],xmm15[9],xmm5[9],xmm15[10],xmm5[10],xmm15[11],xmm5[11],xmm15[12],xmm5[12],xmm15[13],xmm5[13],xmm15[14],xmm5[14],xmm15[15],xmm5[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm15[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,4]
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: por %xmm8, %xmm1
-; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm10, %xmm8
-; SSE-NEXT: pandn %xmm1, %xmm8
-; SSE-NEXT: pand %xmm10, %xmm4
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm5[0],xmm8[1],xmm5[1],xmm8[2],xmm5[2],xmm8[3],xmm5[3],xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm8[0,1,2,3,5,5,5,5]
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm1, %xmm8
+; SSE-NEXT: pandn %xmm15, %xmm8
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
+; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm3[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm15[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,7,6,4]
+; SSE-NEXT: pand %xmm1, %xmm15
+; SSE-NEXT: por %xmm8, %xmm15
+; SSE-NEXT: packuswb %xmm15, %xmm8
+; SSE-NEXT: movdqa %xmm7, %xmm15
+; SSE-NEXT: pandn %xmm8, %xmm15
+; SSE-NEXT: pand %xmm7, %xmm4
+; SSE-NEXT: por %xmm4, %xmm15
+; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,2,3,3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
+; SSE-NEXT: psrld $16, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm3[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,5,7,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm13 = xmm13[2],xmm4[2],xmm13[3],xmm4[3]
+; SSE-NEXT: packuswb %xmm13, %xmm8
+; SSE-NEXT: movdqa %xmm11, %xmm4
+; SSE-NEXT: pandn %xmm8, %xmm4
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm5[8],xmm8[9],xmm5[9],xmm8[10],xmm5[10],xmm8[11],xmm5[11],xmm8[12],xmm5[12],xmm8[13],xmm5[13],xmm8[14],xmm5[14],xmm8[15],xmm5[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,7,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm13
+; SSE-NEXT: pandn %xmm8, %xmm13
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm3[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[1,3,2,0,4,5,6,7]
+; SSE-NEXT: pand %xmm0, %xmm8
+; SSE-NEXT: por %xmm13, %xmm8
+; SSE-NEXT: packuswb %xmm8, %xmm8
+; SSE-NEXT: pand %xmm11, %xmm8
; SSE-NEXT: por %xmm4, %xmm8
-; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3],xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm9[2,2,3,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm9[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,7,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm1[2],xmm6[3],xmm1[3]
-; SSE-NEXT: packuswb %xmm6, %xmm4
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; SSE-NEXT: movdqa %xmm1, %xmm13
+; SSE-NEXT: pandn %xmm4, %xmm13
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,7,6,4]
+; SSE-NEXT: pand %xmm1, %xmm4
+; SSE-NEXT: por %xmm13, %xmm4
+; SSE-NEXT: packuswb %xmm4, %xmm4
+; SSE-NEXT: movdqa %xmm7, %xmm13
+; SSE-NEXT: pandn %xmm4, %xmm13
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: por %xmm8, %xmm13
+; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm14, %xmm4
+; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm5[0],xmm14[1],xmm5[1],xmm14[2],xmm5[2],xmm14[3],xmm5[3],xmm14[4],xmm5[4],xmm14[5],xmm5[5],xmm14[6],xmm5[6],xmm14[7],xmm5[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm14[2,2,3,3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
+; SSE-NEXT: psrld $16, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm14[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,5,7,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
+; SSE-NEXT: packuswb %xmm9, %xmm8
+; SSE-NEXT: movdqa %xmm11, %xmm4
+; SSE-NEXT: pandn %xmm8, %xmm4
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm5[8],xmm8[9],xmm5[9],xmm8[10],xmm5[10],xmm8[11],xmm5[11],xmm8[12],xmm5[12],xmm8[13],xmm5[13],xmm8[14],xmm5[14],xmm8[15],xmm5[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,7,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm9
+; SSE-NEXT: pandn %xmm8, %xmm9
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,3,2,0,4,5,6,7]
-; SSE-NEXT: pand %xmm0, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: por %xmm3, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm3[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[1,3,2,0,4,5,6,7]
+; SSE-NEXT: pand %xmm0, %xmm8
+; SSE-NEXT: por %xmm9, %xmm8
+; SSE-NEXT: packuswb %xmm8, %xmm8
+; SSE-NEXT: pand %xmm11, %xmm8
+; SSE-NEXT: por %xmm4, %xmm8
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; SSE-NEXT: movdqa %xmm1, %xmm9
+; SSE-NEXT: pandn %xmm4, %xmm9
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,7,6,4]
+; SSE-NEXT: pand %xmm1, %xmm4
+; SSE-NEXT: por %xmm9, %xmm4
+; SSE-NEXT: packuswb %xmm4, %xmm4
+; SSE-NEXT: movdqa %xmm7, %xmm9
+; SSE-NEXT: movdqa %xmm7, %xmm3
+; SSE-NEXT: pandn %xmm4, %xmm3
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: por %xmm8, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm10, %xmm4
+; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3],xmm10[4],xmm5[4],xmm10[5],xmm5[5],xmm10[6],xmm5[6],xmm10[7],xmm5[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm10[2,2,3,3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
+; SSE-NEXT: psrld $16, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm10[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE-NEXT: packuswb %xmm3, %xmm8
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,3,2,0,4,5,6,7]
+; SSE-NEXT: pand %xmm0, %xmm2
+; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm14, %xmm1
-; SSE-NEXT: pand %xmm14, %xmm0
-; SSE-NEXT: pandn %xmm4, %xmm1
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: pand %xmm11, %xmm0
+; SSE-NEXT: pandn %xmm8, %xmm11
+; SSE-NEXT: por %xmm0, %xmm11
+; SSE-NEXT: movdqa %xmm12, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm5[8],xmm12[9],xmm5[9],xmm12[10],xmm5[10],xmm12[11],xmm5[11],xmm12[12],xmm5[12],xmm12[13],xmm5[13],xmm12[14],xmm5[14],xmm12[15],xmm5[15]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm12[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,7,6,4]
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm5[8],xmm7[9],xmm5[9],xmm7[10],xmm5[10],xmm7[11],xmm5[11],xmm7[12],xmm5[12],xmm7[13],xmm5[13],xmm7[14],xmm5[14],xmm7[15],xmm5[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,4]
-; SSE-NEXT: pand %xmm2, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: packuswb %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm10, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: pand %xmm10, %xmm0
-; SSE-NEXT: movdqa %xmm10, %xmm11
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,0,65535,65535,0,65535,65535,0]
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: pand %xmm7, %xmm11
+; SSE-NEXT: por %xmm11, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,0,65535,65535,0,65535,65535,0]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pand %xmm14, %xmm4
+; SSE-NEXT: pand %xmm7, %xmm4
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: movdqa %xmm6, %xmm13
+; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,3,4,5,6,7]
@@ -5169,41 +5169,38 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
-; SSE-NEXT: pand %xmm10, %xmm1
-; SSE-NEXT: movdqa %xmm10, %xmm12
+; SSE-NEXT: pand %xmm13, %xmm1
+; SSE-NEXT: movdqa %xmm13, %xmm15
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm5 = [0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,3,2,3,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; SSE-NEXT: por %xmm0, %xmm14
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm14[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: pand %xmm13, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm11, %xmm2
+; SSE-NEXT: movdqa %xmm9, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pand %xmm11, %xmm3
-; SSE-NEXT: movdqa %xmm11, %xmm8
+; SSE-NEXT: pand %xmm9, %xmm3
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pand %xmm14, %xmm13
-; SSE-NEXT: movdqa %xmm14, %xmm7
+; SSE-NEXT: pand %xmm7, %xmm13
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm13, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: pand %xmm15, %xmm0
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,3,4,5,6,7]
@@ -5211,41 +5208,43 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
; SSE-NEXT: packuswb %xmm2, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm14[2,1,2,3,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
-; SSE-NEXT: pand %xmm10, %xmm2
+; SSE-NEXT: pand %xmm15, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; SSE-NEXT: por %xmm0, %xmm11
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[0,3,2,3,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; SSE-NEXT: por %xmm0, %xmm12
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm12[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: pand %xmm15, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm8, %xmm2
+; SSE-NEXT: movdqa %xmm9, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pand %xmm8, %xmm3
-; SSE-NEXT: movdqa %xmm8, %xmm9
+; SSE-NEXT: pand %xmm9, %xmm3
+; SSE-NEXT: movdqa %xmm9, %xmm10
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT: pand %xmm7, %xmm10
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; SSE-NEXT: pand %xmm7, %xmm11
+; SSE-NEXT: movdqa %xmm7, %xmm9
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: pand %xmm15, %xmm0
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,3,4,5,6,7]
@@ -5253,19 +5252,18 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
; SSE-NEXT: packuswb %xmm2, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,1,2,3,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm7[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
-; SSE-NEXT: pand %xmm12, %xmm2
+; SSE-NEXT: pand %xmm15, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
@@ -5273,22 +5271,22 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: por %xmm0, %xmm8
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: pand %xmm15, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm10, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pand %xmm9, %xmm3
-; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: pand %xmm10, %xmm3
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: pand %xmm7, %xmm0
+; SSE-NEXT: pand %xmm9, %xmm0
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: movdqa %xmm15, %xmm3
+; SSE-NEXT: pand %xmm15, %xmm0
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,3,4,5,6,7]
@@ -5300,38 +5298,36 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm15[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
-; SSE-NEXT: pand %xmm12, %xmm2
-; SSE-NEXT: movdqa %xmm12, %xmm9
+; SSE-NEXT: pand %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm10
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; SSE-NEXT: por %xmm0, %xmm12
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm12[0,3,2,3,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: por %xmm0, %xmm9
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm9[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
-; SSE-NEXT: pand %xmm9, %xmm0
+; SSE-NEXT: pand %xmm10, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm9
-; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; SSE-NEXT: movdqa %xmm10, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: pand %xmm10, %xmm3
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; SSE-NEXT: pxor %xmm7, %xmm7
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[3,0]
; SSE-NEXT: movaps %xmm0, %xmm3
@@ -5345,46 +5341,44 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,65535,65535,0,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: pandn %xmm0, %xmm4
-; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm5, %xmm6
+; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: pand %xmm5, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
; SSE-NEXT: por %xmm2, %xmm6
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm7[8],xmm0[9],xmm7[9],xmm0[10],xmm7[10],xmm0[11],xmm7[11],xmm0[12],xmm7[12],xmm0[13],xmm7[13],xmm0[14],xmm7[14],xmm0[15],xmm7[15]
+; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535,0,65535,65535,0]
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,0,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm5[0],xmm14[1],xmm5[1],xmm14[2],xmm5[2],xmm14[3],xmm5[3],xmm14[4],xmm5[4],xmm14[5],xmm5[5],xmm14[6],xmm5[6],xmm14[7],xmm5[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[0,2,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
; SSE-NEXT: pand %xmm4, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm10, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pand %xmm9, %xmm6
+; SSE-NEXT: pand %xmm10, %xmm6
; SSE-NEXT: por %xmm6, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm13, %xmm0
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm1[8],xmm13[9],xmm1[9],xmm13[10],xmm1[10],xmm13[11],xmm1[11],xmm13[12],xmm1[12],xmm13[13],xmm1[13],xmm13[14],xmm1[14],xmm13[15],xmm1[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm5[8],xmm13[9],xmm5[9],xmm13[10],xmm5[10],xmm13[11],xmm5[11],xmm13[12],xmm5[12],xmm13[13],xmm5[13],xmm13[14],xmm5[14],xmm13[15],xmm5[15]
; SSE-NEXT: movdqa %xmm13, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[3,0]
; SSE-NEXT: movaps %xmm0, %xmm6
@@ -5397,94 +5391,94 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm14, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
; SSE-NEXT: movdqa %xmm3, %xmm6
; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm1[8],xmm14[9],xmm1[9],xmm14[10],xmm1[10],xmm14[11],xmm1[11],xmm14[12],xmm1[12],xmm14[13],xmm1[13],xmm14[14],xmm1[14],xmm14[15],xmm1[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[0,3,2,1]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm5[8],xmm13[9],xmm5[9],xmm13[10],xmm5[10],xmm13[11],xmm5[11],xmm13[12],xmm5[12],xmm13[13],xmm5[13],xmm13[14],xmm5[14],xmm13[15],xmm5[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: por %xmm6, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm5, %xmm6
+; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: pand %xmm5, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
; SSE-NEXT: por %xmm2, %xmm6
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; SSE-NEXT: movdqa %xmm12, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5]
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3],xmm11[4],xmm1[4],xmm11[5],xmm1[5],xmm11[6],xmm1[6],xmm11[7],xmm1[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[0,2,0,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3],xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[0,2,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
; SSE-NEXT: pand %xmm4, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm10, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pand %xmm9, %xmm6
+; SSE-NEXT: pand %xmm10, %xmm6
; SSE-NEXT: por %xmm6, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm1[8],xmm10[9],xmm1[9],xmm10[10],xmm1[10],xmm10[11],xmm1[11],xmm10[12],xmm1[12],xmm10[13],xmm1[13],xmm10[14],xmm1[14],xmm10[15],xmm1[15]
-; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm5[8],xmm11[9],xmm5[9],xmm11[10],xmm5[10],xmm11[11],xmm5[11],xmm11[12],xmm5[12],xmm11[13],xmm5[13],xmm11[14],xmm5[14],xmm11[15],xmm5[15]
+; SSE-NEXT: movdqa %xmm11, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[3,0]
; SSE-NEXT: movaps %xmm0, %xmm6
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm2[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm10[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm10[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm11[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm11[2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm6[0,1,2,3,7,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; SSE-NEXT: movdqa %xmm7, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
; SSE-NEXT: movdqa %xmm3, %xmm6
; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm1[8],xmm7[9],xmm1[9],xmm7[10],xmm1[10],xmm7[11],xmm1[11],xmm7[12],xmm1[12],xmm7[13],xmm1[13],xmm7[14],xmm1[14],xmm7[15],xmm1[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm5[8],xmm7[9],xmm5[9],xmm7[10],xmm5[10],xmm7[11],xmm5[11],xmm7[12],xmm5[12],xmm7[13],xmm5[13],xmm7[14],xmm5[14],xmm7[15],xmm5[15]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: por %xmm6, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm5, %xmm6
+; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: pand %xmm5, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
; SSE-NEXT: por %xmm2, %xmm6
; SSE-NEXT: movdqa %xmm8, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5]
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3],xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm5[0],xmm8[1],xmm5[1],xmm8[2],xmm5[2],xmm8[3],xmm5[3],xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[0,2,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
; SSE-NEXT: pand %xmm4, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm0
; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: pand %xmm9, %xmm6
+; SSE-NEXT: pand %xmm10, %xmm6
; SSE-NEXT: por %xmm6, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
; SSE-NEXT: movdqa %xmm0, %xmm6
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm2[3,0]
; SSE-NEXT: movaps %xmm2, %xmm7
@@ -5498,381 +5492,377 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm2, %xmm6
; SSE-NEXT: movdqa %xmm15, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,1,4,5,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm1[8],xmm15[9],xmm1[9],xmm15[10],xmm1[10],xmm15[11],xmm1[11],xmm15[12],xmm1[12],xmm15[13],xmm1[13],xmm15[14],xmm1[14],xmm15[15],xmm1[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm5[8],xmm15[9],xmm5[9],xmm15[10],xmm5[10],xmm15[11],xmm5[11],xmm15[12],xmm5[12],xmm15[13],xmm5[13],xmm15[14],xmm5[14],xmm15[15],xmm5[15]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm15[0,3,2,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,7,7,7]
; SSE-NEXT: pand %xmm3, %xmm7
; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: por %xmm7, %xmm3
-; SSE-NEXT: pand %xmm5, %xmm6
+; SSE-NEXT: pand %xmm1, %xmm6
; SSE-NEXT: packuswb %xmm3, %xmm3
-; SSE-NEXT: pandn %xmm3, %xmm5
-; SSE-NEXT: por %xmm6, %xmm5
-; SSE-NEXT: movdqa %xmm12, %xmm2
-; SSE-NEXT: pxor %xmm0, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE-NEXT: pandn %xmm3, %xmm1
+; SSE-NEXT: por %xmm6, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3],xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm12[0,2,0,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3],xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm9[0,2,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm4
; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: pand %xmm9, %xmm5
+; SSE-NEXT: pand %xmm10, %xmm1
; SSE-NEXT: packuswb %xmm4, %xmm2
-; SSE-NEXT: pandn %xmm2, %xmm9
-; SSE-NEXT: por %xmm5, %xmm9
-; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pandn %xmm2, %xmm10
+; SSE-NEXT: por %xmm1, %xmm10
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm10, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm12, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
; SSE-NEXT: packuswb %xmm2, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm15 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm15, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[3,1,2,0]
-; SSE-NEXT: pand %xmm10, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[3,1,2,0]
+; SSE-NEXT: pand %xmm12, %xmm1
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm1[2,1,0,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm6, %xmm6
-; SSE-NEXT: pand %xmm15, %xmm6
+; SSE-NEXT: pand %xmm4, %xmm6
; SSE-NEXT: por %xmm2, %xmm6
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,0,65535,65535,0,65535,65535,0]
-; SSE-NEXT: pand %xmm11, %xmm13
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[0,2,1,3]
-; SSE-NEXT: pand %xmm10, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; SSE-NEXT: movdqa {{.*#+}} xmm13 = [65535,0,65535,65535,0,65535,65535,0]
+; SSE-NEXT: pand %xmm13, %xmm11
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,2,1,3]
+; SSE-NEXT: pand %xmm12, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
; SSE-NEXT: packuswb %xmm1, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,65535,0,0,0]
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: pand %xmm1, %xmm6
-; SSE-NEXT: por %xmm6, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm14, %xmm2
-; SSE-NEXT: pand %xmm10, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,65535,65535,0,0,0]
+; SSE-NEXT: movdqa %xmm3, %xmm1
+; SSE-NEXT: pandn %xmm2, %xmm1
+; SSE-NEXT: pand %xmm3, %xmm6
+; SSE-NEXT: por %xmm6, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm12, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
; SSE-NEXT: packuswb %xmm6, %xmm2
-; SSE-NEXT: movdqa %xmm15, %xmm6
+; SSE-NEXT: movdqa %xmm4, %xmm6
; SSE-NEXT: pandn %xmm2, %xmm6
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[3,1,2,0]
-; SSE-NEXT: pand %xmm10, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm14[3,1,2,0]
+; SSE-NEXT: pand %xmm12, %xmm2
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm2[2,1,0,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm7, %xmm7
-; SSE-NEXT: pand %xmm15, %xmm7
+; SSE-NEXT: pand %xmm4, %xmm7
; SSE-NEXT: por %xmm6, %xmm7
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: pand %xmm11, %xmm3
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,1,3]
-; SSE-NEXT: pand %xmm10, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pand %xmm13, %xmm8
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm8[0,2,1,3]
+; SSE-NEXT: pand %xmm12, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7]
; SSE-NEXT: packuswb %xmm2, %xmm6
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: pandn %xmm6, %xmm2
-; SSE-NEXT: pand %xmm1, %xmm7
-; SSE-NEXT: por %xmm7, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm3, %xmm1
+; SSE-NEXT: pandn %xmm6, %xmm1
+; SSE-NEXT: pand %xmm3, %xmm7
+; SSE-NEXT: por %xmm7, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm10, %xmm6
+; SSE-NEXT: pand %xmm12, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,2]
; SSE-NEXT: packuswb %xmm7, %xmm6
-; SSE-NEXT: movdqa %xmm15, %xmm7
+; SSE-NEXT: movdqa %xmm4, %xmm7
; SSE-NEXT: pandn %xmm6, %xmm7
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[3,1,2,0]
-; SSE-NEXT: pand %xmm10, %xmm6
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[3,1,2,0]
+; SSE-NEXT: pand %xmm12, %xmm6
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm6[2,1,0,3,4,5,6,7]
-; SSE-NEXT: packuswb %xmm8, %xmm8
-; SSE-NEXT: pand %xmm15, %xmm8
-; SSE-NEXT: por %xmm7, %xmm8
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pand %xmm11, %xmm2
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,2,1,3]
-; SSE-NEXT: pand %xmm10, %xmm6
+; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm6[2,1,0,3,4,5,6,7]
+; SSE-NEXT: packuswb %xmm9, %xmm9
+; SSE-NEXT: pand %xmm4, %xmm9
+; SSE-NEXT: por %xmm7, %xmm9
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: pand %xmm13, %xmm1
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,2,1,3]
+; SSE-NEXT: pand %xmm12, %xmm6
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,0,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,4,7]
; SSE-NEXT: packuswb %xmm6, %xmm7
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: pandn %xmm7, %xmm2
-; SSE-NEXT: pand %xmm1, %xmm8
-; SSE-NEXT: por %xmm8, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm3, %xmm1
+; SSE-NEXT: pandn %xmm7, %xmm1
+; SSE-NEXT: pand %xmm3, %xmm9
+; SSE-NEXT: por %xmm9, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm7 # 16-byte Reload
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm7, (%rsp) # 16-byte Spill
-; SSE-NEXT: pand %xmm10, %xmm7
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm7[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pand %xmm12, %xmm7
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm7[2,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,0,2]
-; SSE-NEXT: packuswb %xmm8, %xmm7
-; SSE-NEXT: movdqa %xmm15, %xmm8
-; SSE-NEXT: pandn %xmm7, %xmm8
+; SSE-NEXT: packuswb %xmm9, %xmm7
+; SSE-NEXT: movdqa %xmm4, %xmm9
+; SSE-NEXT: pandn %xmm7, %xmm9
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[3,1,2,0]
-; SSE-NEXT: pand %xmm10, %xmm7
+; SSE-NEXT: pand %xmm12, %xmm7
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm7[2,1,0,3,4,5,6,7]
-; SSE-NEXT: packuswb %xmm9, %xmm9
-; SSE-NEXT: pand %xmm15, %xmm9
-; SSE-NEXT: por %xmm8, %xmm9
-; SSE-NEXT: movdqa %xmm11, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: pand %xmm11, %xmm7
+; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm7[2,1,0,3,4,5,6,7]
+; SSE-NEXT: packuswb %xmm10, %xmm10
+; SSE-NEXT: pand %xmm4, %xmm10
+; SSE-NEXT: por %xmm9, %xmm10
+; SSE-NEXT: movdqa %xmm13, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: pand %xmm13, %xmm1
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: por %xmm7, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,2,1,3]
-; SSE-NEXT: pand %xmm10, %xmm7
+; SSE-NEXT: pand %xmm12, %xmm7
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,0,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,4,7]
-; SSE-NEXT: packuswb %xmm7, %xmm8
-; SSE-NEXT: movdqa %xmm1, %xmm7
-; SSE-NEXT: pandn %xmm8, %xmm7
-; SSE-NEXT: pand %xmm1, %xmm9
-; SSE-NEXT: por %xmm9, %xmm7
-; SSE-NEXT: movdqa %xmm0, %xmm8
-; SSE-NEXT: pxor %xmm5, %xmm5
-; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm5[8],xmm8[9],xmm5[9],xmm8[10],xmm5[10],xmm8[11],xmm5[11],xmm8[12],xmm5[12],xmm8[13],xmm5[13],xmm8[14],xmm5[14],xmm8[15],xmm5[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE-NEXT: packuswb %xmm7, %xmm9
+; SSE-NEXT: movdqa %xmm3, %xmm7
+; SSE-NEXT: pandn %xmm9, %xmm7
+; SSE-NEXT: pand %xmm3, %xmm10
+; SSE-NEXT: por %xmm10, %xmm7
; SSE-NEXT: movdqa %xmm0, %xmm9
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm8[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm8[2,3]
-; SSE-NEXT: psrlq $48, %xmm8
-; SSE-NEXT: psrldq {{.*#+}} xmm9 = xmm9[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm0[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,5,7]
-; SSE-NEXT: packuswb %xmm9, %xmm8
-; SSE-NEXT: movdqa %xmm15, %xmm10
-; SSE-NEXT: pandn %xmm8, %xmm10
-; SSE-NEXT: movdqa %xmm12, %xmm8
-; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm5[8],xmm8[9],xmm5[9],xmm8[10],xmm5[10],xmm8[11],xmm5[11],xmm8[12],xmm5[12],xmm8[13],xmm5[13],xmm8[14],xmm5[14],xmm8[15],xmm5[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[1,1,2,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE-NEXT: movdqa %xmm0, %xmm10
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm9[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm9[2,3]
+; SSE-NEXT: psrlq $48, %xmm9
+; SSE-NEXT: psrldq {{.*#+}} xmm10 = xmm10[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm0[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,4,5,7]
+; SSE-NEXT: packuswb %xmm10, %xmm9
+; SSE-NEXT: movdqa %xmm4, %xmm12
+; SSE-NEXT: pandn %xmm9, %xmm12
+; SSE-NEXT: movdqa %xmm15, %xmm9
+; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[1,1,2,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,5,5,5,5]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm0, %xmm11
-; SSE-NEXT: pandn %xmm8, %xmm11
-; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3],xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm12[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm8[3,1,1,2,4,5,6,7]
-; SSE-NEXT: pand %xmm0, %xmm12
-; SSE-NEXT: por %xmm11, %xmm12
-; SSE-NEXT: packuswb %xmm12, %xmm12
-; SSE-NEXT: pand %xmm15, %xmm12
-; SSE-NEXT: por %xmm10, %xmm12
-; SSE-NEXT: movdqa %xmm13, %xmm8
-; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm5[8],xmm13[9],xmm5[9],xmm13[10],xmm5[10],xmm13[11],xmm5[11],xmm13[12],xmm5[12],xmm13[13],xmm5[13],xmm13[14],xmm5[14],xmm13[15],xmm5[15]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm13[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,7,4]
-; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,65535,65535,0,65535,0,0]
+; SSE-NEXT: movdqa %xmm0, %xmm13
+; SSE-NEXT: pandn %xmm9, %xmm13
+; SSE-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm5[0],xmm15[1],xmm5[1],xmm15[2],xmm5[2],xmm15[3],xmm5[3],xmm15[4],xmm5[4],xmm15[5],xmm5[5],xmm15[6],xmm5[6],xmm15[7],xmm5[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm15[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm9[3,1,1,2,4,5,6,7]
+; SSE-NEXT: pand %xmm0, %xmm10
+; SSE-NEXT: por %xmm13, %xmm10
+; SSE-NEXT: packuswb %xmm10, %xmm10
+; SSE-NEXT: pand %xmm4, %xmm10
+; SSE-NEXT: por %xmm12, %xmm10
+; SSE-NEXT: movdqa %xmm11, %xmm9
+; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm5[8],xmm11[9],xmm5[9],xmm11[10],xmm5[10],xmm11[11],xmm5[11],xmm11[12],xmm5[12],xmm11[13],xmm5[13],xmm11[14],xmm5[14],xmm11[15],xmm5[15]
+; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm11[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm13 = xmm12[0,1,2,3,5,5,7,4]
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,0,65535,0,0]
+; SSE-NEXT: movdqa %xmm1, %xmm15
+; SSE-NEXT: pandn %xmm13, %xmm15
+; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3],xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,3,1,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[0,1,1,3,4,5,6,7]
+; SSE-NEXT: pand %xmm1, %xmm9
+; SSE-NEXT: por %xmm9, %xmm15
+; SSE-NEXT: packuswb %xmm15, %xmm13
+; SSE-NEXT: movdqa %xmm3, %xmm9
+; SSE-NEXT: pandn %xmm13, %xmm9
+; SSE-NEXT: pand %xmm3, %xmm10
+; SSE-NEXT: por %xmm10, %xmm9
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm11, %xmm10
+; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm5[8],xmm10[9],xmm5[9],xmm10[10],xmm5[10],xmm10[11],xmm5[11],xmm10[12],xmm5[12],xmm10[13],xmm5[13],xmm10[14],xmm5[14],xmm10[15],xmm5[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm5[0],xmm11[1],xmm5[1],xmm11[2],xmm5[2],xmm11[3],xmm5[3],xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7]
; SSE-NEXT: movdqa %xmm11, %xmm13
-; SSE-NEXT: pandn %xmm10, %xmm13
-; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm5[0],xmm8[1],xmm5[1],xmm8[2],xmm5[2],xmm8[3],xmm5[3],xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,1,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,1,1,3,4,5,6,7]
-; SSE-NEXT: pand %xmm11, %xmm8
-; SSE-NEXT: por %xmm8, %xmm13
+; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm10[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm10[2,3]
+; SSE-NEXT: psrlq $48, %xmm10
+; SSE-NEXT: psrldq {{.*#+}} xmm13 = xmm13[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm11[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,4,5,7]
; SSE-NEXT: packuswb %xmm13, %xmm10
-; SSE-NEXT: movdqa %xmm1, %xmm8
-; SSE-NEXT: pandn %xmm10, %xmm8
-; SSE-NEXT: pand %xmm1, %xmm12
-; SSE-NEXT: por %xmm12, %xmm8
-; SSE-NEXT: movdqa %xmm14, %xmm9
+; SSE-NEXT: movdqa %xmm4, %xmm13
+; SSE-NEXT: pandn %xmm10, %xmm13
; SSE-NEXT: movdqa %xmm14, %xmm10
; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm5[8],xmm10[9],xmm5[9],xmm10[10],xmm5[10],xmm10[11],xmm5[11],xmm10[12],xmm5[12],xmm10[13],xmm5[13],xmm10[14],xmm5[14],xmm10[15],xmm5[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3],xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
-; SSE-NEXT: movdqa %xmm9, %xmm12
-; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,0],xmm10[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,0],xmm10[2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,2,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
+; SSE-NEXT: movdqa %xmm0, %xmm15
+; SSE-NEXT: pandn %xmm10, %xmm15
+; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm5[0],xmm14[1],xmm5[1],xmm14[2],xmm5[2],xmm14[3],xmm5[3],xmm14[4],xmm5[4],xmm14[5],xmm5[5],xmm14[6],xmm5[6],xmm14[7],xmm5[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm14[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm10[3,1,1,2,4,5,6,7]
+; SSE-NEXT: pand %xmm0, %xmm10
+; SSE-NEXT: por %xmm15, %xmm10
+; SSE-NEXT: packuswb %xmm10, %xmm10
+; SSE-NEXT: pand %xmm4, %xmm10
+; SSE-NEXT: por %xmm13, %xmm10
+; SSE-NEXT: movdqa %xmm8, %xmm13
+; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm5[8],xmm8[9],xmm5[9],xmm8[10],xmm5[10],xmm8[11],xmm5[11],xmm8[12],xmm5[12],xmm8[13],xmm5[13],xmm8[14],xmm5[14],xmm8[15],xmm5[15]
+; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm8[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm15[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,5,7,4]
+; SSE-NEXT: movdqa %xmm1, %xmm12
+; SSE-NEXT: pandn %xmm15, %xmm12
+; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3],xmm13[4],xmm5[4],xmm13[5],xmm5[5],xmm13[6],xmm5[6],xmm13[7],xmm5[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm13[0,3,1,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm13[0,1,1,3,4,5,6,7]
+; SSE-NEXT: pand %xmm1, %xmm13
+; SSE-NEXT: por %xmm13, %xmm12
+; SSE-NEXT: packuswb %xmm12, %xmm12
+; SSE-NEXT: movdqa %xmm3, %xmm13
+; SSE-NEXT: pandn %xmm12, %xmm13
+; SSE-NEXT: pand %xmm3, %xmm10
+; SSE-NEXT: por %xmm10, %xmm13
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm11, %xmm10
+; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm5[8],xmm10[9],xmm5[9],xmm10[10],xmm5[10],xmm10[11],xmm5[11],xmm10[12],xmm5[12],xmm10[13],xmm5[13],xmm10[14],xmm5[14],xmm10[15],xmm5[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm5[0],xmm11[1],xmm5[1],xmm11[2],xmm5[2],xmm11[3],xmm5[3],xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7]
+; SSE-NEXT: movdqa %xmm11, %xmm12
+; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm10[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm10[2,3]
; SSE-NEXT: psrlq $48, %xmm10
; SSE-NEXT: psrldq {{.*#+}} xmm12 = xmm12[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm9[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm11[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,4,5,7]
; SSE-NEXT: packuswb %xmm12, %xmm10
-; SSE-NEXT: movdqa %xmm15, %xmm12
+; SSE-NEXT: movdqa %xmm4, %xmm12
; SSE-NEXT: pandn %xmm10, %xmm12
-; SSE-NEXT: movdqa %xmm4, %xmm10
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm14, %xmm10
; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm5[8],xmm10[9],xmm5[9],xmm10[10],xmm5[10],xmm10[11],xmm5[11],xmm10[12],xmm5[12],xmm10[13],xmm5[13],xmm10[14],xmm5[14],xmm10[15],xmm5[15]
; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
-; SSE-NEXT: movdqa %xmm0, %xmm14
-; SSE-NEXT: pandn %xmm10, %xmm14
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm4[0,1,2,3,7,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pandn %xmm10, %xmm11
+; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm5[0],xmm14[1],xmm5[1],xmm14[2],xmm5[2],xmm14[3],xmm5[3],xmm14[4],xmm5[4],xmm14[5],xmm5[5],xmm14[6],xmm5[6],xmm14[7],xmm5[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm14[0,1,2,3,7,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm10[3,1,1,2,4,5,6,7]
-; SSE-NEXT: pand %xmm0, %xmm13
-; SSE-NEXT: por %xmm14, %xmm13
-; SSE-NEXT: packuswb %xmm13, %xmm13
-; SSE-NEXT: pand %xmm15, %xmm13
-; SSE-NEXT: por %xmm12, %xmm13
-; SSE-NEXT: movdqa %xmm3, %xmm10
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
-; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm3[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,5,5,7,4]
-; SSE-NEXT: movdqa %xmm11, %xmm14
-; SSE-NEXT: pandn %xmm12, %xmm14
+; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm10[3,1,1,2,4,5,6,7]
+; SSE-NEXT: pand %xmm0, %xmm15
+; SSE-NEXT: por %xmm11, %xmm15
+; SSE-NEXT: packuswb %xmm15, %xmm15
+; SSE-NEXT: pand %xmm4, %xmm15
+; SSE-NEXT: por %xmm12, %xmm15
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm14, %xmm10
+; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm5[8],xmm14[9],xmm5[9],xmm14[10],xmm5[10],xmm14[11],xmm5[11],xmm14[12],xmm5[12],xmm14[13],xmm5[13],xmm14[14],xmm5[14],xmm14[15],xmm5[15]
+; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm14[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,5,5,7,4]
+; SSE-NEXT: movdqa %xmm1, %xmm12
+; SSE-NEXT: pandn %xmm11, %xmm12
; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3],xmm10[4],xmm5[4],xmm10[5],xmm5[5],xmm10[6],xmm5[6],xmm10[7],xmm5[7]
; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,3,1,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm10[0,1,1,3,4,5,6,7]
-; SSE-NEXT: pand %xmm11, %xmm10
-; SSE-NEXT: por %xmm10, %xmm14
-; SSE-NEXT: packuswb %xmm14, %xmm10
-; SSE-NEXT: movdqa %xmm1, %xmm12
-; SSE-NEXT: pandn %xmm10, %xmm12
-; SSE-NEXT: pand %xmm1, %xmm13
-; SSE-NEXT: por %xmm13, %xmm12
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm9, %xmm10
-; SSE-NEXT: pxor %xmm3, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm3[8],xmm10[9],xmm3[9],xmm10[10],xmm3[10],xmm10[11],xmm3[11],xmm10[12],xmm3[12],xmm10[13],xmm3[13],xmm10[14],xmm3[14],xmm10[15],xmm3[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm3[0],xmm9[1],xmm3[1],xmm9[2],xmm3[2],xmm9[3],xmm3[3],xmm9[4],xmm3[4],xmm9[5],xmm3[5],xmm9[6],xmm3[6],xmm9[7],xmm3[7]
-; SSE-NEXT: pxor %xmm4, %xmm4
-; SSE-NEXT: movdqa %xmm9, %xmm13
-; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,0],xmm10[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,0],xmm10[2,3]
-; SSE-NEXT: psrlq $48, %xmm10
-; SSE-NEXT: psrldq {{.*#+}} xmm13 = xmm13[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm9[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,4,5,7]
-; SSE-NEXT: packuswb %xmm13, %xmm10
-; SSE-NEXT: movdqa %xmm15, %xmm13
-; SSE-NEXT: pandn %xmm10, %xmm13
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm3, %xmm10
-; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,2,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
-; SSE-NEXT: movdqa %xmm0, %xmm9
-; SSE-NEXT: pandn %xmm10, %xmm9
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm3[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm10[3,1,1,2,4,5,6,7]
-; SSE-NEXT: pand %xmm0, %xmm14
-; SSE-NEXT: por %xmm9, %xmm14
-; SSE-NEXT: packuswb %xmm14, %xmm14
-; SSE-NEXT: pand %xmm15, %xmm14
-; SSE-NEXT: por %xmm13, %xmm14
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm3, %xmm9
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm3[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,7,4]
-; SSE-NEXT: movdqa %xmm11, %xmm13
-; SSE-NEXT: pandn %xmm10, %xmm13
-; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1],xmm9[2],xmm4[2],xmm9[3],xmm4[3],xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,3,1,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[0,1,1,3,4,5,6,7]
-; SSE-NEXT: pand %xmm11, %xmm9
-; SSE-NEXT: por %xmm9, %xmm13
-; SSE-NEXT: packuswb %xmm13, %xmm9
-; SSE-NEXT: movdqa %xmm1, %xmm13
-; SSE-NEXT: pandn %xmm9, %xmm13
-; SSE-NEXT: pand %xmm1, %xmm14
-; SSE-NEXT: por %xmm14, %xmm13
-; SSE-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm3, %xmm9
-; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm4[8],xmm9[9],xmm4[9],xmm9[10],xmm4[10],xmm9[11],xmm4[11],xmm9[12],xmm4[12],xmm9[13],xmm4[13],xmm9[14],xmm4[14],xmm9[15],xmm4[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE-NEXT: pand %xmm1, %xmm10
+; SSE-NEXT: por %xmm10, %xmm12
+; SSE-NEXT: packuswb %xmm12, %xmm11
; SSE-NEXT: movdqa %xmm3, %xmm10
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm9[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm9[2,3]
-; SSE-NEXT: psrlq $48, %xmm9
-; SSE-NEXT: psrldq {{.*#+}} xmm10 = xmm10[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm3[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,4,5,7]
-; SSE-NEXT: packuswb %xmm10, %xmm9
-; SSE-NEXT: movdqa %xmm6, %xmm10
-; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm5[8],xmm10[9],xmm5[9],xmm10[10],xmm5[10],xmm10[11],xmm5[11],xmm10[12],xmm5[12],xmm10[13],xmm5[13],xmm10[14],xmm5[14],xmm10[15],xmm5[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,2,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
+; SSE-NEXT: pandn %xmm11, %xmm10
+; SSE-NEXT: pand %xmm3, %xmm15
+; SSE-NEXT: por %xmm15, %xmm10
+; SSE-NEXT: movdqa (%rsp), %xmm8 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm8, %xmm11
+; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm5[8],xmm11[9],xmm5[9],xmm11[10],xmm5[10],xmm11[11],xmm5[11],xmm11[12],xmm5[12],xmm11[13],xmm5[13],xmm11[14],xmm5[14],xmm11[15],xmm5[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm5[0],xmm8[1],xmm5[1],xmm8[2],xmm5[2],xmm8[3],xmm5[3],xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7]
+; SSE-NEXT: movdqa %xmm8, %xmm12
+; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,0],xmm11[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm11[2,3]
+; SSE-NEXT: psrlq $48, %xmm11
+; SSE-NEXT: psrldq {{.*#+}} xmm12 = xmm12[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm8[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,4,5,7]
+; SSE-NEXT: packuswb %xmm12, %xmm11
+; SSE-NEXT: movdqa %xmm6, %xmm12
+; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm5[8],xmm12[9],xmm5[9],xmm12[10],xmm5[10],xmm12[11],xmm5[11],xmm12[12],xmm5[12],xmm12[13],xmm5[13],xmm12[14],xmm5[14],xmm12[15],xmm5[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[1,1,2,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,5,5,5,5]
; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm6[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm14[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm14[3,1,1,2,4,5,6,7]
-; SSE-NEXT: pand %xmm0, %xmm14
-; SSE-NEXT: pandn %xmm10, %xmm0
-; SSE-NEXT: por %xmm14, %xmm0
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm15, %xmm0
-; SSE-NEXT: pandn %xmm9, %xmm15
+; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm6[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm15[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm15[3,1,1,2,4,5,6,7]
+; SSE-NEXT: pand %xmm0, %xmm15
+; SSE-NEXT: pandn %xmm12, %xmm0
; SSE-NEXT: por %xmm15, %xmm0
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: pandn %xmm11, %xmm4
+; SSE-NEXT: por %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,1,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
-; SSE-NEXT: pand %xmm11, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,1,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
+; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,7,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,7,4]
-; SSE-NEXT: pandn %xmm5, %xmm11
-; SSE-NEXT: por %xmm4, %xmm11
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: packuswb %xmm11, %xmm4
-; SSE-NEXT: pandn %xmm4, %xmm1
+; SSE-NEXT: pandn %xmm5, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
@@ -5913,11 +5903,11 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, (%r9)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT: movdqa %xmm1, 16(%rax)
-; SSE-NEXT: movdqa %xmm13, 32(%rax)
-; SSE-NEXT: movdqa %xmm12, 48(%rax)
-; SSE-NEXT: movdqa %xmm8, (%rax)
-; SSE-NEXT: addq $792, %rsp # imm = 0x318
+; SSE-NEXT: movdqa %xmm3, 16(%rax)
+; SSE-NEXT: movdqa %xmm10, 32(%rax)
+; SSE-NEXT: movdqa %xmm13, 48(%rax)
+; SSE-NEXT: movdqa %xmm9, (%rax)
+; SSE-NEXT: addq $840, %rsp # imm = 0x348
; SSE-NEXT: retq
;
; AVX-LABEL: load_i8_stride6_vf64:
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
index 0ee10a33c1d0c3..31f4a2f77e27df 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
@@ -942,12 +942,11 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pandn %xmm4, %xmm2
; SSE-NEXT: movdqa %xmm4, %xmm11
; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: movdqa %xmm2, %xmm5
-; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,65535,65535,0,65535,0,65535]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE-NEXT: pxor %xmm4, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
; SSE-NEXT: pand %xmm7, %xmm2
; SSE-NEXT: pandn %xmm5, %xmm7
; SSE-NEXT: por %xmm2, %xmm7
@@ -973,9 +972,9 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movdqa %xmm11, %xmm1
; SSE-NEXT: pand %xmm5, %xmm10
; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,65535,0,65535,65535,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: pand %xmm12, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm6, %xmm11
+; SSE-NEXT: pand %xmm12, %xmm11
+; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pandn %xmm0, %xmm12
; SSE-NEXT: movaps %xmm0, %xmm14
; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[1,0],xmm6[0,0]
@@ -984,8 +983,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pandn %xmm6, %xmm5
; SSE-NEXT: movdqa %xmm6, %xmm15
-; SSE-NEXT: pxor %xmm0, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm0[8],xmm15[9],xmm0[9],xmm15[10],xmm0[10],xmm15[11],xmm0[11],xmm15[12],xmm0[12],xmm15[13],xmm0[13],xmm15[14],xmm0[14],xmm15[15],xmm0[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm4[8],xmm15[9],xmm4[9],xmm15[10],xmm4[10],xmm15[11],xmm4[11],xmm15[12],xmm4[12],xmm15[13],xmm4[13],xmm15[14],xmm4[14],xmm15[15],xmm4[15]
; SSE-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm9[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,2,1]
@@ -997,21 +995,20 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: por %xmm7, %xmm13
; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,65535,0,65535,65535,0,65535]
; SSE-NEXT: movdqa %xmm7, %xmm9
-; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pandn %xmm1, %xmm9
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: pand %xmm7, %xmm3
; SSE-NEXT: por %xmm9, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm9
-; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3],xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1],xmm9[2],xmm4[2],xmm9[3],xmm4[3],xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,65535,65535,0,65535]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pandn %xmm9, %xmm1
-; SSE-NEXT: pxor %xmm6, %xmm6
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm6[8],xmm3[9],xmm6[9],xmm3[10],xmm6[10],xmm3[11],xmm6[11],xmm3[12],xmm6[12],xmm3[13],xmm6[13],xmm3[14],xmm6[14],xmm3[15],xmm6[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3],xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm8, %xmm9
; SSE-NEXT: pand %xmm1, %xmm9
@@ -1028,12 +1025,13 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm1, %xmm1
; SSE-NEXT: pand %xmm11, %xmm1
; SSE-NEXT: por %xmm1, %xmm9
+; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: pandn %xmm2, %xmm1
; SSE-NEXT: por %xmm1, %xmm10
; SSE-NEXT: movdqa %xmm10, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm6[0],xmm10[1],xmm6[1],xmm10[2],xmm6[2],xmm10[3],xmm6[3],xmm10[4],xmm6[4],xmm10[5],xmm6[5],xmm10[6],xmm6[6],xmm10[7],xmm6[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3],xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
; SSE-NEXT: pand %xmm0, %xmm10
; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: por %xmm10, %xmm0
@@ -1054,14 +1052,15 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pandn %xmm1, %xmm8
; SSE-NEXT: por %xmm0, %xmm8
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,65535,0,65535,65535]
-; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm6, %xmm2
+; SSE-NEXT: movdqa %xmm6, %xmm1
; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm2, %xmm10
+; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm3, %xmm10
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,0,65535,0,65535,65535,65535,65535]
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm3
@@ -1073,7 +1072,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: pand %xmm11, %xmm0
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm6[0],xmm12[1],xmm6[1],xmm12[2],xmm6[2],xmm12[3],xmm6[3],xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3],xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm12[0,1,2,3,7,5,6,7]
; SSE-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm12[0,3,2,3,4,5,6,7]
@@ -1084,14 +1083,11 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: por %xmm0, %xmm11
; SSE-NEXT: movdqa %xmm11, %xmm6
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pand %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm10, %xmm11
; SSE-NEXT: pandn %xmm10, %xmm0
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
@@ -1105,9 +1101,8 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pandn %xmm15, %xmm3
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: packuswb %xmm3, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[1,3,2,3]
-; SSE-NEXT: movdqa %xmm11, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: movdqa %xmm1, %xmm0
@@ -1129,17 +1124,19 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm1[0,3,2,3]
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: pand %xmm7, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm7
+; SSE-NEXT: pandn %xmm10, %xmm7
; SSE-NEXT: por %xmm0, %xmm7
; SSE-NEXT: movdqa %xmm7, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm1[8],xmm7[9],xmm1[9],xmm7[10],xmm1[10],xmm7[11],xmm1[11],xmm7[12],xmm1[12],xmm7[13],xmm1[13],xmm7[14],xmm1[14],xmm7[15],xmm1[15]
+; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,7]
; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3],xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,65535,65535,65535,65535]
; SSE-NEXT: pand %xmm1, %xmm5
; SSE-NEXT: pandn %xmm15, %xmm1
@@ -1154,7 +1151,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movq %xmm9, (%rdx)
; SSE-NEXT: movq %xmm8, (%rcx)
; SSE-NEXT: movq %xmm6, (%r8)
-; SSE-NEXT: movq %xmm10, (%r9)
+; SSE-NEXT: movq %xmm3, (%r9)
; SSE-NEXT: movq %xmm11, (%rdi)
; SSE-NEXT: movq %xmm0, (%rax)
; SSE-NEXT: retq
@@ -1862,19 +1859,19 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6) nounwind {
; SSE-LABEL: load_i8_stride7_vf16:
; SSE: # %bb.0:
-; SSE-NEXT: subq $168, %rsp
-; SSE-NEXT: movdqa 96(%rdi), %xmm15
+; SSE-NEXT: subq $200, %rsp
+; SSE-NEXT: movdqa 96(%rdi), %xmm8
; SSE-NEXT: movdqa 80(%rdi), %xmm4
; SSE-NEXT: movdqa 64(%rdi), %xmm7
; SSE-NEXT: movdqa (%rdi), %xmm6
; SSE-NEXT: movdqa 16(%rdi), %xmm3
; SSE-NEXT: movdqa 32(%rdi), %xmm1
-; SSE-NEXT: movdqa 48(%rdi), %xmm8
+; SSE-NEXT: movdqa 48(%rdi), %xmm5
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,65535,0,65535,65535]
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm12
-; SSE-NEXT: movdqa %xmm8, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: movdqa %xmm5, %xmm1
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: pxor %xmm13, %xmm13
@@ -1888,38 +1885,37 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255]
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,0,65535,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,65535,0,65535,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm12, %xmm2
+; SSE-NEXT: pandn %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm3, %xmm9
; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: movdqa %xmm6, %xmm11
; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm10, %xmm3
-; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15]
-; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,65535,65535,65535,0,65535,0,65535]
-; SSE-NEXT: movdqa %xmm14, %xmm6
-; SSE-NEXT: pandn %xmm0, %xmm6
+; SSE-NEXT: pand %xmm12, %xmm3
+; SSE-NEXT: por %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
+; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,65535,65535,65535,0,65535,0,65535]
+; SSE-NEXT: movdqa %xmm15, %xmm6
+; SSE-NEXT: pandn %xmm2, %xmm6
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3],xmm3[4],xmm13[4],xmm3[5],xmm13[5],xmm3[6],xmm13[6],xmm3[7],xmm13[7]
-; SSE-NEXT: pand %xmm14, %xmm3
+; SSE-NEXT: pand %xmm15, %xmm3
; SSE-NEXT: por %xmm6, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,1,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,1,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,1,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,3,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,1,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,3,2,1,4,5,6,7]
+; SSE-NEXT: packuswb %xmm2, %xmm2
+; SSE-NEXT: pand %xmm0, %xmm2
+; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,0,65535,65535,0,65535]
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: pandn %xmm7, %xmm1
; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm3, %xmm13
; SSE-NEXT: por %xmm1, %xmm2
@@ -1931,22 +1927,20 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,7]
; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE-NEXT: movdqa %xmm15, %xmm2
-; SSE-NEXT: movdqa %xmm15, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm6[8],xmm3[9],xmm6[9],xmm3[10],xmm6[10],xmm3[11],xmm6[11],xmm3[12],xmm6[12],xmm3[13],xmm6[13],xmm3[14],xmm6[14],xmm3[15],xmm6[15]
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm8, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm6[8],xmm8[9],xmm6[9],xmm8[10],xmm6[10],xmm8[11],xmm6[11],xmm8[12],xmm6[12],xmm8[13],xmm6[13],xmm8[14],xmm6[14],xmm8[15],xmm6[15]
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pxor %xmm15, %xmm15
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE-NEXT: pxor %xmm14, %xmm14
+; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535,65535,65535,65535,0]
-; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: pand %xmm6, %xmm1
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,65535,0,0,0]
; SSE-NEXT: pand %xmm1, %xmm0
@@ -1955,13 +1949,13 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm12, %xmm0
-; SSE-NEXT: movdqa %xmm8, %xmm1
+; SSE-NEXT: pandn %xmm10, %xmm0
+; SSE-NEXT: movdqa %xmm5, %xmm1
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm15[8],xmm0[9],xmm15[9],xmm0[10],xmm15[10],xmm0[11],xmm15[11],xmm0[12],xmm15[12],xmm0[13],xmm15[13],xmm0[14],xmm15[14],xmm0[15],xmm15[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm14[8],xmm0[9],xmm14[9],xmm0[10],xmm14[10],xmm0[11],xmm14[11],xmm0[12],xmm14[12],xmm0[13],xmm14[13],xmm0[14],xmm14[14],xmm0[15],xmm14[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,65535,65535,65535,65535]
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm2
@@ -1970,93 +1964,97 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,6]
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm13, %xmm1
; SSE-NEXT: pandn %xmm9, %xmm1
; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm11, %xmm2
; SSE-NEXT: pand %xmm13, %xmm2
-; SSE-NEXT: movdqa %xmm13, %xmm11
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm6, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm11, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm15[8],xmm2[9],xmm15[9],xmm2[10],xmm15[10],xmm2[11],xmm15[11],xmm2[12],xmm15[12],xmm2[13],xmm15[13],xmm2[14],xmm15[14],xmm2[15],xmm15[15]
-; SSE-NEXT: pand %xmm6, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm14[8],xmm2[9],xmm14[9],xmm2[10],xmm14[10],xmm2[11],xmm14[11],xmm2[12],xmm14[12],xmm2[13],xmm14[13],xmm2[14],xmm14[14],xmm2[15],xmm14[15]
+; SSE-NEXT: pand %xmm11, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm13 = xmm1[0,1,2,3,5,5,5,5]
-; SSE-NEXT: packuswb %xmm13, %xmm13
-; SSE-NEXT: pand %xmm4, %xmm13
-; SSE-NEXT: por %xmm0, %xmm13
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm5, %xmm6
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
+; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: pand %xmm6, %xmm1
+; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm12, %xmm0
+; SSE-NEXT: pandn %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: pand %xmm10, %xmm1
+; SSE-NEXT: pand %xmm12, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE-NEXT: pand %xmm14, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm14
-; SSE-NEXT: por %xmm1, %xmm14
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm8, %xmm10
-; SSE-NEXT: por %xmm0, %xmm10
-; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,0,65535,65535,0,65535,65535,65535]
+; SSE-NEXT: pand %xmm15, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm15
+; SSE-NEXT: por %xmm1, %xmm15
+; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: pandn %xmm5, %xmm12
+; SSE-NEXT: por %xmm0, %xmm12
+; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,0,65535,65535,0,65535,65535,65535]
; SSE-NEXT: movdqa %xmm9, %xmm7
-; SSE-NEXT: pand %xmm14, %xmm7
-; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm5, %xmm15
-; SSE-NEXT: pand %xmm14, %xmm15
-; SSE-NEXT: movdqa %xmm11, %xmm3
-; SSE-NEXT: pandn %xmm8, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm8[0,2,2,3]
-; SSE-NEXT: movdqa %xmm8, %xmm4
-; SSE-NEXT: pand %xmm14, %xmm8
-; SSE-NEXT: movdqa %xmm14, %xmm9
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pandn %xmm12, %xmm14
-; SSE-NEXT: por %xmm8, %xmm14
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm15, %xmm7
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,65535,65535,0,65535]
+; SSE-NEXT: pandn %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm5[0,2,2,3]
+; SSE-NEXT: movdqa %xmm5, %xmm10
+; SSE-NEXT: pand %xmm15, %xmm5
+; SSE-NEXT: movdqa %xmm15, %xmm9
+; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pandn %xmm2, %xmm15
+; SSE-NEXT: por %xmm5, %xmm15
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm8, %xmm5
; SSE-NEXT: pslld $16, %xmm5
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; SSE-NEXT: movdqa %xmm8, %xmm3
; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE-NEXT: movdqa %xmm8, %xmm1
-; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1],xmm10[2],xmm1[2],xmm10[3],xmm1[3],xmm10[4],xmm1[4],xmm10[5],xmm1[5],xmm10[6],xmm1[6],xmm10[7],xmm1[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm10[0,1,2,3,7,5,6,7]
+; SSE-NEXT: movdqa %xmm8, %xmm2
+; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pxor %xmm0, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3],xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,7,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm2[0,1,2,3,6,4,6,5]
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,65535,65535,65535,65535,65535,0,65535]
-; SSE-NEXT: pand %xmm12, %xmm10
+; SSE-NEXT: pshufhw {{.*#+}} xmm13 = xmm2[0,1,2,3,6,4,6,5]
+; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,65535,65535,65535,65535,65535,0,65535]
+; SSE-NEXT: pand %xmm14, %xmm13
; SSE-NEXT: movdqa %xmm8, %xmm2
-; SSE-NEXT: pand %xmm12, %xmm2
+; SSE-NEXT: pand %xmm14, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm12, (%rsp) # 16-byte Spill
-; SSE-NEXT: pandn %xmm0, %xmm12
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1],xmm14[2],xmm1[2],xmm14[3],xmm1[3],xmm14[4],xmm1[4],xmm14[5],xmm1[5],xmm14[6],xmm1[6],xmm14[7],xmm1[7]
+; SSE-NEXT: movdqa %xmm14, (%rsp) # 16-byte Spill
+; SSE-NEXT: pandn %xmm6, %xmm14
+; SSE-NEXT: movdqa %xmm6, %xmm2
+; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE-NEXT: pxor %xmm6, %xmm6
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,65535,65535,65535,65535]
-; SSE-NEXT: pand %xmm0, %xmm14
+; SSE-NEXT: pand %xmm0, %xmm15
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -2073,38 +2071,39 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,4,6,7]
; SSE-NEXT: packuswb %xmm5, %xmm5
; SSE-NEXT: pand %xmm0, %xmm5
+; SSE-NEXT: movdqa %xmm0, %xmm12
; SSE-NEXT: por %xmm5, %xmm8
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
-; SSE-NEXT: movdqa %xmm5, %xmm0
-; SSE-NEXT: pandn %xmm8, %xmm0
-; SSE-NEXT: pand %xmm5, %xmm13
-; SSE-NEXT: por %xmm13, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: pandn %xmm8, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,65535,65535,0,65535]
; SSE-NEXT: movdqa %xmm2, %xmm8
-; SSE-NEXT: pandn %xmm6, %xmm8
+; SSE-NEXT: pandn %xmm4, %xmm8
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: por %xmm8, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm8
-; SSE-NEXT: pxor %xmm6, %xmm6
; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3],xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: pandn %xmm8, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15]
; SSE-NEXT: pxor %xmm8, %xmm8
-; SSE-NEXT: pand %xmm13, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
+; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: packuswb %xmm3, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,65535,65535,0]
-; SSE-NEXT: movdqa %xmm6, %xmm3
+; SSE-NEXT: movdqa %xmm12, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm6, %xmm0
+; SSE-NEXT: pand %xmm12, %xmm0
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: pandn %xmm3, %xmm0
@@ -2113,10 +2112,10 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: por %xmm9, %xmm7
; SSE-NEXT: movdqa %xmm7, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
-; SSE-NEXT: movdqa %xmm13, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
-; SSE-NEXT: pand %xmm13, %xmm7
+; SSE-NEXT: pand %xmm2, %xmm7
; SSE-NEXT: por %xmm3, %xmm7
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[1,3,2,3]
@@ -2132,29 +2131,29 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; SSE-NEXT: packuswb %xmm1, %xmm3
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm13, %xmm8
-; SSE-NEXT: pandn %xmm3, %xmm8
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm2, %xmm12
+; SSE-NEXT: pandn %xmm3, %xmm12
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[0,2,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,3,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,0,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm13, %xmm1
-; SSE-NEXT: por %xmm1, %xmm8
-; SSE-NEXT: pand %xmm5, %xmm8
-; SSE-NEXT: por %xmm0, %xmm8
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm9, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm4
-; SSE-NEXT: por %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: por %xmm1, %xmm12
+; SSE-NEXT: pand %xmm5, %xmm12
+; SSE-NEXT: por %xmm0, %xmm12
; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: pandn %xmm9, %xmm0
+; SSE-NEXT: pand %xmm4, %xmm10
+; SSE-NEXT: por %xmm0, %xmm10
+; SSE-NEXT: movdqa %xmm10, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm4[0,3,2,3,4,5,6,7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1],xmm10[2],xmm1[2],xmm10[3],xmm1[3],xmm10[4],xmm1[4],xmm10[5],xmm1[5],xmm10[6],xmm1[6],xmm10[7],xmm1[7]
+; SSE-NEXT: pxor %xmm11, %xmm11
+; SSE-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm10[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,7,6]
; SSE-NEXT: psrlq $48, %xmm0
@@ -2166,49 +2165,52 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
; SSE-NEXT: movdqa %xmm9, %xmm4
; SSE-NEXT: pand %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm10
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3],xmm0[4],xmm11[4],xmm0[5],xmm11[5],xmm0[6],xmm11[6],xmm0[7],xmm11[7]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,0,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm11[8],xmm4[9],xmm11[9],xmm4[10],xmm11[10],xmm4[11],xmm11[11],xmm4[12],xmm11[12],xmm4[13],xmm11[13],xmm4[14],xmm11[14],xmm4[15],xmm11[15]
; SSE-NEXT: pand %xmm1, %xmm4
; SSE-NEXT: por %xmm6, %xmm4
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[3,2,1,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm0[0,1,2,3,7,7,7,7]
-; SSE-NEXT: packuswb %xmm4, %xmm4
-; SSE-NEXT: pand %xmm13, %xmm4
-; SSE-NEXT: pandn %xmm3, %xmm13
-; SSE-NEXT: por %xmm13, %xmm4
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
+; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pandn %xmm3, %xmm4
+; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pandn %xmm6, %xmm0
-; SSE-NEXT: por %xmm0, %xmm15
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm2[0],xmm15[1],xmm2[1],xmm15[2],xmm2[2],xmm15[3],xmm2[3],xmm15[4],xmm2[4],xmm15[5],xmm2[5],xmm15[6],xmm2[6],xmm15[7],xmm2[7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm11[8],xmm0[9],xmm11[9],xmm0[10],xmm11[10],xmm0[11],xmm11[11],xmm0[12],xmm11[12],xmm0[13],xmm11[13],xmm0[14],xmm11[14],xmm0[15],xmm11[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT: pand %xmm3, %xmm15
+; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: por %xmm15, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm11
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[2,1,2,3,4,5,6,7]
+; SSE-NEXT: por %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,65535,65535,65535,0]
-; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,65535,65535,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm11, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[0,2,1,0,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,2,1,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pand %xmm11, %xmm0
; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm5, %xmm15
-; SSE-NEXT: pandn %xmm3, %xmm15
+; SSE-NEXT: movdqa %xmm5, %xmm11
+; SSE-NEXT: pandn %xmm3, %xmm11
; SSE-NEXT: pand %xmm5, %xmm4
-; SSE-NEXT: por %xmm4, %xmm15
+; SSE-NEXT: por %xmm4, %xmm11
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pandn %xmm7, %xmm3
@@ -2227,22 +2229,21 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: por %xmm3, %xmm10
-; SSE-NEXT: packuswb %xmm2, %xmm10
+; SSE-NEXT: por %xmm3, %xmm13
+; SSE-NEXT: packuswb %xmm2, %xmm13
; SSE-NEXT: packuswb %xmm4, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[0,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm13[0,1,3,3]
; SSE-NEXT: movss {{.*#+}} xmm2 = xmm4[0],xmm2[1,2,3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; SSE-NEXT: movdqa %xmm7, %xmm3
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,65535,0,65535,65535]
-; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm10, %xmm4
+; SSE-NEXT: pand %xmm10, %xmm3
; SSE-NEXT: pandn %xmm6, %xmm4
-; SSE-NEXT: movdqa %xmm6, %xmm11
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm3
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; SSE-NEXT: pxor %xmm10, %xmm10
+; SSE-NEXT: pxor %xmm13, %xmm13
; SSE-NEXT: pand %xmm1, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: por %xmm4, %xmm1
@@ -2252,7 +2253,7 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm1, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,65535,65535,0]
; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[0,1,2,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,7]
; SSE-NEXT: packuswb %xmm3, %xmm3
; SSE-NEXT: pandn %xmm3, %xmm0
@@ -2262,12 +2263,12 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: andps %xmm5, %xmm2
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [65535,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,65535,0,65535,65535,0,65535]
+; SSE-NEXT: pand %xmm10, %xmm2
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm10[8],xmm3[9],xmm10[9],xmm3[10],xmm10[10],xmm3[11],xmm10[11],xmm3[12],xmm10[12],xmm3[13],xmm10[13],xmm3[14],xmm10[14],xmm3[15],xmm10[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3],xmm2[4],xmm10[4],xmm2[5],xmm10[5],xmm2[6],xmm10[6],xmm2[7],xmm10[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm13[8],xmm3[9],xmm13[9],xmm3[10],xmm13[10],xmm3[11],xmm13[11],xmm3[12],xmm13[12],xmm3[13],xmm13[13],xmm3[14],xmm13[14],xmm3[15],xmm13[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm13[0],xmm2[1],xmm13[1],xmm2[2],xmm13[2],xmm2[3],xmm13[3],xmm2[4],xmm13[4],xmm2[5],xmm13[5],xmm2[6],xmm13[6],xmm2[7],xmm13[7]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [0,65535,65535,65535,65535,65535,65535,0]
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pandn %xmm3, %xmm4
@@ -2277,36 +2278,36 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: packuswb %xmm3, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[0,2,2,3]
-; SSE-NEXT: pand %xmm13, %xmm9
+; SSE-NEXT: pand %xmm10, %xmm9
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: pandn %xmm3, %xmm13
+; SSE-NEXT: pandn %xmm3, %xmm10
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm10[8],xmm3[9],xmm10[9],xmm3[10],xmm10[10],xmm3[11],xmm10[11],xmm3[12],xmm10[12],xmm3[13],xmm10[13],xmm3[14],xmm10[14],xmm3[15],xmm10[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm13[8],xmm3[9],xmm13[9],xmm3[10],xmm13[10],xmm3[11],xmm13[11],xmm3[12],xmm13[12],xmm3[13],xmm13[13],xmm3[14],xmm13[14],xmm3[15],xmm13[15]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3],xmm2[4],xmm10[4],xmm2[5],xmm10[5],xmm2[6],xmm10[6],xmm2[7],xmm10[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm13[0],xmm2[1],xmm13[1],xmm2[2],xmm13[2],xmm2[3],xmm13[3],xmm2[4],xmm13[4],xmm2[5],xmm13[5],xmm2[6],xmm13[6],xmm2[7],xmm13[7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSE-NEXT: packuswb %xmm2, %xmm2
; SSE-NEXT: movss {{.*#+}} xmm6 = xmm2[0],xmm6[1,2,3]
; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: movdqa %xmm7, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE-NEXT: pand %xmm3, %xmm2
-; SSE-NEXT: pandn %xmm11, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: pandn %xmm0, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3],xmm2[4],xmm10[4],xmm2[5],xmm10[5],xmm2[6],xmm10[6],xmm2[7],xmm10[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm10[8],xmm3[9],xmm10[9],xmm3[10],xmm10[10],xmm3[11],xmm10[11],xmm3[12],xmm10[12],xmm3[13],xmm10[13],xmm3[14],xmm10[14],xmm3[15],xmm10[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm13[0],xmm2[1],xmm13[1],xmm2[2],xmm13[2],xmm2[3],xmm13[3],xmm2[4],xmm13[4],xmm2[5],xmm13[5],xmm2[6],xmm13[6],xmm2[7],xmm13[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm13[8],xmm3[9],xmm13[9],xmm3[10],xmm13[10],xmm3[11],xmm13[11],xmm3[12],xmm13[12],xmm3[13],xmm13[13],xmm3[14],xmm13[14],xmm3[15],xmm13[15]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,0,65535,65535,65535]
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm4
; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm12[0,1,0,3]
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm14[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,7,6]
; SSE-NEXT: packuswb %xmm2, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
@@ -2322,22 +2323,22 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pandn %xmm7, %xmm2
; SSE-NEXT: andps %xmm5, %xmm6
; SSE-NEXT: por %xmm6, %xmm2
-; SSE-NEXT: movdqa %xmm13, %xmm7
+; SSE-NEXT: movdqa %xmm10, %xmm7
; SSE-NEXT: por %xmm9, %xmm7
; SSE-NEXT: movdqa %xmm7, %xmm4
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3],xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm4[0,1,2,3,6,4,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm10[8],xmm7[9],xmm10[9],xmm7[10],xmm10[10],xmm7[11],xmm10[11],xmm7[12],xmm10[12],xmm7[13],xmm10[13],xmm7[14],xmm10[14],xmm7[15],xmm10[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm13[8],xmm7[9],xmm13[9],xmm7[10],xmm13[10],xmm7[11],xmm13[11],xmm7[12],xmm13[12],xmm7[13],xmm13[13],xmm7[14],xmm13[14],xmm7[15],xmm13[15]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,7,6,7]
; SSE-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm10[8],xmm4[9],xmm10[9],xmm4[10],xmm10[10],xmm4[11],xmm10[11],xmm4[12],xmm10[12],xmm4[13],xmm10[13],xmm4[14],xmm10[14],xmm4[15],xmm10[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm13[8],xmm4[9],xmm13[9],xmm4[10],xmm13[10],xmm4[11],xmm13[11],xmm4[12],xmm13[12],xmm4[13],xmm13[13],xmm4[14],xmm13[14],xmm4[15],xmm13[15]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
; SSE-NEXT: pandn %xmm4, %xmm9
; SSE-NEXT: movdqa %xmm4, %xmm7
-; SSE-NEXT: por %xmm9, %xmm14
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm14[0,2,1,3]
+; SSE-NEXT: por %xmm9, %xmm15
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm15[0,2,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,5]
@@ -2345,42 +2346,43 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm7, %xmm4
; SSE-NEXT: packuswb %xmm6, %xmm6
; SSE-NEXT: movss {{.*#+}} xmm4 = xmm6[0],xmm4[1,2,3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm11[1,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[0,2,2,3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,3,2,3]
+; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
+; SSE-NEXT: # xmm7 = mem[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; SSE-NEXT: movdqa %xmm7, %xmm6
-; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm10[0],xmm6[1],xmm10[1],xmm6[2],xmm10[2],xmm6[3],xmm10[3],xmm6[4],xmm10[4],xmm6[5],xmm10[5],xmm6[6],xmm10[6],xmm6[7],xmm10[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm10[8],xmm7[9],xmm10[9],xmm7[10],xmm10[10],xmm7[11],xmm10[11],xmm7[12],xmm10[12],xmm7[13],xmm10[13],xmm7[14],xmm10[14],xmm7[15],xmm10[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm13[8],xmm7[9],xmm13[9],xmm7[10],xmm13[10],xmm7[11],xmm13[11],xmm7[12],xmm13[12],xmm7[13],xmm13[13],xmm7[14],xmm13[14],xmm7[15],xmm13[15]
; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,65535,65535,0,65535,0,65535,65535]
; SSE-NEXT: pand %xmm9, %xmm7
; SSE-NEXT: pandn %xmm6, %xmm9
; SSE-NEXT: por %xmm7, %xmm9
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm9[2,1,1,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,0,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm8[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,4,7]
; SSE-NEXT: packuswb %xmm6, %xmm6
-; SSE-NEXT: pand %xmm3, %xmm6
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm12[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,4,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm9[2,1,1,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,0,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm7, %xmm7
-; SSE-NEXT: pandn %xmm7, %xmm3
-; SSE-NEXT: por %xmm3, %xmm6
+; SSE-NEXT: pand %xmm3, %xmm7
+; SSE-NEXT: pandn %xmm6, %xmm3
+; SSE-NEXT: por %xmm7, %xmm3
; SSE-NEXT: andps %xmm5, %xmm4
-; SSE-NEXT: pandn %xmm6, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm5
; SSE-NEXT: por %xmm4, %xmm5
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, (%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, (%rdx)
-; SSE-NEXT: movdqa %xmm8, (%rcx)
-; SSE-NEXT: movdqa %xmm15, (%r8)
+; SSE-NEXT: movdqa %xmm12, (%rcx)
+; SSE-NEXT: movdqa %xmm11, (%r8)
; SSE-NEXT: movdqa %xmm1, (%r9)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE-NEXT: movdqa %xmm2, (%rax)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE-NEXT: movdqa %xmm5, (%rax)
-; SSE-NEXT: addq $168, %rsp
+; SSE-NEXT: addq $200, %rsp
; SSE-NEXT: retq
;
; AVX-LABEL: load_i8_stride7_vf16:
@@ -3601,13 +3603,14 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6) nounwind {
; SSE-LABEL: load_i8_stride7_vf32:
; SSE: # %bb.0:
-; SSE-NEXT: subq $648, %rsp # imm = 0x288
-; SSE-NEXT: movdqa 208(%rdi), %xmm14
+; SSE-NEXT: subq $632, %rsp # imm = 0x278
+; SSE-NEXT: movdqa 208(%rdi), %xmm11
; SSE-NEXT: movdqa 192(%rdi), %xmm5
; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 176(%rdi), %xmm6
; SSE-NEXT: movdqa 112(%rdi), %xmm4
; SSE-NEXT: movdqa 128(%rdi), %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 160(%rdi), %xmm7
; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 144(%rdi), %xmm1
@@ -3619,69 +3622,67 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm2, %xmm9
; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: pxor %xmm10, %xmm10
+; SSE-NEXT: pxor %xmm15, %xmm15
; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm10[8],xmm0[9],xmm10[9],xmm0[10],xmm10[10],xmm0[11],xmm10[11],xmm0[12],xmm10[12],xmm0[13],xmm10[13],xmm0[14],xmm10[14],xmm0[15],xmm10[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm15[8],xmm0[9],xmm15[9],xmm0[10],xmm15[10],xmm0[11],xmm15[11],xmm0[12],xmm15[12],xmm0[13],xmm15[13],xmm0[14],xmm15[14],xmm0[15],xmm15[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,4,5,6]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,6]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
-; SSE-NEXT: packuswb %xmm0, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255]
+; SSE-NEXT: packuswb %xmm0, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm8, %xmm2
+; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,0,65535,65535,65535,0,65535]
; SSE-NEXT: movdqa %xmm7, %xmm1
; SSE-NEXT: pandn %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm3, %xmm11
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm4, %xmm3
; SSE-NEXT: movdqa %xmm4, %xmm12
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm7, %xmm3
-; SSE-NEXT: movdqa %xmm7, %xmm8
+; SSE-NEXT: movdqa %xmm7, %xmm14
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm10[8],xmm1[9],xmm10[9],xmm1[10],xmm10[10],xmm1[11],xmm10[11],xmm1[12],xmm10[12],xmm1[13],xmm10[13],xmm1[14],xmm10[14],xmm1[15],xmm10[15]
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,65535,65535,0,65535,0,65535]
-; SSE-NEXT: movdqa %xmm7, %xmm4
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm15[8],xmm1[9],xmm15[9],xmm1[10],xmm15[10],xmm1[11],xmm15[11],xmm1[12],xmm15[12],xmm1[13],xmm15[13],xmm1[14],xmm15[14],xmm1[15],xmm15[15]
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,0,65535,0,65535]
+; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: pandn %xmm1, %xmm4
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1],xmm3[2],xmm10[2],xmm3[3],xmm10[3],xmm3[4],xmm10[4],xmm3[5],xmm10[5],xmm3[6],xmm10[6],xmm3[7],xmm10[7]
-; SSE-NEXT: pand %xmm7, %xmm3
-; SSE-NEXT: movdqa %xmm7, %xmm15
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
+; SSE-NEXT: pand %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm0, %xmm7
; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,1,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm7, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pandn %xmm6, %xmm2
; SSE-NEXT: movdqa %xmm6, %xmm13
; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm5, %xmm3
-; SSE-NEXT: pand %xmm7, %xmm3
+; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3],xmm2[4],xmm10[4],xmm2[5],xmm10[5],xmm2[6],xmm10[6],xmm2[7],xmm10[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3],xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,4,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm10[8],xmm3[9],xmm10[9],xmm3[10],xmm10[10],xmm3[11],xmm10[11],xmm3[12],xmm10[12],xmm3[13],xmm10[13],xmm3[14],xmm10[14],xmm3[15],xmm10[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm15[8],xmm3[9],xmm15[9],xmm3[10],xmm15[10],xmm3[11],xmm15[11],xmm3[12],xmm15[12],xmm3[13],xmm15[13],xmm3[14],xmm15[14],xmm3[15],xmm15[15]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSE-NEXT: movdqa %xmm14, %xmm3
-; SSE-NEXT: movdqa %xmm14, %xmm4
-; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm10[8],xmm4[9],xmm10[9],xmm4[10],xmm10[10],xmm4[11],xmm10[11],xmm4[12],xmm10[12],xmm4[13],xmm10[13],xmm4[14],xmm10[14],xmm4[15],xmm10[15]
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1],xmm3[2],xmm10[2],xmm3[3],xmm10[3],xmm3[4],xmm10[4],xmm3[5],xmm10[5],xmm3[6],xmm10[6],xmm3[7],xmm10[7]
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE-NEXT: movdqa %xmm11, %xmm10
+; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm15[8],xmm10[9],xmm15[9],xmm10[10],xmm15[10],xmm10[11],xmm15[11],xmm10[12],xmm15[12],xmm10[13],xmm15[13],xmm10[14],xmm15[14],xmm10[15],xmm15[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm15[0],xmm11[1],xmm15[1],xmm11[2],xmm15[2],xmm11[3],xmm15[3],xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7]
+; SSE-NEXT: movdqa %xmm11, %xmm3
+; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1],xmm3[2],xmm10[2],xmm3[3],xmm10[3]
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,7]
; SSE-NEXT: packuswb %xmm3, %xmm3
@@ -3698,17 +3699,17 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pand %xmm5, %xmm1
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 32(%rdi), %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 32(%rdi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: pandn %xmm2, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm1
; SSE-NEXT: movdqa 48(%rdi), %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm9, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm10[8],xmm1[9],xmm10[9],xmm1[10],xmm10[10],xmm1[11],xmm10[11],xmm1[12],xmm10[12],xmm1[13],xmm10[13],xmm1[14],xmm10[14],xmm1[15],xmm10[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3],xmm2[4],xmm10[4],xmm2[5],xmm10[5],xmm2[6],xmm10[6],xmm2[7],xmm10[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm15[8],xmm1[9],xmm15[9],xmm1[10],xmm15[10],xmm1[11],xmm15[11],xmm1[12],xmm15[12],xmm1[13],xmm15[13],xmm1[14],xmm15[14],xmm1[15],xmm15[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3],xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
@@ -3717,65 +3718,68 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm2
; SSE-NEXT: movdqa 16(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm8, %xmm1
+; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: movdqa %xmm14, %xmm1
; SSE-NEXT: pandn %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm3, %xmm14
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm3, %xmm9
; SSE-NEXT: movdqa (%rdi), %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm3
-; SSE-NEXT: movdqa %xmm4, %xmm9
+; SSE-NEXT: movdqa %xmm4, %xmm14
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm10[8],xmm1[9],xmm10[9],xmm1[10],xmm10[10],xmm1[11],xmm10[11],xmm1[12],xmm10[12],xmm1[13],xmm10[13],xmm1[14],xmm10[14],xmm1[15],xmm10[15]
-; SSE-NEXT: movdqa %xmm15, %xmm4
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm15[8],xmm1[9],xmm15[9],xmm1[10],xmm15[10],xmm1[11],xmm15[11],xmm1[12],xmm15[12],xmm1[13],xmm15[13],xmm1[14],xmm15[14],xmm1[15],xmm15[15]
+; SSE-NEXT: movdqa %xmm7, %xmm4
; SSE-NEXT: pandn %xmm1, %xmm4
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1],xmm3[2],xmm10[2],xmm3[3],xmm10[3],xmm3[4],xmm10[4],xmm3[5],xmm10[5],xmm3[6],xmm10[6],xmm3[7],xmm10[7]
-; SSE-NEXT: pand %xmm15, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
+; SSE-NEXT: pand %xmm7, %xmm3
; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,1,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pandn %xmm2, %xmm8
+; SSE-NEXT: por %xmm1, %xmm8
; SSE-NEXT: movdqa 64(%rdi), %xmm2
-; SSE-NEXT: movdqa %xmm7, %xmm0
-; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,0,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: pandn %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm2, %xmm15
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 80(%rdi), %xmm8
-; SSE-NEXT: movdqa %xmm8, %xmm2
-; SSE-NEXT: pand %xmm7, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,4,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm10[8],xmm2[9],xmm10[9],xmm2[10],xmm10[10],xmm2[11],xmm10[11],xmm2[12],xmm10[12],xmm2[13],xmm10[13],xmm2[14],xmm10[14],xmm2[15],xmm10[15]
+; SSE-NEXT: movdqa 80(%rdi), %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: pxor %xmm3, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,4,7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,7]
-; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE-NEXT: movdqa 96(%rdi), %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm10[8],xmm3[9],xmm10[9],xmm3[10],xmm10[10],xmm3[11],xmm10[11],xmm3[12],xmm10[12],xmm3[13],xmm10[13],xmm3[14],xmm10[14],xmm3[15],xmm10[15]
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3],xmm2[4],xmm10[4],xmm2[5],xmm10[5],xmm2[6],xmm10[6],xmm2[7],xmm10[7]
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE-NEXT: pxor %xmm7, %xmm7
+; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
; SSE-NEXT: packuswb %xmm2, %xmm2
; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm6, %xmm0
-; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: pand %xmm5, %xmm1
+; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: pand %xmm6, %xmm1
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: pand %xmm5, %xmm8
; SSE-NEXT: pandn %xmm3, %xmm5
-; SSE-NEXT: por %xmm1, %xmm5
+; SSE-NEXT: por %xmm8, %xmm5
; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE-NEXT: movdqa %xmm2, %xmm0
@@ -3784,40 +3788,39 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm10[8],xmm2[9],xmm10[9],xmm2[10],xmm10[10],xmm2[11],xmm10[11],xmm2[12],xmm10[12],xmm2[13],xmm10[13],xmm2[14],xmm10[14],xmm2[15],xmm10[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,6]
; SSE-NEXT: psrld $16, %xmm2
; SSE-NEXT: packuswb %xmm2, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm6, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: pandn %xmm11, %xmm1
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm12, %xmm3
-; SSE-NEXT: pand %xmm7, %xmm3
-; SSE-NEXT: movdqa %xmm7, %xmm12
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm4, %xmm12
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7]
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm7, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm8, %xmm4
; SSE-NEXT: pandn %xmm1, %xmm4
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm10[8],xmm3[9],xmm10[9],xmm3[10],xmm10[10],xmm3[11],xmm10[11],xmm3[12],xmm10[12],xmm3[13],xmm10[13],xmm3[14],xmm10[14],xmm3[15],xmm10[15]
-; SSE-NEXT: pand %xmm7, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm7[8],xmm3[9],xmm7[9],xmm3[10],xmm7[10],xmm3[11],xmm7[11],xmm3[12],xmm7[12],xmm3[13],xmm7[13],xmm3[14],xmm7[14],xmm3[15],xmm7[15]
+; SSE-NEXT: pand %xmm8, %xmm3
; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm5, %xmm1
-; SSE-NEXT: movdqa %xmm5, %xmm7
+; SSE-NEXT: pand %xmm6, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,65535,0,65535]
; SSE-NEXT: movdqa %xmm4, %xmm2
@@ -3827,25 +3830,24 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movdqa %xmm4, %xmm13
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm10[8],xmm2[9],xmm10[9],xmm2[10],xmm10[10],xmm2[11],xmm10[11],xmm2[12],xmm10[12],xmm2[13],xmm10[13],xmm2[14],xmm10[14],xmm2[15],xmm10[15]
-; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,65535,65535,0,65535,0,65535]
-; SSE-NEXT: movdqa %xmm11, %xmm4
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535,0,65535,0,65535]
+; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: pandn %xmm2, %xmm4
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1],xmm3[2],xmm10[2],xmm3[3],xmm10[3],xmm3[4],xmm10[4],xmm3[5],xmm10[5],xmm3[6],xmm10[6],xmm3[7],xmm10[7]
-; SSE-NEXT: pand %xmm11, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3],xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
+; SSE-NEXT: pand %xmm5, %xmm3
; SSE-NEXT: por %xmm4, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pslld $16, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; SSE-NEXT: packuswb %xmm4, %xmm2
-; SSE-NEXT: movdqa %xmm6, %xmm4
+; SSE-NEXT: pslld $16, %xmm10
+; SSE-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
+; SSE-NEXT: packuswb %xmm11, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,65535,65535,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm7, %xmm4
; SSE-NEXT: pandn %xmm2, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,3,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,6,7]
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pand %xmm6, %xmm2
+; SSE-NEXT: pand %xmm7, %xmm2
; SSE-NEXT: por %xmm2, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
; SSE-NEXT: movdqa %xmm3, %xmm2
@@ -3855,8 +3857,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE-NEXT: movdqa %xmm5, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: pandn %xmm6, %xmm1
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
; SSE-NEXT: movdqa %xmm10, %xmm2
; SSE-NEXT: pand %xmm5, %xmm2
@@ -3865,7 +3866,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pxor %xmm3, %xmm3
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSE-NEXT: pxor %xmm5, %xmm5
+; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
@@ -3873,788 +3874,804 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,7,6]
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: packuswb %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm7, %xmm4
-; SSE-NEXT: movdqa %xmm7, %xmm1
+; SSE-NEXT: movdqa %xmm6, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: pandn %xmm14, %xmm0
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: pandn %xmm9, %xmm0
+; SSE-NEXT: movdqa %xmm9, %xmm5
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm14, %xmm2
; SSE-NEXT: pand %xmm12, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm7, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE-NEXT: movdqa %xmm8, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
-; SSE-NEXT: pand %xmm7, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; SSE-NEXT: pand %xmm8, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm13, %xmm0
-; SSE-NEXT: pandn %xmm8, %xmm0
-; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: pandn %xmm2, %xmm0
; SSE-NEXT: movdqa %xmm15, %xmm1
; SSE-NEXT: pand %xmm13, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE-NEXT: movdqa %xmm11, %xmm2
-; SSE-NEXT: pand %xmm11, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pxor %xmm3, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,65535,0,65535,0,65535]
+; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: pandn %xmm7, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm3, %xmm11
+; SSE-NEXT: pand %xmm13, %xmm11
+; SSE-NEXT: por %xmm0, %xmm11
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm5, %xmm9
-; SSE-NEXT: pand %xmm13, %xmm9
-; SSE-NEXT: por %xmm0, %xmm9
-; SSE-NEXT: movdqa %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: pand %xmm13, %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm4
; SSE-NEXT: pandn %xmm10, %xmm13
; SSE-NEXT: por %xmm0, %xmm13
; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm12, %xmm7
-; SSE-NEXT: movdqa %xmm12, %xmm5
-; SSE-NEXT: pandn %xmm1, %xmm5
-; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm1[0,2,2,3]
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pand %xmm2, %xmm13
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; SSE-NEXT: pand %xmm2, %xmm12
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; SSE-NEXT: pand %xmm2, %xmm14
-; SSE-NEXT: pand %xmm2, %xmm8
-; SSE-NEXT: movdqa %xmm8, (%rsp) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: pandn %xmm10, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[0,2,2,3]
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm2, %xmm10
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pandn %xmm6, %xmm2
-; SSE-NEXT: por %xmm10, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,0,65535,65535,65535,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pandn %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, %xmm5
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm6, %xmm8
-; SSE-NEXT: pslld $16, %xmm8
-; SSE-NEXT: psrldq {{.*#+}} xmm5 = xmm5[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm1, %xmm15
-; SSE-NEXT: psrldq {{.*#+}} xmm15 = xmm15[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm6[0],xmm15[1],xmm6[1],xmm15[2],xmm6[2],xmm15[3],xmm6[3]
-; SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,0,65535,65535,0,65535,65535,65535]
+; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm12, %xmm6
+; SSE-NEXT: movdqa %xmm12, %xmm3
+; SSE-NEXT: pandn %xmm7, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pxor %xmm10, %xmm10
-; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3],xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm9[0,1,2,3,6,4,6,5]
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535,65535,65535,0,65535]
-; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm7[0,2,2,3]
+; SSE-NEXT: movdqa %xmm7, %xmm15
+; SSE-NEXT: pand %xmm10, %xmm7
+; SSE-NEXT: por %xmm0, %xmm7
+; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pand %xmm10, %xmm8
+; SSE-NEXT: movdqa %xmm5, %xmm12
+; SSE-NEXT: pand %xmm10, %xmm12
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; SSE-NEXT: pand %xmm10, %xmm13
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: pand %xmm10, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
-; SSE-NEXT: pxor %xmm9, %xmm9
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm0[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,4,6,5]
+; SSE-NEXT: movdqa %xmm6, %xmm2
; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: pand %xmm4, %xmm10
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm0, %xmm2
+; SSE-NEXT: pandn %xmm4, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: pand %xmm0, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
+; SSE-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pandn %xmm1, %xmm10
+; SSE-NEXT: por %xmm0, %xmm10
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,65535,65535,65535,65535]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm7, %xmm6
+; SSE-NEXT: pslld $16, %xmm6
+; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm2, %xmm14
+; SSE-NEXT: psrldq {{.*#+}} xmm14 = xmm14[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm7[0],xmm14[1],xmm7[1],xmm14[2],xmm7[2],xmm14[3],xmm7[3]
+; SSE-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: pandn %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pxor %xmm5, %xmm5
+; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm5[0],xmm11[1],xmm5[1],xmm11[2],xmm5[2],xmm11[3],xmm5[3],xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm11[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm10[0,1,2,3,6,4,6,5]
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,65535,65535,65535,65535,0,65535]
+; SSE-NEXT: pand %xmm10, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
-; SSE-NEXT: pand %xmm7, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm0[0,1,2,3,7,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm11[0,1,2,3,6,4,6,5]
+; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: pand %xmm10, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm10, %xmm5
+; SSE-NEXT: movdqa %xmm4, %xmm10
+; SSE-NEXT: pandn %xmm4, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pand %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm7, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3],xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7]
-; SSE-NEXT: pand %xmm7, %xmm6
-; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm7, %xmm5
+; SSE-NEXT: pandn %xmm7, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pxor %xmm0, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,0,65535,65535,65535,65535,65535,65535]
+; SSE-NEXT: pand %xmm7, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm7, %xmm10
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE-NEXT: pxor %xmm10, %xmm10
; SSE-NEXT: pand %xmm7, %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm7, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pandn %xmm1, %xmm7
+; SSE-NEXT: pandn %xmm2, %xmm7
; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
-; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
+; SSE-NEXT: packuswb %xmm2, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,65535,65535,0]
-; SSE-NEXT: movdqa %xmm0, %xmm10
-; SSE-NEXT: pandn %xmm1, %xmm10
-; SSE-NEXT: pshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: # xmm1 = mem[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,3,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,4,6,7]
-; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: por %xmm1, %xmm10
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pandn %xmm10, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm8
-; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pandn %xmm2, %xmm11
+; SSE-NEXT: pshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT: # xmm2 = mem[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,3,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,6,7]
+; SSE-NEXT: packuswb %xmm2, %xmm2
+; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT: pand %xmm0, %xmm10
-; SSE-NEXT: por %xmm1, %xmm10
-; SSE-NEXT: movdqa %xmm10, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; SSE-NEXT: pand %xmm3, %xmm10
-; SSE-NEXT: por %xmm0, %xmm10
-; SSE-NEXT: packuswb %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: por %xmm2, %xmm11
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
+; SSE-NEXT: movdqa %xmm7, %xmm4
+; SSE-NEXT: pandn %xmm11, %xmm4
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: pand %xmm7, %xmm2
+; SSE-NEXT: por %xmm2, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; SSE-NEXT: pand %xmm0, %xmm11
+; SSE-NEXT: por %xmm2, %xmm11
+; SSE-NEXT: movdqa %xmm11, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3],xmm2[4],xmm10[4],xmm2[5],xmm10[5],xmm2[6],xmm10[6],xmm2[7],xmm10[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm10[8],xmm11[9],xmm10[9],xmm11[10],xmm10[10],xmm11[11],xmm10[11],xmm11[12],xmm10[12],xmm11[13],xmm10[13],xmm11[14],xmm10[14],xmm11[15],xmm10[15]
+; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: por %xmm0, %xmm11
+; SSE-NEXT: packuswb %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[0,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm7, %xmm2
+; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: por %xmm0, %xmm8
; SSE-NEXT: movdqa %xmm8, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: por %xmm1, %xmm13
-; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: pxor %xmm11, %xmm11
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm11[8],xmm0[9],xmm11[9],xmm0[10],xmm11[10],xmm0[11],xmm11[11],xmm0[12],xmm11[12],xmm0[13],xmm11[13],xmm0[14],xmm11[14],xmm0[15],xmm11[15]
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm11[0],xmm8[1],xmm11[1],xmm8[2],xmm11[2],xmm8[3],xmm11[3],xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
+; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: por %xmm1, %xmm8
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
+; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm11[8],xmm1[9],xmm11[9],xmm1[10],xmm11[10],xmm1[11],xmm11[11],xmm1[12],xmm11[12],xmm1[13],xmm11[13],xmm1[14],xmm11[14],xmm1[15],xmm11[15]
; SSE-NEXT: pxor %xmm6, %xmm6
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm6[0],xmm13[1],xmm6[1],xmm13[2],xmm6[2],xmm13[3],xmm6[3],xmm13[4],xmm6[4],xmm13[5],xmm6[5],xmm13[6],xmm6[6],xmm13[7],xmm6[7]
-; SSE-NEXT: pand %xmm3, %xmm13
-; SSE-NEXT: movdqa %xmm3, %xmm5
-; SSE-NEXT: por %xmm2, %xmm13
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1]
-; SSE-NEXT: movdqa %xmm11, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm6[8],xmm2[9],xmm6[9],xmm2[10],xmm6[10],xmm2[11],xmm6[11],xmm2[12],xmm6[12],xmm2[13],xmm6[13],xmm2[14],xmm6[14],xmm2[15],xmm6[15]
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm1, %xmm10
-; SSE-NEXT: pandn %xmm2, %xmm10
-; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3],xmm11[4],xmm6[4],xmm11[5],xmm6[5],xmm11[6],xmm6[6],xmm11[7],xmm6[7]
-; SSE-NEXT: pand %xmm1, %xmm11
-; SSE-NEXT: por %xmm10, %xmm11
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm11[0,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
-; SSE-NEXT: packuswb %xmm2, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,0,65535,65535,65535]
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pandn %xmm1, %xmm11
+; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm6[0],xmm9[1],xmm6[1],xmm9[2],xmm6[2],xmm9[3],xmm6[3],xmm9[4],xmm6[4],xmm9[5],xmm6[5],xmm9[6],xmm6[6],xmm9[7],xmm6[7]
+; SSE-NEXT: pand %xmm0, %xmm9
+; SSE-NEXT: por %xmm11, %xmm9
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm9[0,1,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; SSE-NEXT: packuswb %xmm1, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm13[0,2,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,3,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
-; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pand %xmm6, %xmm2
-; SSE-NEXT: movdqa %xmm6, %xmm13
-; SSE-NEXT: por %xmm2, %xmm4
-; SSE-NEXT: pand %xmm8, %xmm4
-; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm6, %xmm5
+; SSE-NEXT: pandn %xmm4, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[0,2,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,3,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,0,3,4,5,6,7]
+; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: pand %xmm6, %xmm1
+; SSE-NEXT: movdqa %xmm6, %xmm9
+; SSE-NEXT: por %xmm1, %xmm5
+; SSE-NEXT: pand %xmm7, %xmm5
+; SSE-NEXT: por %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,65535,0,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm11, %xmm1
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pand %xmm10, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pxor %xmm6, %xmm6
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; SSE-NEXT: movdqa %xmm5, %xmm11
-; SSE-NEXT: movdqa %xmm5, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm6[8],xmm2[9],xmm6[9],xmm2[10],xmm6[10],xmm2[11],xmm6[11],xmm2[12],xmm6[12],xmm2[13],xmm6[13],xmm2[14],xmm6[14],xmm2[15],xmm6[15]
+; SSE-NEXT: pand %xmm11, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: pxor %xmm4, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE-NEXT: movdqa %xmm3, %xmm5
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
; SSE-NEXT: pand %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm5, %xmm6
; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: packuswb %xmm15, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535,65535,65535,65535,0]
-; SSE-NEXT: movdqa %xmm4, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm8, %xmm0
-; SSE-NEXT: movdqa %xmm8, %xmm15
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: packuswb %xmm14, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
+; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: pand %xmm5, %xmm1
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm7, %xmm1
+; SSE-NEXT: movdqa %xmm7, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pandn %xmm5, %xmm2
+; SSE-NEXT: pandn %xmm8, %xmm2
; SSE-NEXT: por %xmm2, %xmm12
; SSE-NEXT: movdqa %xmm12, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm6[8],xmm2[9],xmm6[9],xmm2[10],xmm6[10],xmm2[11],xmm6[11],xmm2[12],xmm6[12],xmm2[13],xmm6[13],xmm2[14],xmm6[14],xmm2[15],xmm6[15]
-; SSE-NEXT: movdqa %xmm11, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm6[0],xmm12[1],xmm6[1],xmm12[2],xmm6[2],xmm12[3],xmm6[3],xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
-; SSE-NEXT: pand %xmm11, %xmm12
+; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3],xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; SSE-NEXT: pand %xmm6, %xmm12
; SSE-NEXT: por %xmm3, %xmm12
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm8[1,3,2,3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,3,2,3]
+; SSE-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm6[8],xmm2[9],xmm6[9],xmm2[10],xmm6[10],xmm2[11],xmm6[11],xmm2[12],xmm6[12],xmm2[13],xmm6[13],xmm2[14],xmm6[14],xmm2[15],xmm6[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE-NEXT: pxor %xmm7, %xmm7
+; SSE-NEXT: pand %xmm0, %xmm3
+; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: por %xmm3, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,4,5,4,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,1,1]
+; SSE-NEXT: packuswb %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm12[0,2,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,3,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
+; SSE-NEXT: packuswb %xmm2, %xmm2
+; SSE-NEXT: pand %xmm9, %xmm2
+; SSE-NEXT: por %xmm2, %xmm0
+; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: movdqa %xmm11, %xmm1
+; SSE-NEXT: pandn %xmm10, %xmm1
+; SSE-NEXT: pand %xmm11, %xmm15
+; SSE-NEXT: por %xmm1, %xmm15
+; SSE-NEXT: movdqa %xmm15, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm7[8],xmm1[9],xmm7[9],xmm1[10],xmm7[10],xmm1[11],xmm7[11],xmm1[12],xmm7[12],xmm1[13],xmm7[13],xmm1[14],xmm7[14],xmm1[15],xmm7[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm7[0],xmm15[1],xmm7[1],xmm15[2],xmm7[2],xmm15[3],xmm7[3],xmm15[4],xmm7[4],xmm15[5],xmm7[5],xmm15[6],xmm7[6],xmm15[7],xmm7[7]
+; SSE-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm1[4],xmm15[5],xmm1[5],xmm15[6],xmm1[6],xmm15[7],xmm1[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm15[0,3,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,7,6]
+; SSE-NEXT: psrlq $48, %xmm1
+; SSE-NEXT: packuswb %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm9, %xmm14
+; SSE-NEXT: movdqa %xmm9, %xmm1
; SSE-NEXT: pandn %xmm2, %xmm1
-; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
-; SSE-NEXT: packuswb %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm13, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[0,2,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,3,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,0,3,4,5,6,7]
-; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm13, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,0,65535,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm11, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: pandn %xmm9, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm10, %xmm3
+; SSE-NEXT: pand %xmm11, %xmm3
+; SSE-NEXT: movdqa %xmm11, %xmm12
+; SSE-NEXT: por %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3],xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,0,65535,0,65535,65535,65535,65535]
+; SSE-NEXT: movdqa %xmm4, %xmm5
+; SSE-NEXT: pandn %xmm2, %xmm5
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm7[8],xmm3[9],xmm7[9],xmm3[10],xmm7[10],xmm3[11],xmm7[11],xmm3[12],xmm7[12],xmm3[13],xmm7[13],xmm3[14],xmm7[14],xmm3[15],xmm7[15]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: por %xmm5, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[3,2,1,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
+; SSE-NEXT: packuswb %xmm2, %xmm2
+; SSE-NEXT: pand %xmm14, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: pand %xmm15, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm7, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pand %xmm10, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
-; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,3,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,6]
-; SSE-NEXT: psrlq $48, %xmm0
-; SSE-NEXT: packuswb %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm13, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,0,65535,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: pandn %xmm11, %xmm1
+; SSE-NEXT: por %xmm1, %xmm13
+; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm7[8],xmm1[9],xmm7[9],xmm1[10],xmm7[10],xmm1[11],xmm7[11],xmm1[12],xmm7[12],xmm1[13],xmm7[13],xmm1[14],xmm7[14],xmm1[15],xmm7[15]
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm7[0],xmm13[1],xmm7[1],xmm13[2],xmm7[2],xmm13[3],xmm7[3],xmm13[4],xmm7[4],xmm13[5],xmm7[5],xmm13[6],xmm7[6],xmm13[7],xmm7[7]
+; SSE-NEXT: pand %xmm5, %xmm13
+; SSE-NEXT: por %xmm3, %xmm13
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm14[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm13[0,2,1,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,6,7]
+; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: pand %xmm5, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm15
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: pandn %xmm3, %xmm1
+; SSE-NEXT: pand %xmm5, %xmm2
+; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pandn %xmm6, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm7[8],xmm1[9],xmm7[9],xmm1[10],xmm7[10],xmm1[11],xmm7[11],xmm1[12],xmm7[12],xmm1[13],xmm7[13],xmm1[14],xmm7[14],xmm1[15],xmm7[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,3,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,7,6]
+; SSE-NEXT: psrlq $48, %xmm1
+; SSE-NEXT: packuswb %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: movdqa %xmm8, %xmm13
+; SSE-NEXT: pandn %xmm8, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: pand %xmm12, %xmm3
+; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; SSE-NEXT: pandn %xmm9, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: pand %xmm3, %xmm2
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,0,65535,0,65535,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm4, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm6[8],xmm2[9],xmm6[9],xmm2[10],xmm6[10],xmm2[11],xmm6[11],xmm2[12],xmm6[12],xmm2[13],xmm6[13],xmm2[14],xmm6[14],xmm2[15],xmm6[15]
-; SSE-NEXT: pand %xmm4, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[3,2,1,0,4,5,6,7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
+; SSE-NEXT: movdqa %xmm4, %xmm5
+; SSE-NEXT: pandn %xmm1, %xmm5
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm7[8],xmm3[9],xmm7[9],xmm3[10],xmm7[10],xmm3[11],xmm7[11],xmm3[12],xmm7[12],xmm3[13],xmm7[13],xmm3[14],xmm7[14],xmm3[15],xmm7[15]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: por %xmm5, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[3,2,1,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm13, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: pandn %xmm12, %xmm0
-; SSE-NEXT: por %xmm0, %xmm14
-; SSE-NEXT: movdqa %xmm14, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15]
-; SSE-NEXT: movdqa %xmm11, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3],xmm14[4],xmm6[4],xmm14[5],xmm6[5],xmm14[6],xmm6[6],xmm14[7],xmm6[7]
-; SSE-NEXT: pand %xmm11, %xmm14
-; SSE-NEXT: por %xmm2, %xmm14
-; SSE-NEXT: pshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: # xmm0 = mem[2,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,65535,65535,65535,65535,65535,0]
-; SSE-NEXT: movdqa %xmm10, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm14[0,2,1,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,6,7]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm0
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: pand %xmm15, %xmm1
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: pandn %xmm8, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pand %xmm11, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,6]
-; SSE-NEXT: psrlq $48, %xmm0
-; SSE-NEXT: packuswb %xmm0, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,0,65535,65535,65535,0,65535,65535]
-; SSE-NEXT: movdqa %xmm8, %xmm0
-; SSE-NEXT: movdqa %xmm5, %xmm11
-; SSE-NEXT: pandn %xmm5, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: pandn %xmm2, %xmm3
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm5
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: pand %xmm8, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; SSE-NEXT: movdqa %xmm4, %xmm5
-; SSE-NEXT: pandn %xmm0, %xmm5
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm6[8],xmm2[9],xmm6[9],xmm2[10],xmm6[10],xmm2[11],xmm6[11],xmm2[12],xmm6[12],xmm2[13],xmm6[13],xmm2[14],xmm6[14],xmm2[15],xmm6[15]
-; SSE-NEXT: pand %xmm4, %xmm2
-; SSE-NEXT: por %xmm5, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[3,2,1,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm13, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm13
-; SSE-NEXT: por %xmm13, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pandn %xmm8, %xmm1
-; SSE-NEXT: movdqa (%rsp), %xmm5 # 16-byte Reload
-; SSE-NEXT: por %xmm1, %xmm5
-; SSE-NEXT: movdqa %xmm5, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; SSE-NEXT: pandn %xmm3, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: por %xmm1, %xmm6
+; SSE-NEXT: movdqa %xmm6, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm7[8],xmm1[9],xmm7[9],xmm1[10],xmm7[10],xmm1[11],xmm7[11],xmm1[12],xmm7[12],xmm1[13],xmm7[13],xmm1[14],xmm7[14],xmm1[15],xmm7[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm6
; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: por %xmm5, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm5
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm13[2,1,2,3,4,5,6,7]
+; SSE-NEXT: por %xmm6, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm6
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm12[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: movdqa %xmm15, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm5[0,2,1,0,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[0,2,1,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm10, %xmm1
+; SSE-NEXT: pand %xmm15, %xmm1
; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm15 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
; SSE-NEXT: movdqa %xmm15, %xmm1
; SSE-NEXT: pandn %xmm2, %xmm1
-; SSE-NEXT: pand %xmm15, %xmm0
-; SSE-NEXT: movdqa %xmm15, %xmm14
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
-; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: pandn %xmm9, %xmm0
-; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: pand %xmm15, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE-NEXT: pand %xmm15, %xmm5
+; SSE-NEXT: por %xmm5, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: pandn %xmm9, %xmm1
+; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: pand %xmm5, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm7[8],xmm1[9],xmm7[9],xmm1[10],xmm7[10],xmm1[11],xmm7[11],xmm1[12],xmm7[12],xmm1[13],xmm7[13],xmm1[14],xmm7[14],xmm1[15],xmm7[15]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3],xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm7[8],xmm1[9],xmm7[9],xmm1[10],xmm7[10],xmm1[11],xmm7[11],xmm1[12],xmm7[12],xmm1[13],xmm7[13],xmm1[14],xmm7[14],xmm1[15],xmm7[15]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: pandn %xmm0, %xmm6
+; SSE-NEXT: pandn %xmm1, %xmm6
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; SSE-NEXT: por %xmm6, %xmm5
-; SSE-NEXT: packuswb %xmm0, %xmm5
+; SSE-NEXT: packuswb %xmm1, %xmm5
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,1,3,3]
-; SSE-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,0,65535,65535,65535,0,65535,65535]
-; SSE-NEXT: movdqa %xmm9, %xmm2
-; SSE-NEXT: pandn %xmm12, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm7, %xmm5
-; SSE-NEXT: pand %xmm9, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,1,3,3]
+; SSE-NEXT: movss {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,0,65535,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm6, %xmm2
+; SSE-NEXT: pandn %xmm11, %xmm2
+; SSE-NEXT: movdqa %xmm11, %xmm8
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm9, %xmm5
+; SSE-NEXT: pand %xmm6, %xmm5
+; SSE-NEXT: movdqa %xmm6, %xmm11
; SSE-NEXT: por %xmm2, %xmm5
; SSE-NEXT: movdqa %xmm5, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3],xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7]
; SSE-NEXT: movdqa %xmm4, %xmm6
; SSE-NEXT: pandn %xmm2, %xmm6
-; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm7[8],xmm5[9],xmm7[9],xmm5[10],xmm7[10],xmm5[11],xmm7[11],xmm5[12],xmm7[12],xmm5[13],xmm7[13],xmm5[14],xmm7[14],xmm5[15],xmm7[15]
; SSE-NEXT: pand %xmm4, %xmm5
; SSE-NEXT: por %xmm6, %xmm5
-; SSE-NEXT: pshufd $100, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: # xmm2 = mem[0,1,2,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm14[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: movdqa %xmm10, %xmm6
+; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,65535,65535,65535,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm14, %xmm6
; SSE-NEXT: pandn %xmm2, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm5[0,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pand %xmm10, %xmm2
+; SSE-NEXT: pand %xmm14, %xmm2
; SSE-NEXT: por %xmm2, %xmm6
-; SSE-NEXT: movdqa %xmm14, %xmm1
-; SSE-NEXT: pandn %xmm6, %xmm14
-; SSE-NEXT: andps %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm6
-; SSE-NEXT: por %xmm0, %xmm14
-; SSE-NEXT: movdqa %xmm15, %xmm1
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: movdqa %xmm11, %xmm15
-; SSE-NEXT: pandn %xmm11, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm3
-; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,1,2,3]
+; SSE-NEXT: movdqa %xmm15, %xmm2
+; SSE-NEXT: pandn %xmm6, %xmm2
+; SSE-NEXT: andps %xmm15, %xmm1
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: pandn %xmm13, %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: por %xmm1, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm0, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,2,3,4,5,6,7]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: packuswb %xmm0, %xmm2
+; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: pandn %xmm1, %xmm7
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: por %xmm7, %xmm6
+; SSE-NEXT: packuswb %xmm1, %xmm6
; SSE-NEXT: packuswb %xmm5, %xmm5
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,3,3]
-; SSE-NEXT: movss {{.*#+}} xmm0 = xmm5[0],xmm0[1,2,3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm2, %xmm5
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm9, %xmm2
-; SSE-NEXT: pand %xmm9, %xmm5
-; SSE-NEXT: pandn %xmm8, %xmm2
-; SSE-NEXT: movdqa %xmm8, %xmm9
-; SSE-NEXT: por %xmm5, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm5
-; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,1,3,3]
+; SSE-NEXT: movss {{.*#+}} xmm1 = xmm5[0],xmm1[1,2,3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm11, %xmm6
+; SSE-NEXT: pand %xmm11, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm6
+; SSE-NEXT: por %xmm5, %xmm6
+; SSE-NEXT: movdqa %xmm6, %xmm5
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; SSE-NEXT: pxor %xmm13, %xmm13
+; SSE-NEXT: pand %xmm4, %xmm6
; SSE-NEXT: pandn %xmm5, %xmm4
-; SSE-NEXT: por %xmm2, %xmm4
+; SSE-NEXT: por %xmm6, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,7]
; SSE-NEXT: packuswb %xmm4, %xmm4
-; SSE-NEXT: pand %xmm10, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm13[0,1,2,1]
+; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: pand %xmm14, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm12[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,4,7]
; SSE-NEXT: packuswb %xmm5, %xmm5
-; SSE-NEXT: pandn %xmm5, %xmm10
-; SSE-NEXT: por %xmm4, %xmm10
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: pandn %xmm10, %xmm4
-; SSE-NEXT: andps %xmm6, %xmm0
-; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [0,65535,65535,65535,65535,65535,65535,0]
-; SSE-NEXT: movdqa %xmm10, %xmm6
-; SSE-NEXT: pandn %xmm5, %xmm6
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: pand %xmm10, %xmm0
-; SSE-NEXT: por %xmm6, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm0[0,1,2,3,5,4,7,6]
+; SSE-NEXT: pandn %xmm5, %xmm0
+; SSE-NEXT: por %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm15, %xmm2
+; SSE-NEXT: pandn %xmm0, %xmm2
+; SSE-NEXT: andps %xmm15, %xmm1
+; SSE-NEXT: movdqa %xmm15, %xmm14
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,0,65535,65535,0,65535]
+; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm1, %xmm5
+; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm13[8],xmm5[9],xmm13[9],xmm5[10],xmm13[10],xmm5[11],xmm13[11],xmm5[12],xmm13[12],xmm5[13],xmm13[13],xmm5[14],xmm13[14],xmm5[15],xmm13[15]
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [0,65535,65535,65535,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm6, %xmm7
+; SSE-NEXT: pandn %xmm5, %xmm7
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
+; SSE-NEXT: pand %xmm6, %xmm1
+; SSE-NEXT: por %xmm7, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,5,4,7,6]
; SSE-NEXT: psrldq {{.*#+}} xmm5 = xmm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: packuswb %xmm5, %xmm8
-; SSE-NEXT: movdqa %xmm2, %xmm11
+; SSE-NEXT: packuswb %xmm5, %xmm7
+; SSE-NEXT: movdqa %xmm3, %xmm11
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pandn %xmm0, %xmm11
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm13[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
-; SSE-NEXT: movdqa %xmm5, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
+; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm10[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm13[8],xmm1[9],xmm13[9],xmm1[10],xmm13[10],xmm1[11],xmm13[11],xmm1[12],xmm13[12],xmm1[13],xmm13[13],xmm1[14],xmm13[14],xmm1[15],xmm13[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm13[0],xmm5[1],xmm13[1],xmm5[2],xmm13[2],xmm5[3],xmm13[3],xmm5[4],xmm13[4],xmm5[5],xmm13[5],xmm5[6],xmm13[6],xmm5[7],xmm13[7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,3,2,3,4,5,6,7]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
; SSE-NEXT: packuswb %xmm5, %xmm5
-; SSE-NEXT: movss {{.*#+}} xmm8 = xmm5[0],xmm8[1,2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: pandn %xmm12, %xmm0
-; SSE-NEXT: movdqa %xmm7, %xmm5
-; SSE-NEXT: pand %xmm6, %xmm5
-; SSE-NEXT: por %xmm0, %xmm5
-; SSE-NEXT: movdqa %xmm5, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,0,65535,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm6, %xmm7
-; SSE-NEXT: pandn %xmm0, %xmm7
-; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
-; SSE-NEXT: pand %xmm6, %xmm5
-; SSE-NEXT: por %xmm7, %xmm5
+; SSE-NEXT: movss {{.*#+}} xmm7 = xmm5[0],xmm7[1,2,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm15, %xmm1
+; SSE-NEXT: pandn %xmm8, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm5
+; SSE-NEXT: pand %xmm15, %xmm5
+; SSE-NEXT: por %xmm1, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm9
+; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3],xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,0,65535,0,65535,65535,65535]
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: pandn %xmm9, %xmm10
+; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm13[8],xmm5[9],xmm13[9],xmm5[10],xmm13[10],xmm5[11],xmm13[11],xmm5[12],xmm13[12],xmm5[13],xmm13[13],xmm5[14],xmm13[14],xmm5[15],xmm13[15]
+; SSE-NEXT: pand %xmm1, %xmm5
+; SSE-NEXT: por %xmm10, %xmm5
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,7,6]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
-; SSE-NEXT: movdqa %xmm7, %xmm12
-; SSE-NEXT: pandn %xmm0, %xmm12
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm7, %xmm0
-; SSE-NEXT: por %xmm0, %xmm12
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
-; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm0[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,4,7,6]
+; SSE-NEXT: packuswb %xmm9, %xmm9
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
+; SSE-NEXT: movdqa %xmm10, %xmm12
+; SSE-NEXT: pandn %xmm9, %xmm12
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,0,3,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
+; SSE-NEXT: packuswb %xmm5, %xmm5
+; SSE-NEXT: pand %xmm10, %xmm5
+; SSE-NEXT: por %xmm5, %xmm12
+; SSE-NEXT: movdqa %xmm14, %xmm5
; SSE-NEXT: pandn %xmm12, %xmm5
-; SSE-NEXT: andps %xmm0, %xmm8
-; SSE-NEXT: por %xmm8, %xmm5
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm0, %xmm12
-; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm1[8],xmm12[9],xmm1[9],xmm12[10],xmm1[10],xmm12[11],xmm1[11],xmm12[12],xmm1[12],xmm12[13],xmm1[13],xmm12[14],xmm1[14],xmm12[15],xmm1[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: pand %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm12, %xmm10
-; SSE-NEXT: por %xmm0, %xmm10
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm0[0,1,2,3,5,4,7,6]
-; SSE-NEXT: psrldq {{.*#+}} xmm12 = xmm12[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: packuswb %xmm12, %xmm8
-; SSE-NEXT: movdqa %xmm13, %xmm12
-; SSE-NEXT: pand %xmm2, %xmm12
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[0,2,2,3]
-; SSE-NEXT: pand %xmm2, %xmm10
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pandn %xmm15, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm15[1,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
-; SSE-NEXT: movdqa %xmm0, %xmm10
-; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm1[8],xmm10[9],xmm1[9],xmm10[10],xmm1[10],xmm10[11],xmm1[11],xmm10[12],xmm1[12],xmm10[13],xmm1[13],xmm10[14],xmm1[14],xmm10[15],xmm1[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm10[0,2,2,3,4,5,6,7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movss {{.*#+}} xmm8 = xmm0[0],xmm8[1,2,3]
-; SSE-NEXT: movdqa %xmm3, %xmm13
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT: pand %xmm3, %xmm0
-; SSE-NEXT: pandn %xmm9, %xmm3
-; SSE-NEXT: movdqa %xmm9, %xmm15
-; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; SSE-NEXT: pand %xmm6, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: por %xmm3, %xmm6
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,7,6]
-; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: andps %xmm14, %xmm7
+; SSE-NEXT: por %xmm7, %xmm5
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: pand %xmm3, %xmm7
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm7, %xmm9
-; SSE-NEXT: pandn %xmm0, %xmm9
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm7, %xmm0
-; SSE-NEXT: por %xmm0, %xmm9
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm13[8],xmm9[9],xmm13[9],xmm9[10],xmm13[10],xmm9[11],xmm13[11],xmm9[12],xmm13[12],xmm9[13],xmm13[13],xmm9[14],xmm13[14],xmm9[15],xmm13[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
+; SSE-NEXT: pand %xmm6, %xmm7
+; SSE-NEXT: pandn %xmm9, %xmm6
+; SSE-NEXT: por %xmm7, %xmm6
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm6[0,1,2,3,5,4,7,6]
+; SSE-NEXT: psrldq {{.*#+}} xmm9 = xmm9[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: packuswb %xmm9, %xmm7
+; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm12
+; SSE-NEXT: pand %xmm3, %xmm12
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm8[0,2,2,3]
+; SSE-NEXT: pand %xmm3, %xmm8
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: pandn %xmm9, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1]
+; SSE-NEXT: movdqa %xmm6, %xmm9
+; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm13[8],xmm9[9],xmm13[9],xmm9[10],xmm13[10],xmm9[11],xmm13[11],xmm9[12],xmm13[12],xmm9[13],xmm13[13],xmm9[14],xmm13[14],xmm9[15],xmm13[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[2,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,3,2,3,4,5,6,7]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3]
+; SSE-NEXT: packuswb %xmm6, %xmm6
+; SSE-NEXT: movss {{.*#+}} xmm7 = xmm6[0],xmm7[1,2,3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm4, %xmm6
+; SSE-NEXT: movdqa %xmm15, %xmm3
+; SSE-NEXT: pand %xmm15, %xmm6
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: pandn %xmm2, %xmm3
+; SSE-NEXT: por %xmm6, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm13[8],xmm3[9],xmm13[9],xmm3[10],xmm13[10],xmm3[11],xmm13[11],xmm3[12],xmm13[12],xmm3[13],xmm13[13],xmm3[14],xmm13[14],xmm3[15],xmm13[15]
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm6, %xmm1
+; SSE-NEXT: por %xmm3, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,7,6]
+; SSE-NEXT: packuswb %xmm6, %xmm6
+; SSE-NEXT: movdqa %xmm10, %xmm9
+; SSE-NEXT: pandn %xmm6, %xmm9
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,3,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
+; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: pand %xmm10, %xmm1
+; SSE-NEXT: por %xmm1, %xmm9
+; SSE-NEXT: movdqa %xmm14, %xmm6
; SSE-NEXT: pandn %xmm9, %xmm6
-; SSE-NEXT: andps %xmm3, %xmm8
-; SSE-NEXT: por %xmm8, %xmm6
-; SSE-NEXT: movdqa %xmm12, %xmm1
-; SSE-NEXT: por %xmm11, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: andps %xmm14, %xmm7
+; SSE-NEXT: por %xmm7, %xmm6
+; SSE-NEXT: movdqa %xmm12, %xmm7
+; SSE-NEXT: por %xmm11, %xmm7
+; SSE-NEXT: movdqa %xmm7, %xmm1
; SSE-NEXT: pxor %xmm9, %xmm9
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm1[0,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,7,6,7]
-; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm9[8],xmm8[9],xmm9[9],xmm8[10],xmm9[10],xmm8[11],xmm9[11],xmm8[12],xmm9[12],xmm8[13],xmm9[13],xmm8[14],xmm9[14],xmm8[15],xmm9[15]
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT: pandn %xmm8, %xmm10
-; SSE-NEXT: movdqa %xmm8, %xmm9
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: por %xmm10, %xmm8
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm8[0,1,2,3,4,7,6,5]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm9[3,3,3,3]
-; SSE-NEXT: packuswb %xmm8, %xmm10
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movss {{.*#+}} xmm10 = xmm0[0],xmm10[1,2,3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,7]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm7, %xmm8
-; SSE-NEXT: pandn %xmm0, %xmm8
-; SSE-NEXT: pshufd $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: # xmm0 = mem[1,3,2,3]
-; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; SSE-NEXT: # xmm11 = mem[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1]
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,65535,65535,0,65535,0,65535,65535]
-; SSE-NEXT: movdqa %xmm9, %xmm12
-; SSE-NEXT: pandn %xmm0, %xmm12
-; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm1[8],xmm11[9],xmm1[9],xmm11[10],xmm1[10],xmm11[11],xmm1[11],xmm11[12],xmm1[12],xmm11[13],xmm1[13],xmm11[14],xmm1[14],xmm11[15],xmm1[15]
-; SSE-NEXT: pand %xmm9, %xmm11
-; SSE-NEXT: por %xmm12, %xmm11
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,1,1,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,3,4,5,6,7]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm7, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,4,6,7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,7,6,7]
+; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm9[8],xmm3[9],xmm9[9],xmm3[10],xmm9[10],xmm3[11],xmm9[11],xmm3[12],xmm9[12],xmm3[13],xmm9[13],xmm3[14],xmm9[14],xmm3[15],xmm9[15]
+; SSE-NEXT: pxor %xmm15, %xmm15
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: pandn %xmm3, %xmm9
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: por %xmm9, %xmm7
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm7[0,1,2,3,4,7,6,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[3,3,3,3]
+; SSE-NEXT: packuswb %xmm7, %xmm9
+; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: movss {{.*#+}} xmm9 = xmm1[0],xmm9[1,2,3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
+; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: movdqa %xmm10, %xmm11
+; SSE-NEXT: pandn %xmm1, %xmm11
+; SSE-NEXT: pshufd $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: # xmm1 = mem[1,3,2,3]
+; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
+; SSE-NEXT: # xmm12 = mem[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm1[0],xmm12[1],xmm1[1]
+; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,65535,0,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm7, %xmm13
+; SSE-NEXT: pandn %xmm1, %xmm13
+; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm15[8],xmm12[9],xmm15[9],xmm12[10],xmm15[10],xmm12[11],xmm15[11],xmm12[12],xmm15[12],xmm12[13],xmm15[13],xmm12[14],xmm15[14],xmm12[15],xmm15[15]
+; SSE-NEXT: pand %xmm7, %xmm12
+; SSE-NEXT: por %xmm13, %xmm12
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[2,1,1,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
+; SSE-NEXT: packuswb %xmm1, %xmm12
+; SSE-NEXT: pand %xmm10, %xmm12
+; SSE-NEXT: por %xmm11, %xmm12
+; SSE-NEXT: movdqa %xmm14, %xmm1
+; SSE-NEXT: pandn %xmm12, %xmm1
+; SSE-NEXT: andps %xmm14, %xmm9
+; SSE-NEXT: por %xmm9, %xmm1
; SSE-NEXT: por %xmm8, %xmm0
-; SSE-NEXT: movaps %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm3, %xmm8
-; SSE-NEXT: pandn %xmm0, %xmm8
-; SSE-NEXT: andps %xmm3, %xmm10
-; SSE-NEXT: por %xmm10, %xmm8
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pxor %xmm11, %xmm11
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3],xmm0[4],xmm11[4],xmm0[5],xmm11[5],xmm0[6],xmm11[6],xmm0[7],xmm11[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm11[8],xmm2[9],xmm11[9],xmm2[10],xmm11[10],xmm2[11],xmm11[11],xmm2[12],xmm11[12],xmm2[13],xmm11[13],xmm2[14],xmm11[14],xmm2[15],xmm11[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm2[0,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,7,6,7]
-; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm11[8],xmm2[9],xmm11[9],xmm2[10],xmm11[10],xmm2[11],xmm11[11],xmm2[12],xmm11[12],xmm2[13],xmm11[13],xmm2[14],xmm11[14],xmm2[15],xmm11[15]
+; SSE-NEXT: movdqa %xmm0, %xmm9
; SSE-NEXT: pxor %xmm12, %xmm12
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT: pandn %xmm2, %xmm10
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: por %xmm10, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm3[0,2,1,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,7,6,5]
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm2[3,3,3,3]
-; SSE-NEXT: packuswb %xmm11, %xmm10
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movss {{.*#+}} xmm10 = xmm0[0],xmm10[1,2,3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[1,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm13[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1]
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
-; SSE-NEXT: pand %xmm9, %xmm11
+; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm12[0],xmm9[1],xmm12[1],xmm9[2],xmm12[2],xmm9[3],xmm12[3],xmm9[4],xmm12[4],xmm9[5],xmm12[5],xmm9[6],xmm12[6],xmm9[7],xmm12[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm9[0,1,2,3,6,4,6,7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm12[8],xmm0[9],xmm12[9],xmm0[10],xmm12[10],xmm0[11],xmm12[11],xmm0[12],xmm12[12],xmm0[13],xmm12[13],xmm0[14],xmm12[14],xmm0[15],xmm12[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm0[0,1,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,5,7,6,7]
+; SSE-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm9[4],xmm11[5],xmm9[5],xmm11[6],xmm9[6],xmm11[7],xmm9[7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm12[8],xmm0[9],xmm12[9],xmm0[10],xmm12[10],xmm0[11],xmm12[11],xmm0[12],xmm12[12],xmm0[13],xmm12[13],xmm0[14],xmm12[14],xmm0[15],xmm12[15]
+; SSE-NEXT: pxor %xmm13, %xmm13
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
; SSE-NEXT: pandn %xmm0, %xmm9
-; SSE-NEXT: por %xmm11, %xmm9
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,1,1,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,3,4,5,6,7]
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm7, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm2[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,4,7]
-; SSE-NEXT: packuswb %xmm9, %xmm9
-; SSE-NEXT: pandn %xmm9, %xmm7
-; SSE-NEXT: por %xmm7, %xmm0
-; SSE-NEXT: andps %xmm1, %xmm10
-; SSE-NEXT: andnps %xmm0, %xmm1
-; SSE-NEXT: orps %xmm10, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: por %xmm9, %xmm8
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm8[0,2,1,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,7,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,7,6,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm0[3,3,3,3]
+; SSE-NEXT: packuswb %xmm12, %xmm9
+; SSE-NEXT: packuswb %xmm11, %xmm11
+; SSE-NEXT: movss {{.*#+}} xmm9 = xmm11[0],xmm9[1,2,3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm2[1,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm4[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
+; SSE-NEXT: movdqa %xmm12, %xmm11
+; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3],xmm11[4],xmm13[4],xmm11[5],xmm13[5],xmm11[6],xmm13[6],xmm11[7],xmm13[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm13[8],xmm12[9],xmm13[9],xmm12[10],xmm13[10],xmm12[11],xmm13[11],xmm12[12],xmm13[12],xmm12[13],xmm13[13],xmm12[14],xmm13[14],xmm12[15],xmm13[15]
+; SSE-NEXT: pand %xmm7, %xmm12
+; SSE-NEXT: pandn %xmm11, %xmm7
+; SSE-NEXT: por %xmm12, %xmm7
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm0[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,4,7]
+; SSE-NEXT: packuswb %xmm11, %xmm11
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,1,1,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,0,3,4,5,6,7]
+; SSE-NEXT: packuswb %xmm7, %xmm7
+; SSE-NEXT: pand %xmm10, %xmm7
+; SSE-NEXT: pandn %xmm11, %xmm10
+; SSE-NEXT: por %xmm7, %xmm10
+; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: andps %xmm14, %xmm9
+; SSE-NEXT: pandn %xmm10, %xmm0
+; SSE-NEXT: por %xmm9, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm8
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, (%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, 16(%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: movaps %xmm7, 16(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, (%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
@@ -4663,19 +4680,21 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movaps %xmm0, (%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%rcx)
-; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, (%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, (%r8)
+; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%r8)
-; SSE-NEXT: movdqa %xmm4, (%r9)
-; SSE-NEXT: movdqa %xmm14, 16(%r9)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, (%r9)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, 16(%r9)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE-NEXT: movdqa %xmm6, (%rax)
; SSE-NEXT: movdqa %xmm5, 16(%rax)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT: movaps %xmm1, (%rax)
-; SSE-NEXT: movdqa %xmm8, 16(%rax)
-; SSE-NEXT: addq $648, %rsp # imm = 0x288
+; SSE-NEXT: movdqa %xmm8, (%rax)
+; SSE-NEXT: movdqa %xmm1, 16(%rax)
+; SSE-NEXT: addq $632, %rsp # imm = 0x278
; SSE-NEXT: retq
;
; AVX-LABEL: load_i8_stride7_vf32:
@@ -7198,7 +7217,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5, ptr %out.vec6) nounwind {
; SSE-LABEL: load_i8_stride7_vf64:
; SSE: # %bb.0:
-; SSE-NEXT: subq $1528, %rsp # imm = 0x5F8
+; SSE-NEXT: subq $1512, %rsp # imm = 0x5E8
; SSE-NEXT: movdqa 208(%rdi), %xmm12
; SSE-NEXT: movdqa 192(%rdi), %xmm5
; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -7209,13 +7228,15 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movdqa 128(%rdi), %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 160(%rdi), %xmm6
-; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 144(%rdi), %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,65535,0,65535,65535]
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm13
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm6, %xmm1
+; SSE-NEXT: movdqa %xmm6, %xmm14
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm2, %xmm7
; SSE-NEXT: por %xmm0, %xmm1
@@ -7226,11 +7247,13 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,4,5,6]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,6]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
-; SSE-NEXT: packuswb %xmm0, %xmm2
+; SSE-NEXT: packuswb %xmm0, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,0,65535,65535,65535,0,65535]
; SSE-NEXT: movdqa %xmm11, %xmm1
; SSE-NEXT: pandn %xmm3, %xmm1
@@ -7239,11 +7262,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,65535,65535,0,65535,0,65535]
-; SSE-NEXT: movdqa %xmm10, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,65535,65535,65535,0,65535,0,65535]
+; SSE-NEXT: movdqa %xmm9, %xmm4
; SSE-NEXT: pandn %xmm1, %xmm4
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; SSE-NEXT: pand %xmm10, %xmm3
+; SSE-NEXT: pand %xmm9, %xmm3
; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
@@ -7251,14 +7274,12 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,65535,0,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm10, %xmm2
; SSE-NEXT: pandn %xmm8, %xmm2
; SSE-NEXT: movdqa %xmm5, %xmm3
-; SSE-NEXT: pand %xmm9, %xmm3
+; SSE-NEXT: pand %xmm10, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
@@ -7294,7 +7315,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movdqa %xmm7, %xmm1
; SSE-NEXT: pandn %xmm2, %xmm1
; SSE-NEXT: movdqa 272(%rdi), %xmm2
-; SSE-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm7, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
@@ -7319,10 +7340,10 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
-; SSE-NEXT: movdqa %xmm10, %xmm4
+; SSE-NEXT: movdqa %xmm9, %xmm4
; SSE-NEXT: pandn %xmm1, %xmm4
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
-; SSE-NEXT: pand %xmm10, %xmm2
+; SSE-NEXT: pand %xmm9, %xmm2
; SSE-NEXT: por %xmm4, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
@@ -7333,11 +7354,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: movdqa 288(%rdi), %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm10, %xmm2
; SSE-NEXT: pandn %xmm3, %xmm2
; SSE-NEXT: movdqa 304(%rdi), %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm9, %xmm3
+; SSE-NEXT: pand %xmm10, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
@@ -7396,10 +7417,10 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
-; SSE-NEXT: movdqa %xmm10, %xmm4
+; SSE-NEXT: movdqa %xmm9, %xmm4
; SSE-NEXT: pandn %xmm1, %xmm4
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
-; SSE-NEXT: pand %xmm10, %xmm2
+; SSE-NEXT: pand %xmm9, %xmm2
; SSE-NEXT: por %xmm4, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
@@ -7410,12 +7431,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: movdqa 400(%rdi), %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm10, %xmm2
; SSE-NEXT: pandn %xmm3, %xmm2
-; SSE-NEXT: movdqa 416(%rdi), %xmm14
-; SSE-NEXT: movdqa %xmm14, %xmm3
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm9, %xmm3
+; SSE-NEXT: movdqa 416(%rdi), %xmm3
+; SSE-NEXT: movdqa %xmm3, (%rsp) # 16-byte Spill
+; SSE-NEXT: pand %xmm10, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
@@ -7471,10 +7491,10 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: por %xmm1, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
-; SSE-NEXT: movdqa %xmm10, %xmm5
+; SSE-NEXT: movdqa %xmm9, %xmm5
; SSE-NEXT: pandn %xmm1, %xmm5
; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
-; SSE-NEXT: pand %xmm10, %xmm4
+; SSE-NEXT: pand %xmm9, %xmm4
; SSE-NEXT: por %xmm5, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,2,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
@@ -7483,23 +7503,23 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm1, %xmm1
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa 64(%rdi), %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, %xmm0
-; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: pandn %xmm2, %xmm1
; SSE-NEXT: movdqa 80(%rdi), %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm9, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: pand %xmm10, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: pxor %xmm5, %xmm5
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,4,7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,4,7]
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,7]
-; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE-NEXT: movdqa 96(%rdi), %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
@@ -7513,19 +7533,18 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm2, %xmm2
; SSE-NEXT: movdqa %xmm12, %xmm4
; SSE-NEXT: pandn %xmm2, %xmm4
-; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm12, %xmm0
-; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: pand %xmm12, %xmm1
+; SSE-NEXT: por %xmm1, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm0
; SSE-NEXT: pandn %xmm4, %xmm8
-; SSE-NEXT: por %xmm1, %xmm8
+; SSE-NEXT: por %xmm0, %xmm8
; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm2, %xmm13
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm6, %xmm0
+; SSE-NEXT: pandn %xmm13, %xmm0
+; SSE-NEXT: movdqa %xmm14, %xmm1
+; SSE-NEXT: pand %xmm6, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
@@ -7539,29 +7558,30 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,6]
; SSE-NEXT: psrld $16, %xmm2
; SSE-NEXT: packuswb %xmm2, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: movdqa %xmm4, %xmm8
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: movdqa %xmm10, %xmm1
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pand %xmm9, %xmm4
+; SSE-NEXT: pand %xmm10, %xmm4
+; SSE-NEXT: movdqa %xmm10, %xmm8
; SSE-NEXT: por %xmm1, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm6, %xmm5
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm10, %xmm5
; SSE-NEXT: pandn %xmm1, %xmm5
; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm7[8],xmm4[9],xmm7[9],xmm4[10],xmm7[10],xmm4[11],xmm7[11],xmm4[12],xmm7[12],xmm4[13],xmm7[13],xmm4[14],xmm7[14],xmm4[15],xmm7[15]
-; SSE-NEXT: pand %xmm6, %xmm4
+; SSE-NEXT: pand %xmm10, %xmm4
; SSE-NEXT: por %xmm5, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm11, %xmm13
; SSE-NEXT: movdqa %xmm11, %xmm2
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
@@ -7569,10 +7589,10 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: por %xmm2, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
-; SSE-NEXT: movdqa %xmm10, %xmm5
+; SSE-NEXT: movdqa %xmm9, %xmm5
; SSE-NEXT: pandn %xmm2, %xmm5
; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3],xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
-; SSE-NEXT: pand %xmm10, %xmm4
+; SSE-NEXT: pand %xmm9, %xmm4
; SSE-NEXT: por %xmm5, %xmm4
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: pslld $16, %xmm2
@@ -7587,17 +7607,16 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm2, %xmm2
; SSE-NEXT: pand %xmm12, %xmm2
; SSE-NEXT: por %xmm2, %xmm5
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
-; SSE-NEXT: movdqa %xmm4, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm14 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
+; SSE-NEXT: movdqa %xmm14, %xmm2
; SSE-NEXT: pandn %xmm5, %xmm2
-; SSE-NEXT: pand %xmm4, %xmm1
-; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: pand %xmm14, %xmm1
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: movdqa %xmm6, %xmm1
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: movdqa (%rsp), %xmm2 # 16-byte Reload
-; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: pand %xmm6, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm7[8],xmm1[9],xmm7[9],xmm1[10],xmm7[10],xmm1[11],xmm7[11],xmm1[12],xmm7[12],xmm1[13],xmm7[13],xmm1[14],xmm7[14],xmm1[15],xmm7[15]
@@ -7610,43 +7629,45 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,7,6]
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: packuswb %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm8, %xmm4
+; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: pandn %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: movdqa %xmm8, %xmm1
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pand %xmm9, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
-; SSE-NEXT: movdqa %xmm6, %xmm5
+; SSE-NEXT: movdqa %xmm10, %xmm5
; SSE-NEXT: pandn %xmm1, %xmm5
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
-; SSE-NEXT: pand %xmm6, %xmm2
+; SSE-NEXT: pand %xmm10, %xmm2
; SSE-NEXT: por %xmm5, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: movdqa %xmm3, %xmm11
; SSE-NEXT: por %xmm4, %xmm1
-; SSE-NEXT: movdqa %xmm11, %xmm2
+; SSE-NEXT: movdqa %xmm13, %xmm2
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pand %xmm11, %xmm4
+; SSE-NEXT: pand %xmm13, %xmm4
; SSE-NEXT: por %xmm2, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
-; SSE-NEXT: movdqa %xmm10, %xmm5
+; SSE-NEXT: movdqa %xmm9, %xmm5
; SSE-NEXT: pandn %xmm2, %xmm5
; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3],xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
-; SSE-NEXT: pand %xmm10, %xmm4
+; SSE-NEXT: pand %xmm9, %xmm4
; SSE-NEXT: por %xmm5, %xmm4
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: pslld $16, %xmm2
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
; SSE-NEXT: packuswb %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm12, %xmm3
; SSE-NEXT: movdqa %xmm12, %xmm5
; SSE-NEXT: pandn %xmm2, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,3,2,3]
@@ -7655,15 +7676,17 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm2, %xmm2
; SSE-NEXT: pand %xmm12, %xmm2
; SSE-NEXT: por %xmm2, %xmm5
-; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm14, %xmm12
+; SSE-NEXT: movdqa %xmm14, %xmm2
; SSE-NEXT: pandn %xmm5, %xmm2
-; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: pand %xmm14, %xmm1
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: movdqa %xmm6, %xmm1
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm14, %xmm2
+; SSE-NEXT: pand %xmm6, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm7[8],xmm1[9],xmm7[9],xmm1[10],xmm7[10],xmm1[11],xmm7[11],xmm1[12],xmm7[12],xmm1[13],xmm7[13],xmm1[14],xmm7[14],xmm1[15],xmm7[15]
@@ -7676,68 +7699,67 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,7,6]
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: packuswb %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm8, %xmm4
+; SSE-NEXT: movdqa %xmm11, %xmm4
; SSE-NEXT: pandn %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: movdqa %xmm8, %xmm1
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pand %xmm9, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
-; SSE-NEXT: movdqa %xmm6, %xmm5
+; SSE-NEXT: movdqa %xmm10, %xmm5
; SSE-NEXT: pandn %xmm1, %xmm5
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
-; SSE-NEXT: pand %xmm6, %xmm2
+; SSE-NEXT: pand %xmm10, %xmm2
; SSE-NEXT: por %xmm5, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm11, %xmm1
; SSE-NEXT: por %xmm4, %xmm1
-; SSE-NEXT: movdqa %xmm11, %xmm2
-; SSE-NEXT: pandn %xmm14, %xmm2
+; SSE-NEXT: movdqa %xmm13, %xmm2
+; SSE-NEXT: pandn (%rsp), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pand %xmm11, %xmm4
+; SSE-NEXT: pand %xmm13, %xmm4
; SSE-NEXT: por %xmm2, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
-; SSE-NEXT: movdqa %xmm10, %xmm5
+; SSE-NEXT: movdqa %xmm9, %xmm5
; SSE-NEXT: pandn %xmm2, %xmm5
; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3],xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
-; SSE-NEXT: pand %xmm10, %xmm4
+; SSE-NEXT: pand %xmm9, %xmm4
; SSE-NEXT: por %xmm5, %xmm4
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: pslld $16, %xmm2
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
; SSE-NEXT: packuswb %xmm5, %xmm2
-; SSE-NEXT: movdqa %xmm12, %xmm5
+; SSE-NEXT: movdqa %xmm3, %xmm5
; SSE-NEXT: pandn %xmm2, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,3,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,6,7]
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pand %xmm12, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: por %xmm2, %xmm5
-; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm12, %xmm2
; SSE-NEXT: pandn %xmm5, %xmm2
-; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: pand %xmm12, %xmm1
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: movdqa %xmm6, %xmm1
; SSE-NEXT: pandn %xmm15, %xmm1
-; SSE-NEXT: movdqa %xmm15, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm15, %xmm2
-; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: movdqa %xmm15, %xmm12
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: pand %xmm6, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: pxor %xmm4, %xmm4
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE-NEXT: pxor %xmm5, %xmm5
+; SSE-NEXT: pxor %xmm3, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
@@ -7745,204 +7767,200 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,7,6]
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: packuswb %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm8, %xmm1
+; SSE-NEXT: movdqa %xmm11, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: movdqa %xmm8, %xmm0
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pand %xmm9, %xmm2
-; SSE-NEXT: movdqa %xmm9, %xmm12
+; SSE-NEXT: pand %xmm8, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; SSE-NEXT: movdqa %xmm6, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE-NEXT: movdqa %xmm10, %xmm4
; SSE-NEXT: pandn %xmm0, %xmm4
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
-; SSE-NEXT: pand %xmm6, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; SSE-NEXT: pand %xmm10, %xmm2
; SSE-NEXT: por %xmm4, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: pand %xmm11, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; SSE-NEXT: pandn %xmm14, %xmm0
+; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; SSE-NEXT: pandn %xmm15, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pand %xmm11, %xmm1
+; SSE-NEXT: pand %xmm13, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE-NEXT: pand %xmm10, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm10
-; SSE-NEXT: por %xmm1, %xmm10
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: pand %xmm9, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: por %xmm1, %xmm9
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm13, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; SSE-NEXT: pandn %xmm7, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: pand %xmm11, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: movdqa (%rsp), %xmm8 # 16-byte Reload
-; SSE-NEXT: pandn %xmm8, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm5, %xmm1
-; SSE-NEXT: pand %xmm11, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; SSE-NEXT: pandn %xmm9, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: pand %xmm11, %xmm4
-; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: pand %xmm11, %xmm0
-; SSE-NEXT: movdqa %xmm15, %xmm6
-; SSE-NEXT: pandn %xmm15, %xmm11
-; SSE-NEXT: por %xmm0, %xmm11
-; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,0,65535,65535,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm12, %xmm2
-; SSE-NEXT: movdqa %xmm7, %xmm4
-; SSE-NEXT: pandn %xmm7, %xmm2
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pandn %xmm8, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,2,2,3]
+; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: pandn %xmm14, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm12, %xmm0
+; SSE-NEXT: pand %xmm13, %xmm0
+; SSE-NEXT: movdqa %xmm5, %xmm11
+; SSE-NEXT: pandn %xmm5, %xmm13
+; SSE-NEXT: por %xmm0, %xmm13
+; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,0,65535,65535,0,65535,65535,65535]
+; SSE-NEXT: movdqa %xmm6, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm7, %xmm5
+; SSE-NEXT: pandn %xmm7, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[0,2,2,3]
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm15, %xmm4
-; SSE-NEXT: por %xmm0, %xmm4
+; SSE-NEXT: pand %xmm6, %xmm5
+; SSE-NEXT: por %xmm0, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm6, %xmm0
+; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm8, %xmm2
+; SSE-NEXT: pandn %xmm8, %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: pandn %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm12, %xmm2
-; SSE-NEXT: movdqa %xmm8, %xmm4
-; SSE-NEXT: pandn %xmm8, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm8[0,2,2,3]
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[0,2,2,3]
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm15, %xmm4
-; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: movdqa %xmm4, (%rsp) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm12, %xmm2
-; SSE-NEXT: movdqa %xmm12, %xmm1
-; SSE-NEXT: movdqa %xmm9, %xmm4
-; SSE-NEXT: pandn %xmm9, %xmm1
+; SSE-NEXT: pand %xmm6, %xmm2
+; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm6, %xmm0
+; SSE-NEXT: pandn %xmm10, %xmm0
+; SSE-NEXT: movdqa %xmm3, %xmm1
+; SSE-NEXT: pandn %xmm14, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[0,2,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[0,2,2,3]
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm15, %xmm4
-; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; SSE-NEXT: pand %xmm15, %xmm9
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; SSE-NEXT: pand %xmm15, %xmm12
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: pand %xmm15, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm6, %xmm14
+; SSE-NEXT: por %xmm0, %xmm14
+; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: pand %xmm6, %xmm7
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: pand %xmm6, %xmm10
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: pand %xmm15, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm14, %xmm0
-; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: pandn %xmm6, %xmm4
+; SSE-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
+; SSE-NEXT: pand %xmm6, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm6, %xmm15
+; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: pandn %xmm11, %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,2,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,2,2,3]
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pandn %xmm12, %xmm6
+; SSE-NEXT: por %xmm0, %xmm6
; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm15, %xmm0
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pandn %xmm3, %xmm15
-; SSE-NEXT: por %xmm0, %xmm15
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: pandn %xmm7, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: pandn %xmm5, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm7, %xmm10
-; SSE-NEXT: movdqa %xmm7, %xmm4
+; SSE-NEXT: movdqa %xmm5, %xmm6
+; SSE-NEXT: movdqa %xmm5, %xmm8
; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: pandn %xmm6, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: pandn %xmm4, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm6, %xmm8
+; SSE-NEXT: movdqa %xmm4, %xmm13
+; SSE-NEXT: movdqa %xmm4, %xmm11
; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: pandn %xmm5, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm5, %xmm1
-; SSE-NEXT: movdqa %xmm5, %xmm11
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm1, %xmm9
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm3, %xmm13
-; SSE-NEXT: pslld $16, %xmm13
-; SSE-NEXT: psrldq {{.*#+}} xmm10 = xmm10[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; SSE-NEXT: movdqa %xmm3, %xmm12
+; SSE-NEXT: pslld $16, %xmm12
+; SSE-NEXT: psrldq {{.*#+}} xmm6 = xmm6[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3]
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3]
+; SSE-NEXT: psrldq {{.*#+}} xmm13 = xmm13[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm15[0],xmm13[1],xmm15[1],xmm13[2],xmm15[2],xmm13[3],xmm15[3]
+; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm14, %xmm4
-; SSE-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm15[4],xmm8[5],xmm15[5],xmm8[6],xmm15[6],xmm8[7],xmm15[7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm2, %xmm13
+; SSE-NEXT: psrldq {{.*#+}} xmm13 = xmm13[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm3[0],xmm13[1],xmm3[1],xmm13[2],xmm3[2],xmm13[3],xmm3[3]
+; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, %xmm8
-; SSE-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
+; SSE-NEXT: movdqa %xmm14, %xmm8
+; SSE-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7]
; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm0, %xmm11
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm15, %xmm13
+; SSE-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm9
+; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm3, %xmm1
+; SSE-NEXT: movdqa %xmm3, %xmm11
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pxor %xmm0, %xmm0
@@ -7975,35 +7993,33 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm0[0,1,2,3,7,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm15[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm15[0,1,2,3,6,4,6,5]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm15[0,1,2,3,6,4,6,5]
+; SSE-NEXT: pand %xmm14, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm14, %xmm0
-; SSE-NEXT: pand %xmm14, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm14, %xmm3
-; SSE-NEXT: pandn %xmm4, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm14, %xmm7
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm14, %xmm3
-; SSE-NEXT: pandn %xmm8, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm14, %xmm6
-; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm14, %xmm3
-; SSE-NEXT: movdqa %xmm11, %xmm6
-; SSE-NEXT: pandn %xmm11, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pandn %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pand %xmm14, %xmm5
; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: pand %xmm14, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: pandn %xmm13, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm14, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: pandn %xmm9, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm14, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: pand %xmm14, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm11, %xmm4
+; SSE-NEXT: pandn %xmm11, %xmm14
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pxor %xmm0, %xmm0
@@ -8011,37 +8027,37 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,0,65535,65535,65535,65535,65535,65535]
; SSE-NEXT: pand %xmm11, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm11, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload
+; SSE-NEXT: pand %xmm11, %xmm8
+; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE-NEXT: pand %xmm11, %xmm3
-; SSE-NEXT: movdqa %xmm3, (%rsp) # 16-byte Spill
-; SSE-NEXT: pand %xmm11, %xmm8
-; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm11, %xmm13
+; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE-NEXT: pxor %xmm8, %xmm8
+; SSE-NEXT: pxor %xmm13, %xmm13
; SSE-NEXT: pand %xmm11, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm11, %xmm6
-; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm11, %xmm9
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3],xmm0[4],xmm13[4],xmm0[5],xmm13[5],xmm0[6],xmm13[6],xmm0[7],xmm13[7]
; SSE-NEXT: pand %xmm11, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm11, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm11, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pandn %xmm2, %xmm11
; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm13[0],xmm2[1],xmm13[1],xmm2[2],xmm13[2],xmm2[3],xmm13[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
; SSE-NEXT: packuswb %xmm2, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,65535,65535,0]
; SSE-NEXT: movdqa %xmm0, %xmm15
@@ -8052,14 +8068,14 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,6,7]
; SSE-NEXT: packuswb %xmm3, %xmm3
; SSE-NEXT: pand %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm8
; SSE-NEXT: por %xmm3, %xmm15
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pandn %xmm15, %xmm3
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm13
+; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,65535,65535,0,65535]
@@ -8067,201 +8083,204 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; SSE-NEXT: pand %xmm0, %xmm15
-; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: movdqa %xmm0, %xmm9
; SSE-NEXT: por %xmm3, %xmm15
; SSE-NEXT: movdqa %xmm15, %xmm3
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
-; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3],xmm3[4],xmm13[4],xmm3[5],xmm13[5],xmm3[6],xmm13[6],xmm3[7],xmm13[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm8[8],xmm15[9],xmm8[9],xmm15[10],xmm8[10],xmm15[11],xmm8[11],xmm15[12],xmm8[12],xmm15[13],xmm8[13],xmm15[14],xmm8[14],xmm15[15],xmm8[15]
-; SSE-NEXT: pand %xmm14, %xmm15
+; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
+; SSE-NEXT: pand %xmm2, %xmm15
+; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: por %xmm0, %xmm15
-; SSE-NEXT: packuswb %xmm10, %xmm0
-; SSE-NEXT: movdqa %xmm4, %xmm2
+; SSE-NEXT: packuswb %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm8, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: pand %xmm8, %xmm0
; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm13, %xmm3
+; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: por %xmm0, %xmm9
-; SSE-NEXT: movdqa %xmm9, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
-; SSE-NEXT: movdqa %xmm14, %xmm2
+; SSE-NEXT: por %xmm0, %xmm7
+; SSE-NEXT: movdqa %xmm7, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15]
+; SSE-NEXT: movdqa %xmm1, %xmm6
+; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; SSE-NEXT: pand %xmm14, %xmm9
-; SSE-NEXT: por %xmm2, %xmm9
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,3,2,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
+; SSE-NEXT: pand %xmm1, %xmm7
+; SSE-NEXT: por %xmm2, %xmm7
+; SSE-NEXT: pshufd $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: # xmm0 = mem[1,3,2,3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,0,65535,65535,65535]
; SSE-NEXT: movdqa %xmm0, %xmm15
; SSE-NEXT: pandn %xmm2, %xmm15
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: por %xmm15, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm1[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,4,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
; SSE-NEXT: packuswb %xmm2, %xmm11
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm6, %xmm2
-; SSE-NEXT: pandn %xmm11, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[0,2,1,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pandn %xmm11, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[0,2,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,3,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,0,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm6, %xmm1
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: pand %xmm13, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm5, %xmm15
-; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm12
+; SSE-NEXT: por %xmm1, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm9, %xmm15
+; SSE-NEXT: movdqa %xmm9, %xmm1
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pand %xmm5, %xmm2
+; SSE-NEXT: pand %xmm9, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
-; SSE-NEXT: movdqa %xmm14, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
-; SSE-NEXT: pand %xmm14, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
+; SSE-NEXT: pand %xmm6, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: packuswb {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm8, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm4, %xmm1
-; SSE-NEXT: movdqa %xmm4, %xmm10
+; SSE-NEXT: pand %xmm8, %xmm1
; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm9
; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: por %xmm2, %xmm12
-; SSE-NEXT: movdqa %xmm12, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
-; SSE-NEXT: movdqa %xmm14, %xmm3
+; SSE-NEXT: por %xmm2, %xmm10
+; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3],xmm12[4],xmm8[4],xmm12[5],xmm8[5],xmm12[6],xmm8[6],xmm12[7],xmm8[7]
-; SSE-NEXT: pand %xmm14, %xmm12
-; SSE-NEXT: por %xmm3, %xmm12
+; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1],xmm10[2],xmm13[2],xmm10[3],xmm13[3],xmm10[4],xmm13[4],xmm10[5],xmm13[5],xmm10[6],xmm13[6],xmm10[7],xmm13[7]
+; SSE-NEXT: pand %xmm6, %xmm10
+; SSE-NEXT: por %xmm3, %xmm10
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm11[1,3,2,3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
; SSE-NEXT: pand %xmm0, %xmm4
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
; SSE-NEXT: packuswb %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm6, %xmm4
+; SSE-NEXT: movdqa %xmm12, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm12[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[0,2,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,3,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pand %xmm6, %xmm2
+; SSE-NEXT: pand %xmm12, %xmm2
; SSE-NEXT: por %xmm2, %xmm4
-; SSE-NEXT: pand %xmm13, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm4
; SSE-NEXT: por %xmm1, %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm5, %xmm1
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm15, %xmm1
+; SSE-NEXT: pandn (%rsp), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pand %xmm5, %xmm2
+; SSE-NEXT: pand %xmm15, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
-; SSE-NEXT: movdqa %xmm14, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
-; SSE-NEXT: pand %xmm14, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
+; SSE-NEXT: pand %xmm6, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: packuswb {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm10, %xmm3
+; SSE-NEXT: movdqa %xmm8, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm10, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: movdqa %xmm8, %xmm10
; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm1
; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; SSE-NEXT: por %xmm2, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
-; SSE-NEXT: movdqa %xmm14, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
-; SSE-NEXT: pand %xmm14, %xmm4
-; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm5
-; SSE-NEXT: pshufd $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: # xmm2 = mem[1,3,2,3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
+; SSE-NEXT: pand %xmm6, %xmm4
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm8[1,3,2,3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
-; SSE-NEXT: pand %xmm0, %xmm4
-; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,1,2,1]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm13[0],xmm5[1],xmm13[1],xmm5[2],xmm13[2],xmm5[3],xmm13[3],xmm5[4],xmm13[4],xmm5[5],xmm13[5],xmm5[6],xmm13[6],xmm5[7],xmm13[7]
+; SSE-NEXT: pand %xmm0, %xmm5
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
; SSE-NEXT: packuswb %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm5[0,2,1,3]
+; SSE-NEXT: movdqa %xmm12, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,3,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pand %xmm6, %xmm2
-; SSE-NEXT: por %xmm2, %xmm4
-; SSE-NEXT: pand %xmm13, %xmm4
-; SSE-NEXT: por %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm12, %xmm2
+; SSE-NEXT: por %xmm2, %xmm5
+; SSE-NEXT: pand %xmm9, %xmm5
+; SSE-NEXT: por %xmm1, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm15, %xmm1
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: pand %xmm15, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
-; SSE-NEXT: movdqa %xmm14, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
-; SSE-NEXT: pand %xmm14, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
+; SSE-NEXT: pand %xmm6, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: packuswb {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm10, %xmm3
@@ -8272,26 +8291,27 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm1, %xmm1
; SSE-NEXT: pand %xmm10, %xmm1
; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: movdqa %xmm9, %xmm7
; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: por %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
-; SSE-NEXT: movdqa %xmm14, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: por %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
-; SSE-NEXT: pand %xmm14, %xmm4
-; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: pshufd $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: # xmm2 = mem[1,3,2,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm13[0],xmm5[1],xmm13[1],xmm5[2],xmm13[2],xmm5[3],xmm13[3],xmm5[4],xmm13[4],xmm5[5],xmm13[5],xmm5[6],xmm13[6],xmm5[7],xmm13[7]
+; SSE-NEXT: pand %xmm6, %xmm5
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[1,3,2,3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3],xmm3[4],xmm13[4],xmm3[5],xmm13[5],xmm3[6],xmm13[6],xmm3[7],xmm13[7]
; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm0
; SSE-NEXT: por %xmm3, %xmm0
@@ -8299,128 +8319,128 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
; SSE-NEXT: packuswb %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm6, %xmm2
+; SSE-NEXT: movdqa %xmm12, %xmm4
+; SSE-NEXT: movdqa %xmm12, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,2,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,3,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm6, %xmm0
+; SSE-NEXT: pand %xmm12, %xmm0
; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: pand %xmm7, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, %xmm9
; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: pandn %xmm7, %xmm0
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: pand %xmm15, %xmm1
+; SSE-NEXT: movdqa %xmm15, %xmm12
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,6]
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,0,65535,65535,65535,0,65535,65535]
-; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,0,65535,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pand %xmm12, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,0,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
; SSE-NEXT: pand %xmm5, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[3,2,1,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm6, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
-; SSE-NEXT: movdqa %xmm14, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15]
+; SSE-NEXT: movdqa %xmm6, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
-; SSE-NEXT: pand %xmm14, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3],xmm3[4],xmm13[4],xmm3[5],xmm13[5],xmm3[6],xmm13[6],xmm3[7],xmm13[7]
+; SSE-NEXT: pand %xmm6, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm10, %xmm7
-; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,65535,65535,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm7, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,2,1,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: pand %xmm7, %xmm0
; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm13, %xmm10
-; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm15 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
+; SSE-NEXT: movdqa %xmm15, %xmm0
; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: pand %xmm13, %xmm1
+; SSE-NEXT: pand %xmm15, %xmm1
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, %xmm0
+; SSE-NEXT: movdqa %xmm12, %xmm0
; SSE-NEXT: pandn %xmm11, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pand %xmm15, %xmm1
+; SSE-NEXT: pand %xmm12, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,6]
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm12, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; SSE-NEXT: pandn %xmm15, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: pand %xmm12, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,0,65535,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: pand %xmm10, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
; SSE-NEXT: pand %xmm5, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[3,2,1,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm6, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
-; SSE-NEXT: movdqa %xmm14, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15]
+; SSE-NEXT: movdqa %xmm6, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
-; SSE-NEXT: pand %xmm14, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3],xmm3[4],xmm13[4],xmm3[5],xmm13[5],xmm3[6],xmm13[6],xmm3[7],xmm13[7]
+; SSE-NEXT: pand %xmm6, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[2,1,2,3,4,5,6,7]
@@ -8433,57 +8453,60 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: pand %xmm7, %xmm0
; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: movdqa %xmm15, %xmm0
; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: pand %xmm13, %xmm1
+; SSE-NEXT: pand %xmm15, %xmm1
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, %xmm0
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm12, %xmm0
+; SSE-NEXT: pandn %xmm8, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pand %xmm9, %xmm1
-; SSE-NEXT: movdqa %xmm9, %xmm13
+; SSE-NEXT: pand %xmm12, %xmm1
+; SSE-NEXT: movdqa %xmm12, %xmm8
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,6]
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm12
+; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm12, %xmm1
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm9, %xmm2
-; SSE-NEXT: pand %xmm12, %xmm2
+; SSE-NEXT: movdqa %xmm10, %xmm4
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: pandn %xmm10, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm11, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
; SSE-NEXT: pand %xmm5, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[3,2,1,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm6, %xmm1
+; SSE-NEXT: pand %xmm12, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: pandn %xmm11, %xmm0
+; SSE-NEXT: pandn %xmm12, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
-; SSE-NEXT: movdqa %xmm14, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15]
+; SSE-NEXT: movdqa %xmm6, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
-; SSE-NEXT: pand %xmm14, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3],xmm3[4],xmm13[4],xmm3[5],xmm13[5],xmm3[6],xmm13[6],xmm3[7],xmm13[7]
+; SSE-NEXT: pand %xmm6, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[2,1,2,3,4,5,6,7]
@@ -8496,75 +8519,130 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: pand %xmm7, %xmm0
; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: movdqa %xmm15, %xmm0
; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm1
+; SSE-NEXT: pand %xmm15, %xmm1
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm0
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: pandn %xmm9, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm13[0],xmm2[1],xmm13[1],xmm2[2],xmm13[2],xmm2[3],xmm13[3],xmm2[4],xmm13[4],xmm2[5],xmm13[5],xmm2[6],xmm13[6],xmm2[7],xmm13[7]
; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,6]
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pandn %xmm13, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pandn %xmm8, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pand %xmm12, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3],xmm0[4],xmm13[4],xmm0[5],xmm13[5],xmm0[6],xmm13[6],xmm0[7],xmm13[7]
; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
; SSE-NEXT: pand %xmm5, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[3,2,1,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: pand %xmm6, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm6
-; SSE-NEXT: por %xmm6, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3],xmm3[4],xmm13[4],xmm3[5],xmm13[5],xmm3[6],xmm13[6],xmm3[7],xmm13[7]
+; SSE-NEXT: pand %xmm6, %xmm3
+; SSE-NEXT: pandn %xmm0, %xmm6
+; SSE-NEXT: por %xmm3, %xmm6
+; SSE-NEXT: pshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: # xmm0 = mem[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: movdqa %xmm7, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm6[0,2,1,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,6,7]
+; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: pand %xmm7, %xmm0
+; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm15, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
+; SSE-NEXT: pand %xmm15, %xmm2
+; SSE-NEXT: por %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm6
+; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
-; SSE-NEXT: pand %xmm14, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm14
-; SSE-NEXT: por %xmm3, %xmm14
-; SSE-NEXT: pshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: # xmm1 = mem[2,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: packuswb %xmm0, %xmm2
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm14[0,2,1,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,3,3]
+; SSE-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3],xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
+; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm13[8],xmm2[9],xmm13[9],xmm2[10],xmm13[10],xmm2[11],xmm13[11],xmm2[12],xmm13[12],xmm2[13],xmm13[13],xmm2[14],xmm13[14],xmm2[15],xmm13[15]
+; SSE-NEXT: pand %xmm5, %xmm2
+; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: pshufd $100, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: # xmm1 = mem[0,1,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
+; SSE-NEXT: packuswb %xmm1, %xmm1
+; SSE-NEXT: movdqa %xmm7, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
; SSE-NEXT: pand %xmm7, %xmm1
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm10, %xmm1
-; SSE-NEXT: pandn %xmm2, %xmm1
-; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm15, %xmm1
+; SSE-NEXT: pandn %xmm3, %xmm1
+; SSE-NEXT: andps %xmm15, %xmm0
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,0,65535,65535,0,65535,65535]
-; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm6, %xmm13
+; SSE-NEXT: movdqa %xmm6, %xmm0
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm6, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: pxor %xmm2, %xmm2
@@ -8585,115 +8663,64 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm1, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,3,3]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
-; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm14, %xmm2
-; SSE-NEXT: pand %xmm12, %xmm2
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; SSE-NEXT: movdqa %xmm5, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm6[8],xmm2[9],xmm6[9],xmm2[10],xmm6[10],xmm2[11],xmm6[11],xmm2[12],xmm6[12],xmm2[13],xmm6[13],xmm2[14],xmm6[14],xmm2[15],xmm6[15]
-; SSE-NEXT: pand %xmm5, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm5, %xmm4
+; SSE-NEXT: pandn %xmm1, %xmm4
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm6[8],xmm3[9],xmm6[9],xmm3[10],xmm6[10],xmm3[11],xmm6[11],xmm3[12],xmm6[12],xmm3[13],xmm6[13],xmm3[14],xmm6[14],xmm3[15],xmm6[15]
+; SSE-NEXT: pand %xmm5, %xmm3
+; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: pshufd $100, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm7, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,1,0,3]
+; SSE-NEXT: movdqa %xmm7, %xmm4
+; SSE-NEXT: pandn %xmm1, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
; SSE-NEXT: pand %xmm7, %xmm1
-; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm10, %xmm1
-; SSE-NEXT: pandn %xmm3, %xmm1
-; SSE-NEXT: andps %xmm10, %xmm0
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm8, %xmm0
-; SSE-NEXT: pandn %xmm15, %xmm0
-; SSE-NEXT: pand %xmm8, %xmm4
-; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pandn %xmm0, %xmm4
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: por %xmm4, %xmm3
-; SSE-NEXT: packuswb %xmm0, %xmm3
-; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,3,3]
-; SSE-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
-; SSE-NEXT: movdqa %xmm12, %xmm2
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm15, %xmm3
-; SSE-NEXT: pand %xmm12, %xmm3
-; SSE-NEXT: por %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE-NEXT: movdqa %xmm5, %xmm4
-; SSE-NEXT: pandn %xmm2, %xmm4
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; SSE-NEXT: pand %xmm5, %xmm3
-; SSE-NEXT: por %xmm4, %xmm3
-; SSE-NEXT: pshufd $100, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: # xmm2 = mem[0,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
-; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: movdqa %xmm7, %xmm4
-; SSE-NEXT: pandn %xmm2, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,3,2,1,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
-; SSE-NEXT: packuswb %xmm2, %xmm2
-; SSE-NEXT: pand %xmm7, %xmm2
-; SSE-NEXT: por %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: por %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm15, %xmm1
; SSE-NEXT: pandn %xmm4, %xmm1
-; SSE-NEXT: andps %xmm10, %xmm0
+; SSE-NEXT: andps %xmm15, %xmm0
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm8, %xmm0
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: pand %xmm8, %xmm9
-; SSE-NEXT: por %xmm0, %xmm9
-; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: pandn %xmm10, %xmm0
+; SSE-NEXT: pand %xmm13, %xmm11
+; SSE-NEXT: por %xmm0, %xmm11
+; SSE-NEXT: movdqa %xmm11, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3],xmm9[4],xmm1[4],xmm9[5],xmm1[5],xmm9[6],xmm1[6],xmm9[7],xmm1[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm9[2,1,2,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3],xmm11[4],xmm1[4],xmm11[5],xmm1[5],xmm11[6],xmm1[6],xmm11[7],xmm1[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm11[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: pandn %xmm0, %xmm6
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pandn %xmm0, %xmm4
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: por %xmm4, %xmm2
-; SSE-NEXT: packuswb %xmm0, %xmm2
+; SSE-NEXT: por %xmm6, %xmm4
+; SSE-NEXT: packuswb %xmm0, %xmm4
; SSE-NEXT: packuswb %xmm3, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,1,3,3]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm3[0],xmm0[1,2,3]
-; SSE-NEXT: movdqa %xmm12, %xmm3
-; SSE-NEXT: pandn %xmm11, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: pand %xmm12, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,0,65535,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa %xmm10, %xmm3
+; SSE-NEXT: pandn %xmm12, %xmm3
+; SSE-NEXT: movdqa (%rsp), %xmm4 # 16-byte Reload
+; SSE-NEXT: pand %xmm10, %xmm4
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm3
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
@@ -8714,15 +8741,15 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm3, %xmm3
; SSE-NEXT: pand %xmm7, %xmm3
; SSE-NEXT: por %xmm3, %xmm6
-; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: movdqa %xmm15, %xmm1
; SSE-NEXT: pandn %xmm6, %xmm1
-; SSE-NEXT: andps %xmm10, %xmm0
+; SSE-NEXT: andps %xmm15, %xmm0
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm8, %xmm0
-; SSE-NEXT: pandn %xmm13, %xmm0
+; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: pandn %xmm8, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pand %xmm8, %xmm4
+; SSE-NEXT: pand %xmm13, %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm1
@@ -8743,8 +8770,8 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,3,3]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm12, %xmm3
-; SSE-NEXT: pand %xmm12, %xmm4
+; SSE-NEXT: movdqa %xmm10, %xmm3
+; SSE-NEXT: pand %xmm10, %xmm4
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm4
@@ -8765,144 +8792,143 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm5, %xmm5
; SSE-NEXT: pandn %xmm5, %xmm7
; SSE-NEXT: por %xmm4, %xmm7
-; SSE-NEXT: movdqa %xmm10, %xmm3
+; SSE-NEXT: movdqa %xmm15, %xmm3
; SSE-NEXT: pandn %xmm7, %xmm3
-; SSE-NEXT: andps %xmm10, %xmm0
+; SSE-NEXT: andps %xmm15, %xmm0
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [65535,65535,65535,0,65535,65535,0,65535]
-; SSE-NEXT: pand %xmm13, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,65535,0,65535,65535,0,65535]
+; SSE-NEXT: pand %xmm10, %xmm4
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm4, %xmm6
-; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm12[8],xmm6[9],xmm12[9],xmm6[10],xmm12[10],xmm6[11],xmm12[11],xmm6[12],xmm12[12],xmm6[13],xmm12[13],xmm6[14],xmm12[14],xmm6[15],xmm12[15]
+; SSE-NEXT: movdqa %xmm4, %xmm7
+; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm12[8],xmm7[9],xmm12[9],xmm7[10],xmm12[10],xmm7[11],xmm12[11],xmm7[12],xmm12[12],xmm7[13],xmm12[13],xmm7[14],xmm12[14],xmm7[15],xmm12[15]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,65535,65535,65535,65535,65535,65535,0]
-; SSE-NEXT: movdqa %xmm0, %xmm7
-; SSE-NEXT: pandn %xmm6, %xmm7
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: pandn %xmm7, %xmm6
; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3],xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
; SSE-NEXT: pand %xmm0, %xmm4
-; SSE-NEXT: por %xmm7, %xmm4
+; SSE-NEXT: por %xmm6, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm4[0,1,2,3,5,4,7,6]
-; SSE-NEXT: psrldq {{.*#+}} xmm6 = xmm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: packuswb %xmm6, %xmm7
-; SSE-NEXT: movdqa %xmm13, %xmm3
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm4[0,1,2,3,5,4,7,6]
+; SSE-NEXT: psrldq {{.*#+}} xmm7 = xmm7[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: movdqa %xmm10, %xmm3
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: pandn %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,3,2,3]
-; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; SSE-NEXT: # xmm6 = mem[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
-; SSE-NEXT: movdqa %xmm6, %xmm4
+; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
+; SSE-NEXT: # xmm7 = mem[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
+; SSE-NEXT: movdqa %xmm7, %xmm4
; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm12[8],xmm4[9],xmm12[9],xmm4[10],xmm12[10],xmm4[11],xmm12[11],xmm4[12],xmm12[12],xmm4[13],xmm12[13],xmm4[14],xmm12[14],xmm4[15],xmm12[15]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,3,2,3,4,5,6,7]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; SSE-NEXT: packuswb %xmm6, %xmm6
-; SSE-NEXT: movss {{.*#+}} xmm7 = xmm6[0],xmm7[1,2,3]
-; SSE-NEXT: movdqa %xmm8, %xmm1
-; SSE-NEXT: movdqa %xmm8, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1],xmm7[2],xmm12[2],xmm7[3],xmm12[3],xmm7[4],xmm12[4],xmm7[5],xmm12[5],xmm7[6],xmm12[6],xmm7[7],xmm12[7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,3,2,3,4,5,6,7]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; SSE-NEXT: packuswb %xmm7, %xmm7
+; SSE-NEXT: movss {{.*#+}} xmm6 = xmm7[0],xmm6[1,2,3]
+; SSE-NEXT: movdqa %xmm13, %xmm11
+; SSE-NEXT: movdqa %xmm13, %xmm4
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm14, %xmm8
-; SSE-NEXT: pand %xmm1, %xmm8
-; SSE-NEXT: movdqa %xmm1, %xmm14
+; SSE-NEXT: movdqa %xmm9, %xmm8
+; SSE-NEXT: pand %xmm13, %xmm8
; SSE-NEXT: por %xmm4, %xmm8
; SSE-NEXT: movdqa %xmm8, %xmm4
; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3],xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,0,65535,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm1, %xmm6
-; SSE-NEXT: pandn %xmm4, %xmm6
+; SSE-NEXT: movdqa %xmm1, %xmm7
+; SSE-NEXT: pandn %xmm4, %xmm7
; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm12[8],xmm8[9],xmm12[9],xmm8[10],xmm12[10],xmm8[11],xmm12[11],xmm8[12],xmm12[12],xmm8[13],xmm12[13],xmm8[14],xmm12[14],xmm8[15],xmm12[15]
; SSE-NEXT: pand %xmm1, %xmm8
-; SSE-NEXT: por %xmm6, %xmm8
+; SSE-NEXT: por %xmm7, %xmm8
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,7,6]
; SSE-NEXT: packuswb %xmm4, %xmm4
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
-; SSE-NEXT: movdqa %xmm6, %xmm9
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
+; SSE-NEXT: movdqa %xmm7, %xmm9
; SSE-NEXT: pandn %xmm4, %xmm9
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,0,3,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm4, %xmm4
-; SSE-NEXT: pand %xmm6, %xmm4
+; SSE-NEXT: pand %xmm7, %xmm4
; SSE-NEXT: por %xmm4, %xmm9
-; SSE-NEXT: movdqa %xmm10, %xmm3
+; SSE-NEXT: movdqa %xmm15, %xmm3
; SSE-NEXT: pandn %xmm9, %xmm3
-; SSE-NEXT: andps %xmm10, %xmm7
-; SSE-NEXT: movdqa %xmm10, %xmm5
-; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: andps %xmm15, %xmm6
+; SSE-NEXT: por %xmm6, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm13, %xmm10
-; SSE-NEXT: pand %xmm13, %xmm7
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm7, %xmm8
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm10, %xmm4
+; SSE-NEXT: pand %xmm10, %xmm6
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm6, %xmm8
; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm12[8],xmm8[9],xmm12[9],xmm8[10],xmm12[10],xmm8[11],xmm12[11],xmm8[12],xmm12[12],xmm8[13],xmm12[13],xmm8[14],xmm12[14],xmm8[15],xmm12[15]
; SSE-NEXT: movdqa %xmm0, %xmm9
; SSE-NEXT: pandn %xmm8, %xmm9
-; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1],xmm7[2],xmm12[2],xmm7[3],xmm12[3],xmm7[4],xmm12[4],xmm7[5],xmm12[5],xmm7[6],xmm12[6],xmm7[7],xmm12[7]
-; SSE-NEXT: pand %xmm0, %xmm7
-; SSE-NEXT: por %xmm9, %xmm7
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm7[0,1,2,3,5,4,7,6]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
+; SSE-NEXT: pand %xmm0, %xmm6
+; SSE-NEXT: por %xmm9, %xmm6
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm6[0,1,2,3,5,4,7,6]
; SSE-NEXT: psrldq {{.*#+}} xmm8 = xmm8[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: packuswb %xmm8, %xmm9
-; SSE-NEXT: movdqa %xmm13, %xmm4
+; SSE-NEXT: movdqa %xmm10, %xmm6
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,3,2,3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm4[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
-; SSE-NEXT: movdqa %xmm8, %xmm7
-; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm12[8],xmm7[9],xmm12[9],xmm7[10],xmm12[10],xmm7[11],xmm12[11],xmm7[12],xmm12[12],xmm7[13],xmm12[13],xmm7[14],xmm12[14],xmm7[15],xmm12[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pandn %xmm3, %xmm6
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,3,2,3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm10[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
+; SSE-NEXT: movdqa %xmm8, %xmm6
+; SSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm12[8],xmm6[9],xmm12[9],xmm6[10],xmm12[10],xmm6[11],xmm12[11],xmm6[12],xmm12[12],xmm6[13],xmm12[13],xmm6[14],xmm12[14],xmm6[15],xmm12[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,2,2,3,4,5,6,7]
; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm12[0],xmm8[1],xmm12[1],xmm8[2],xmm12[2],xmm8[3],xmm12[3],xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7]
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[1,3,2,3,4,5,6,7]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
; SSE-NEXT: packuswb %xmm8, %xmm8
; SSE-NEXT: movss {{.*#+}} xmm9 = xmm8[0],xmm9[1,2,3]
-; SSE-NEXT: movdqa %xmm14, %xmm7
-; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm15, %xmm8
-; SSE-NEXT: pand %xmm14, %xmm8
-; SSE-NEXT: por %xmm7, %xmm8
-; SSE-NEXT: movdqa %xmm8, %xmm7
-; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1],xmm7[2],xmm12[2],xmm7[3],xmm12[3],xmm7[4],xmm12[4],xmm7[5],xmm12[5],xmm7[6],xmm12[6],xmm7[7],xmm12[7]
+; SSE-NEXT: movdqa %xmm13, %xmm6
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm2, %xmm8
+; SSE-NEXT: pand %xmm13, %xmm8
+; SSE-NEXT: por %xmm6, %xmm8
+; SSE-NEXT: movdqa %xmm8, %xmm6
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
; SSE-NEXT: movdqa %xmm1, %xmm13
-; SSE-NEXT: pandn %xmm7, %xmm13
+; SSE-NEXT: pandn %xmm6, %xmm13
; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm12[8],xmm8[9],xmm12[9],xmm8[10],xmm12[10],xmm8[11],xmm12[11],xmm8[12],xmm12[12],xmm8[13],xmm12[13],xmm8[14],xmm12[14],xmm8[15],xmm12[15]
; SSE-NEXT: pand %xmm1, %xmm8
; SSE-NEXT: por %xmm13, %xmm8
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,7,6]
-; SSE-NEXT: packuswb %xmm7, %xmm7
-; SSE-NEXT: movdqa %xmm6, %xmm13
-; SSE-NEXT: pandn %xmm7, %xmm13
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,3,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,5,5]
-; SSE-NEXT: packuswb %xmm7, %xmm7
-; SSE-NEXT: pand %xmm6, %xmm7
-; SSE-NEXT: por %xmm7, %xmm13
-; SSE-NEXT: movdqa %xmm5, %xmm7
-; SSE-NEXT: pandn %xmm13, %xmm7
-; SSE-NEXT: andps %xmm5, %xmm9
-; SSE-NEXT: por %xmm9, %xmm7
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,7,6]
+; SSE-NEXT: packuswb %xmm6, %xmm6
+; SSE-NEXT: movdqa %xmm7, %xmm13
+; SSE-NEXT: pandn %xmm6, %xmm13
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm8[2,1,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,0,3,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,5,5]
+; SSE-NEXT: packuswb %xmm6, %xmm6
+; SSE-NEXT: pand %xmm7, %xmm6
+; SSE-NEXT: por %xmm6, %xmm13
+; SSE-NEXT: movdqa %xmm15, %xmm6
+; SSE-NEXT: pandn %xmm13, %xmm6
+; SSE-NEXT: andps %xmm15, %xmm9
+; SSE-NEXT: movdqa %xmm15, %xmm5
+; SSE-NEXT: por %xmm9, %xmm6
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: pand %xmm10, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm8, %xmm9
; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm12[8],xmm9[9],xmm12[9],xmm9[10],xmm12[10],xmm9[11],xmm12[11],xmm9[12],xmm12[12],xmm9[13],xmm12[13],xmm9[14],xmm12[14],xmm9[15],xmm12[15]
@@ -8915,10 +8941,10 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm8[0,1,2,3,5,4,7,6]
; SSE-NEXT: psrldq {{.*#+}} xmm9 = xmm9[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: packuswb %xmm9, %xmm15
-; SSE-NEXT: movdqa %xmm10, %xmm13
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: pandn %xmm3, %xmm13
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm3[1,3,2,3]
+; SSE-NEXT: movdqa %xmm4, %xmm13
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: pandn %xmm2, %xmm13
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm2[1,3,2,3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm3[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
@@ -8932,10 +8958,10 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
; SSE-NEXT: packuswb %xmm9, %xmm9
; SSE-NEXT: movss {{.*#+}} xmm15 = xmm9[0],xmm15[1,2,3]
-; SSE-NEXT: movdqa %xmm14, %xmm8
-; SSE-NEXT: pandn %xmm11, %xmm8
-; SSE-NEXT: movdqa %xmm2, %xmm9
-; SSE-NEXT: pand %xmm14, %xmm9
+; SSE-NEXT: movdqa %xmm11, %xmm8
+; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; SSE-NEXT: movdqa (%rsp), %xmm9 # 16-byte Reload
+; SSE-NEXT: pand %xmm11, %xmm9
; SSE-NEXT: por %xmm8, %xmm9
; SSE-NEXT: movdqa %xmm9, %xmm8
; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm12[0],xmm8[1],xmm12[1],xmm8[2],xmm12[2],xmm8[3],xmm12[3],xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7]
@@ -8949,20 +8975,20 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm2[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,7,6]
; SSE-NEXT: packuswb %xmm8, %xmm8
-; SSE-NEXT: movdqa %xmm6, %xmm11
+; SSE-NEXT: movdqa %xmm7, %xmm11
; SSE-NEXT: pandn %xmm8, %xmm11
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm9[2,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,0,3,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm8, %xmm8
-; SSE-NEXT: pand %xmm6, %xmm8
+; SSE-NEXT: pand %xmm7, %xmm8
; SSE-NEXT: por %xmm8, %xmm11
; SSE-NEXT: movdqa %xmm5, %xmm9
; SSE-NEXT: pandn %xmm11, %xmm9
; SSE-NEXT: andps %xmm5, %xmm15
; SSE-NEXT: por %xmm15, %xmm9
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: pand %xmm10, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
; SSE-NEXT: movdqa %xmm8, %xmm11
; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
@@ -8974,15 +9000,16 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
; SSE-NEXT: psrldq {{.*#+}} xmm11 = xmm11[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: packuswb %xmm11, %xmm0
-; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; SSE-NEXT: pand %xmm10, %xmm15
-; SSE-NEXT: pand %xmm10, %xmm4
-; SSE-NEXT: pand %xmm10, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm15
+; SSE-NEXT: movdqa %xmm10, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm3[0,2,2,3]
-; SSE-NEXT: pand %xmm10, %xmm3
+; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: pandn %xmm3, %xmm2
@@ -8999,8 +9026,8 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: packuswb %xmm8, %xmm8
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm8[0],xmm0[1,2,3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm14, %xmm3
-; SSE-NEXT: pand %xmm14, %xmm8
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,0,65535,65535]
+; SSE-NEXT: pand %xmm3, %xmm8
; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: por %xmm8, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm8
@@ -9009,18 +9036,17 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pand %xmm1, %xmm3
; SSE-NEXT: pandn %xmm8, %xmm1
; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm3[0,1,0,3]
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm14[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,7,6]
; SSE-NEXT: packuswb %xmm8, %xmm8
-; SSE-NEXT: movdqa %xmm6, %xmm14
+; SSE-NEXT: movdqa %xmm7, %xmm14
; SSE-NEXT: pandn %xmm8, %xmm14
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,3,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm6, %xmm1
+; SSE-NEXT: pand %xmm7, %xmm1
; SSE-NEXT: por %xmm1, %xmm14
; SSE-NEXT: movdqa %xmm5, %xmm11
; SSE-NEXT: pandn %xmm14, %xmm11
@@ -9056,7 +9082,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm6, %xmm8
+; SSE-NEXT: movdqa %xmm7, %xmm8
; SSE-NEXT: pandn %xmm0, %xmm8
; SSE-NEXT: pshufd $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[1,3,2,3]
@@ -9074,26 +9100,27 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,1,1,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm14
-; SSE-NEXT: pand %xmm6, %xmm14
+; SSE-NEXT: pand %xmm7, %xmm14
; SSE-NEXT: por %xmm8, %xmm14
; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: pandn %xmm14, %xmm3
; SSE-NEXT: andps %xmm5, %xmm1
; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm4, %xmm8
+; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; SSE-NEXT: movdqa %xmm8, %xmm1
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,4,6,7]
-; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm4[0,1,2,1]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,7,6,7]
; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
; SSE-NEXT: pandn %xmm4, %xmm12
-; SSE-NEXT: movdqa (%rsp), %xmm8 # 16-byte Reload
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; SSE-NEXT: por %xmm12, %xmm8
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,7,6,7]
@@ -9108,7 +9135,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm6, %xmm12
+; SSE-NEXT: movdqa %xmm7, %xmm12
; SSE-NEXT: pandn %xmm1, %xmm12
; SSE-NEXT: pshufd $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[1,3,2,3]
@@ -9125,7 +9152,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[2,1,1,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm6, %xmm1
+; SSE-NEXT: pand %xmm7, %xmm1
; SSE-NEXT: por %xmm12, %xmm1
; SSE-NEXT: movdqa %xmm5, %xmm12
; SSE-NEXT: pandn %xmm1, %xmm12
@@ -9162,11 +9189,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm6, %xmm13
+; SSE-NEXT: movdqa %xmm7, %xmm13
; SSE-NEXT: pandn %xmm1, %xmm13
; SSE-NEXT: pshufd $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[1,3,2,3]
-; SSE-NEXT: pshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
+; SSE-NEXT: pshufd $232, (%rsp), %xmm14 # 16-byte Folded Reload
; SSE-NEXT: # xmm14 = mem[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1]
; SSE-NEXT: movdqa %xmm14, %xmm1
@@ -9179,7 +9206,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[2,1,1,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm6, %xmm1
+; SSE-NEXT: pand %xmm7, %xmm1
; SSE-NEXT: por %xmm13, %xmm1
; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm4, %xmm13
@@ -9223,17 +9250,17 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: pand %xmm10, %xmm8
; SSE-NEXT: pandn %xmm1, %xmm10
; SSE-NEXT: por %xmm8, %xmm10
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[2,1,1,1]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
-; SSE-NEXT: pand %xmm6, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm2[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,4,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm10[2,1,1,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,1,0,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm8, %xmm8
-; SSE-NEXT: pandn %xmm8, %xmm6
-; SSE-NEXT: por %xmm6, %xmm1
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: pandn %xmm1, %xmm7
+; SSE-NEXT: por %xmm8, %xmm7
; SSE-NEXT: andps %xmm0, %xmm14
-; SSE-NEXT: pandn %xmm1, %xmm0
+; SSE-NEXT: pandn %xmm7, %xmm0
; SSE-NEXT: por %xmm14, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
@@ -9279,7 +9306,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE-NEXT: movdqa %xmm11, (%rax)
; SSE-NEXT: movdqa %xmm9, 48(%rax)
-; SSE-NEXT: movdqa %xmm7, 32(%rax)
+; SSE-NEXT: movdqa %xmm6, 32(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%rax)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
@@ -9287,7 +9314,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-NEXT: movdqa %xmm13, 48(%rax)
; SSE-NEXT: movdqa %xmm12, 32(%rax)
; SSE-NEXT: movdqa %xmm3, 16(%rax)
-; SSE-NEXT: addq $1528, %rsp # imm = 0x5F8
+; SSE-NEXT: addq $1512, %rsp # imm = 0x5E8
; SSE-NEXT: retq
;
; AVX-LABEL: load_i8_stride7_vf64:
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
index c1e7f1e8c6c725..1715879de1fac0 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -2132,397 +2132,394 @@ define void @store_i16_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %out.vec) nounwind {
; SSE-LABEL: store_i16_stride5_vf32:
; SSE: # %bb.0:
-; SSE-NEXT: subq $248, %rsp
-; SSE-NEXT: movdqa (%rdi), %xmm5
-; SSE-NEXT: movdqa 16(%rdi), %xmm9
-; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa (%rsi), %xmm6
-; SSE-NEXT: movdqa 16(%rsi), %xmm12
+; SSE-NEXT: subq $232, %rsp
+; SSE-NEXT: movdqa (%rdi), %xmm9
+; SSE-NEXT: movdqa 16(%rdi), %xmm12
; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa (%rdx), %xmm2
-; SSE-NEXT: movdqa (%rcx), %xmm8
-; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa (%rsi), %xmm7
+; SSE-NEXT: movdqa 16(%rsi), %xmm13
+; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa (%rdx), %xmm4
+; SSE-NEXT: movdqa (%rcx), %xmm10
; SSE-NEXT: movdqa 16(%rcx), %xmm14
; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa (%r8), %xmm0
+; SSE-NEXT: movdqa (%r8), %xmm15
+; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,0,65535,65535,65535]
+; SSE-NEXT: movdqa %xmm6, %xmm1
+; SSE-NEXT: pandn %xmm9, %xmm1
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm7[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pand %xmm6, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,0,0,65535,65535]
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm2, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,2,2]
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,0,65535,65535,65535,65535,0]
+; SSE-NEXT: pand %xmm11, %xmm2
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm10[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pandn %xmm5, %xmm11
+; SSE-NEXT: por %xmm2, %xmm11
+; SSE-NEXT: pand %xmm1, %xmm11
+; SSE-NEXT: por %xmm3, %xmm11
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,65535,0,65535,65535,65535,65535]
+; SSE-NEXT: pand %xmm8, %xmm11
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: pandn %xmm15, %xmm0
+; SSE-NEXT: por %xmm11, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,65535,65535,65535,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm15, %xmm1
-; SSE-NEXT: pandn %xmm5, %xmm1
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm6[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm6, %xmm2
+; SSE-NEXT: pandn %xmm12, %xmm2
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm13[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm15, %xmm3
-; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,0,0,65535,65535]
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,2,2]
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [65535,65535,0,65535,65535,65535,65535,0]
-; SSE-NEXT: pand %xmm13, %xmm7
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm13, %xmm11
-; SSE-NEXT: pandn %xmm8, %xmm11
-; SSE-NEXT: por %xmm7, %xmm11
+; SSE-NEXT: pand %xmm6, %xmm3
+; SSE-NEXT: por %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm5
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm14[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: pandn %xmm2, %xmm3
+; SSE-NEXT: movdqa 16(%rdx), %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm2[1,1,2,2]
+; SSE-NEXT: pand %xmm0, %xmm11
+; SSE-NEXT: movdqa %xmm0, %xmm12
+; SSE-NEXT: por %xmm11, %xmm3
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: por %xmm5, %xmm3
+; SSE-NEXT: movdqa 16(%r8), %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: pandn %xmm5, %xmm0
+; SSE-NEXT: por %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 32(%rdi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm6, %xmm3
+; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: movdqa 32(%rsi), %xmm0
+; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pand %xmm6, %xmm5
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm5, %xmm3
+; SSE-NEXT: movdqa 32(%rcx), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
+; SSE-NEXT: movdqa %xmm12, %xmm0
+; SSE-NEXT: movdqa %xmm12, %xmm11
+; SSE-NEXT: pandn %xmm5, %xmm11
+; SSE-NEXT: movdqa 32(%rdx), %xmm14
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm14[1,1,2,2]
+; SSE-NEXT: pand %xmm0, %xmm12
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: por %xmm12, %xmm11
; SSE-NEXT: pand %xmm1, %xmm11
-; SSE-NEXT: por %xmm4, %xmm11
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,65535,0,65535,65535,65535,65535]
-; SSE-NEXT: pand %xmm10, %xmm11
-; SSE-NEXT: movdqa %xmm10, %xmm3
+; SSE-NEXT: por %xmm3, %xmm11
+; SSE-NEXT: pand %xmm8, %xmm11
+; SSE-NEXT: movdqa 32(%r8), %xmm12
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: pandn %xmm12, %xmm0
+; SSE-NEXT: por %xmm11, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 48(%rdi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: por %xmm11, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, %xmm4
-; SSE-NEXT: pandn %xmm9, %xmm4
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm12[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm15, %xmm7
-; SSE-NEXT: por %xmm4, %xmm7
-; SSE-NEXT: movdqa %xmm1, %xmm8
-; SSE-NEXT: pandn %xmm7, %xmm8
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm14[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm13, %xmm7
-; SSE-NEXT: pandn %xmm4, %xmm7
-; SSE-NEXT: movdqa 16(%rdx), %xmm14
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm14[1,1,2,2]
-; SSE-NEXT: pand %xmm13, %xmm11
-; SSE-NEXT: por %xmm11, %xmm7
-; SSE-NEXT: pand %xmm1, %xmm7
-; SSE-NEXT: por %xmm8, %xmm7
-; SSE-NEXT: movdqa 16(%r8), %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm10, %xmm7
-; SSE-NEXT: movdqa %xmm10, %xmm3
-; SSE-NEXT: pandn %xmm4, %xmm3
-; SSE-NEXT: por %xmm7, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 32(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, %xmm7
-; SSE-NEXT: pandn %xmm3, %xmm7
-; SSE-NEXT: movdqa 32(%rsi), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm3[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm15, %xmm8
-; SSE-NEXT: por %xmm7, %xmm8
-; SSE-NEXT: movdqa %xmm1, %xmm7
-; SSE-NEXT: pandn %xmm8, %xmm7
-; SSE-NEXT: movdqa 32(%rcx), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm3[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm13, %xmm12
-; SSE-NEXT: pandn %xmm8, %xmm12
-; SSE-NEXT: movdqa 32(%rdx), %xmm11
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm11[1,1,2,2]
-; SSE-NEXT: pand %xmm13, %xmm8
-; SSE-NEXT: por %xmm8, %xmm12
-; SSE-NEXT: pand %xmm1, %xmm12
-; SSE-NEXT: por %xmm7, %xmm12
-; SSE-NEXT: pand %xmm10, %xmm12
-; SSE-NEXT: movdqa 32(%r8), %xmm7
-; SSE-NEXT: movdqa %xmm7, (%rsp) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm10, %xmm3
-; SSE-NEXT: pandn %xmm7, %xmm3
-; SSE-NEXT: por %xmm12, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 48(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, %xmm7
-; SSE-NEXT: pandn %xmm3, %xmm7
-; SSE-NEXT: movdqa 48(%rsi), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm3[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm15, %xmm8
-; SSE-NEXT: por %xmm7, %xmm8
-; SSE-NEXT: movdqa 48(%rcx), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm3[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm13, %xmm12
-; SSE-NEXT: pandn %xmm7, %xmm12
-; SSE-NEXT: movdqa 48(%rdx), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,2,2]
-; SSE-NEXT: pand %xmm13, %xmm7
-; SSE-NEXT: por %xmm7, %xmm12
-; SSE-NEXT: pand %xmm1, %xmm12
-; SSE-NEXT: pandn %xmm8, %xmm1
-; SSE-NEXT: por %xmm12, %xmm1
-; SSE-NEXT: pand %xmm10, %xmm1
-; SSE-NEXT: movdqa 48(%r8), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pandn %xmm3, %xmm10
-; SSE-NEXT: por %xmm1, %xmm10
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE-NEXT: movdqa 48(%rsi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pand %xmm6, %xmm11
+; SSE-NEXT: por %xmm3, %xmm11
+; SSE-NEXT: movdqa 48(%rcx), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
+; SSE-NEXT: movdqa %xmm5, %xmm15
+; SSE-NEXT: pandn %xmm3, %xmm15
+; SSE-NEXT: movdqa 48(%rdx), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,2]
+; SSE-NEXT: pand %xmm5, %xmm3
+; SSE-NEXT: por %xmm3, %xmm15
+; SSE-NEXT: pand %xmm1, %xmm15
+; SSE-NEXT: pandn %xmm11, %xmm1
+; SSE-NEXT: por %xmm15, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: movdqa 48(%r8), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pandn %xmm0, %xmm8
+; SSE-NEXT: por %xmm1, %xmm8
+; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[0,0,2,1]
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,0,0,65535,65535,65535,0]
-; SSE-NEXT: movdqa %xmm3, %xmm8
-; SSE-NEXT: pandn %xmm7, %xmm8
-; SSE-NEXT: movdqa %xmm5, %xmm7
-; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm7[0,1,3,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,1,1,1]
-; SSE-NEXT: pand %xmm3, %xmm12
-; SSE-NEXT: por %xmm8, %xmm12
-; SSE-NEXT: pand %xmm15, %xmm12
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm4[0,1,0,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,2,1]
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,0,0,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm5, %xmm11
+; SSE-NEXT: pandn %xmm3, %xmm11
+; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm3[0,1,3,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm15[0,1,1,1]
+; SSE-NEXT: pand %xmm5, %xmm15
+; SSE-NEXT: por %xmm11, %xmm15
+; SSE-NEXT: pand %xmm6, %xmm15
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm8[0,1,0,1]
+; SSE-NEXT: pandn %xmm11, %xmm6
+; SSE-NEXT: por %xmm15, %xmm6
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,6]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; SSE-NEXT: movdqa %xmm10, %xmm15
+; SSE-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm15[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,4,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,0,65535,65535,65535,65535,0,65535]
; SSE-NEXT: movdqa %xmm15, %xmm1
-; SSE-NEXT: pandn %xmm8, %xmm1
-; SSE-NEXT: por %xmm12, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,6,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
-; SSE-NEXT: movdqa %xmm0, %xmm12
-; SSE-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm12[2,2,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,5,4,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm7[0],xmm12[1],xmm7[1]
-; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: pandn %xmm8, %xmm1
-; SSE-NEXT: pand %xmm9, %xmm12
-; SSE-NEXT: por %xmm12, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE-NEXT: movdqa %xmm2, %xmm7
-; SSE-NEXT: pslldq {{.*#+}} xmm7 = zero,zero,xmm7[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm0, %xmm8
-; SSE-NEXT: pandn %xmm7, %xmm8
-; SSE-NEXT: movdqa %xmm5, %xmm7
-; SSE-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,2,3,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm7[0,1,2,2]
-; SSE-NEXT: pand %xmm0, %xmm12
-; SSE-NEXT: por %xmm8, %xmm12
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [0,65535,65535,65535,65535,0,65535,65535]
-; SSE-NEXT: pand %xmm10, %xmm12
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm4[2,3,2,3]
-; SSE-NEXT: movdqa %xmm10, %xmm1
-; SSE-NEXT: pandn %xmm8, %xmm1
-; SSE-NEXT: por %xmm12, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: psrlq $48, %xmm6
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1]
-; SSE-NEXT: movdqa %xmm0, %xmm6
-; SSE-NEXT: pandn %xmm5, %xmm6
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: por %xmm6, %xmm2
-; SSE-NEXT: movdqa %xmm13, %xmm1
-; SSE-NEXT: pandn %xmm8, %xmm1
-; SSE-NEXT: pand %xmm13, %xmm2
-; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: pandn %xmm11, %xmm1
+; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm14, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
-; SSE-NEXT: movdqa %xmm3, %xmm5
-; SSE-NEXT: pandn %xmm2, %xmm5
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm2[0,1,3,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,1,1]
-; SSE-NEXT: pand %xmm3, %xmm6
-; SSE-NEXT: por %xmm5, %xmm6
-; SSE-NEXT: pand %xmm15, %xmm6
+; SSE-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,4,5,7,6]
+; SSE-NEXT: pslldq {{.*#+}} xmm4 = zero,zero,xmm4[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,0,65535,65535,65535]
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: pandn %xmm4, %xmm10
+; SSE-NEXT: movdqa %xmm9, %xmm4
+; SSE-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm4[0,1,2,2]
+; SSE-NEXT: pand %xmm1, %xmm11
+; SSE-NEXT: por %xmm10, %xmm11
+; SSE-NEXT: movdqa {{.*#+}} xmm13 = [0,65535,65535,65535,65535,0,65535,65535]
+; SSE-NEXT: pand %xmm13, %xmm11
+; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm8[2,3,2,3]
+; SSE-NEXT: movdqa %xmm13, %xmm0
+; SSE-NEXT: pandn %xmm10, %xmm0
+; SSE-NEXT: por %xmm11, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: psrlq $48, %xmm7
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm9 = xmm9[1],xmm7[1]
+; SSE-NEXT: movdqa %xmm1, %xmm7
+; SSE-NEXT: pandn %xmm9, %xmm7
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,3]
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: pandn %xmm10, %xmm0
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: por %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,2,1]
+; SSE-NEXT: movdqa %xmm5, %xmm6
+; SSE-NEXT: pandn %xmm3, %xmm6
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,1,0,1]
-; SSE-NEXT: movdqa %xmm15, %xmm12
-; SSE-NEXT: pandn %xmm5, %xmm12
-; SSE-NEXT: por %xmm6, %xmm12
-; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE-NEXT: movdqa %xmm8, %xmm6
-; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[2,2,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,4,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
-; SSE-NEXT: movdqa %xmm9, %xmm2
-; SSE-NEXT: pandn %xmm5, %xmm2
-; SSE-NEXT: pand %xmm9, %xmm6
-; SSE-NEXT: por %xmm6, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm8[4],xmm14[5],xmm8[5],xmm14[6],xmm8[6],xmm14[7],xmm8[7]
-; SSE-NEXT: movdqa %xmm14, %xmm2
+; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm3[0,1,3,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,1,1]
+; SSE-NEXT: pand %xmm5, %xmm7
+; SSE-NEXT: por %xmm6, %xmm7
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,0,65535,65535,65535]
+; SSE-NEXT: pand %xmm0, %xmm7
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm10[0,1,0,1]
+; SSE-NEXT: movdqa %xmm0, %xmm11
+; SSE-NEXT: pandn %xmm6, %xmm11
+; SSE-NEXT: por %xmm7, %xmm11
+; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,6]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; SSE-NEXT: movdqa %xmm9, %xmm7
+; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,4,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
+; SSE-NEXT: movdqa %xmm15, %xmm0
+; SSE-NEXT: pandn %xmm6, %xmm0
+; SSE-NEXT: pand %xmm15, %xmm7
+; SSE-NEXT: por %xmm7, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm9[4],xmm2[5],xmm9[5],xmm2[6],xmm9[6],xmm2[7],xmm9[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,4,5,7,6]
; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,xmm2[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: pandn %xmm2, %xmm5
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7]
+; SSE-NEXT: movdqa %xmm1, %xmm6
+; SSE-NEXT: pandn %xmm2, %xmm6
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm2
+; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,3,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,2]
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: por %xmm5, %xmm2
-; SSE-NEXT: pand %xmm10, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
-; SSE-NEXT: movdqa %xmm10, %xmm4
-; SSE-NEXT: pandn %xmm5, %xmm4
-; SSE-NEXT: por %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm7, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: por %xmm6, %xmm2
+; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm10[2,3,2,3]
+; SSE-NEXT: movdqa %xmm13, %xmm7
+; SSE-NEXT: pandn %xmm6, %xmm7
+; SSE-NEXT: por %xmm2, %xmm7
+; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm8, %xmm2
; SSE-NEXT: psrlq $48, %xmm2
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm14[0,1,2,3,4,5,7,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,3,3]
-; SSE-NEXT: pand %xmm0, %xmm4
-; SSE-NEXT: por %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm13, %xmm1
-; SSE-NEXT: pandn %xmm5, %xmm1
-; SSE-NEXT: pand %xmm13, %xmm4
-; SSE-NEXT: por %xmm4, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm11, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pandn %xmm0, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,3]
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: por %xmm2, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,0,65535,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: pandn %xmm6, %xmm0
+; SSE-NEXT: pand %xmm10, %xmm3
+; SSE-NEXT: por %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm14, %xmm2
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1],xmm2[2],xmm9[2],xmm2[3],xmm9[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: pandn %xmm2, %xmm4
+; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm2[0,1,3,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,1,1]
-; SSE-NEXT: pand %xmm3, %xmm5
-; SSE-NEXT: por %xmm4, %xmm5
-; SSE-NEXT: pand %xmm15, %xmm5
-; SSE-NEXT: movdqa (%rsp), %xmm12 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[0,1,0,1]
-; SSE-NEXT: movdqa %xmm15, %xmm4
-; SSE-NEXT: pandn %xmm1, %xmm4
-; SSE-NEXT: por %xmm5, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE-NEXT: movdqa %xmm8, %xmm5
-; SSE-NEXT: movdqa %xmm8, %xmm4
-; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,2,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,4,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
-; SSE-NEXT: movdqa %xmm9, %xmm8
-; SSE-NEXT: pandn %xmm1, %xmm8
-; SSE-NEXT: pand %xmm9, %xmm5
-; SSE-NEXT: por %xmm5, %xmm8
-; SSE-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
-; SSE-NEXT: movdqa %xmm11, %xmm1
-; SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: pandn %xmm1, %xmm5
+; SSE-NEXT: movdqa (%rsp), %xmm8 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm2[0,1,3,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,1,1,1]
+; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: por %xmm3, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535,0,65535,65535,65535]
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm12[0,1,0,1]
+; SSE-NEXT: movdqa %xmm4, %xmm6
+; SSE-NEXT: pandn %xmm3, %xmm6
+; SSE-NEXT: por %xmm0, %xmm6
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5,6,6]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE-NEXT: movdqa %xmm15, %xmm11
+; SSE-NEXT: pandn %xmm3, %xmm11
+; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: por %xmm0, %xmm11
+; SSE-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm9[4],xmm14[5],xmm9[5],xmm14[6],xmm9[6],xmm14[7],xmm9[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm14[0,1,2,3,4,5,7,6]
+; SSE-NEXT: pslldq {{.*#+}} xmm14 = zero,zero,xmm14[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: pandn %xmm14, %xmm0
; SSE-NEXT: movdqa %xmm7, %xmm4
-; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,3,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: por %xmm5, %xmm1
-; SSE-NEXT: pand %xmm10, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm12[2,3,2,3]
-; SSE-NEXT: movdqa %xmm10, %xmm9
-; SSE-NEXT: pandn %xmm2, %xmm9
-; SSE-NEXT: por %xmm1, %xmm9
-; SSE-NEXT: movdqa %xmm6, %xmm1
-; SSE-NEXT: psrlq $48, %xmm1
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm4 = xmm4[1],xmm1[1]
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn %xmm4, %xmm1
-; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,7,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[2,1,3,3]
-; SSE-NEXT: pand %xmm0, %xmm11
-; SSE-NEXT: por %xmm1, %xmm11
-; SSE-NEXT: movdqa %xmm13, %xmm14
-; SSE-NEXT: pandn %xmm2, %xmm14
-; SSE-NEXT: pand %xmm13, %xmm11
-; SSE-NEXT: por %xmm11, %xmm14
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm8, %xmm6
+; SSE-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,2]
+; SSE-NEXT: pand %xmm1, %xmm4
+; SSE-NEXT: por %xmm0, %xmm4
+; SSE-NEXT: pand %xmm13, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm12[2,3,2,3]
+; SSE-NEXT: movdqa %xmm13, %xmm8
+; SSE-NEXT: pandn %xmm14, %xmm8
+; SSE-NEXT: por %xmm4, %xmm8
+; SSE-NEXT: movdqa %xmm6, %xmm4
+; SSE-NEXT: psrlq $48, %xmm4
+; SSE-NEXT: movdqa %xmm7, %xmm0
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pandn %xmm0, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,1,3,3]
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: por %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm3
+; SSE-NEXT: pandn %xmm14, %xmm3
+; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm12, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm10, %xmm4
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm4[0,1,3,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm14[0,1,1,1]
+; SSE-NEXT: pand %xmm5, %xmm14
+; SSE-NEXT: pandn %xmm0, %xmm5
+; SSE-NEXT: por %xmm14, %xmm5
+; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,65535,65535,65535,0,65535,65535,65535]
+; SSE-NEXT: pand %xmm14, %xmm5
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm7, %xmm11
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm5[0],xmm11[1],xmm5[1],xmm11[2],xmm5[2],xmm11[3],xmm5[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm11[0,1,3,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,1,1,1]
-; SSE-NEXT: pand %xmm3, %xmm12
-; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: por %xmm12, %xmm3
-; SSE-NEXT: pand %xmm15, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,1,0,1]
-; SSE-NEXT: pandn %xmm2, %xmm15
-; SSE-NEXT: por %xmm3, %xmm15
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm11[0,1,2,3,4,5,6,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; SSE-NEXT: movdqa %xmm6, %xmm11
-; SSE-NEXT: movdqa %xmm1, %xmm12
-; SSE-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[2,2,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,5,4,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm3[0],xmm11[1],xmm3[1]
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT: pand %xmm1, %xmm11
-; SSE-NEXT: pandn %xmm2, %xmm1
-; SSE-NEXT: por %xmm11, %xmm1
-; SSE-NEXT: movdqa %xmm12, %xmm3
-; SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: movdqa %xmm3, %xmm6
-; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,xmm2[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,3,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,2]
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: pand %xmm10, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[2,3,2,3]
-; SSE-NEXT: pandn %xmm3, %xmm10
-; SSE-NEXT: por %xmm2, %xmm10
-; SSE-NEXT: psrlq $48, %xmm5
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm7 = xmm7[1],xmm5[1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm6[0,1,2,3,4,5,7,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: pandn %xmm7, %xmm0
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: pand %xmm13, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm13
-; SSE-NEXT: por %xmm0, %xmm13
-; SSE-NEXT: movdqa %xmm13, 304(%r9)
-; SSE-NEXT: movdqa %xmm10, 288(%r9)
-; SSE-NEXT: movdqa %xmm1, 256(%r9)
-; SSE-NEXT: movdqa %xmm15, 240(%r9)
-; SSE-NEXT: movdqa %xmm14, 224(%r9)
-; SSE-NEXT: movdqa %xmm9, 208(%r9)
-; SSE-NEXT: movdqa %xmm8, 176(%r9)
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,1,0,1]
+; SSE-NEXT: pandn %xmm2, %xmm14
+; SSE-NEXT: por %xmm5, %xmm14
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6,6]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
+; SSE-NEXT: movdqa %xmm6, %xmm5
+; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,4,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; SSE-NEXT: pand %xmm15, %xmm5
+; SSE-NEXT: pandn %xmm2, %xmm15
+; SSE-NEXT: por %xmm5, %xmm15
+; SSE-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm12[0,1,2,3,4,5,7,6]
+; SSE-NEXT: pslldq {{.*#+}} xmm12 = zero,zero,xmm12[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pandn %xmm12, %xmm4
+; SSE-NEXT: movdqa %xmm10, %xmm5
+; SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,2]
+; SSE-NEXT: pand %xmm1, %xmm5
+; SSE-NEXT: por %xmm4, %xmm5
+; SSE-NEXT: pand %xmm13, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[2,3,2,3]
+; SSE-NEXT: pandn %xmm4, %xmm13
+; SSE-NEXT: por %xmm5, %xmm13
+; SSE-NEXT: psrlq $48, %xmm9
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm10 = xmm10[1],xmm9[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,1,3,3]
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: pandn %xmm10, %xmm1
+; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,65535,65535,0]
+; SSE-NEXT: pand %xmm0, %xmm1
+; SSE-NEXT: pandn %xmm4, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm0, 304(%r9)
+; SSE-NEXT: movdqa %xmm13, 288(%r9)
+; SSE-NEXT: movdqa %xmm15, 256(%r9)
+; SSE-NEXT: movdqa %xmm14, 240(%r9)
+; SSE-NEXT: movdqa %xmm3, 224(%r9)
+; SSE-NEXT: movdqa %xmm8, 208(%r9)
+; SSE-NEXT: movdqa %xmm11, 176(%r9)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 160(%r9)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
@@ -2549,7 +2546,7 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; SSE-NEXT: movaps %xmm0, 112(%r9)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 32(%r9)
-; SSE-NEXT: addq $248, %rsp
+; SSE-NEXT: addq $232, %rsp
; SSE-NEXT: retq
;
; AVX-LABEL: store_i16_stride5_vf32:
@@ -4210,812 +4207,800 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; SSE-LABEL: store_i16_stride5_vf64:
; SSE: # %bb.0:
; SSE-NEXT: subq $616, %rsp # imm = 0x268
-; SSE-NEXT: movdqa (%rdi), %xmm14
-; SSE-NEXT: movdqa 16(%rdi), %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa (%rsi), %xmm12
+; SSE-NEXT: movdqa (%rdi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 16(%rdi), %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa (%rsi), %xmm13
; SSE-NEXT: movdqa 16(%rsi), %xmm11
; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rdx), %xmm7
-; SSE-NEXT: movdqa (%rcx), %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 16(%rcx), %xmm13
-; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa (%r8), %xmm15
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,65535,65535,65535,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: pandn %xmm14, %xmm1
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm12[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm9, %xmm3
-; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa (%rcx), %xmm6
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 16(%rcx), %xmm12
+; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa (%r8), %xmm14
+; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,65535,65535,0,65535,65535,65535]
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm13[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pand %xmm10, %xmm4
+; SSE-NEXT: por %xmm1, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,0,0,65535,65535]
; SSE-NEXT: movdqa %xmm1, %xmm5
-; SSE-NEXT: pandn %xmm3, %xmm5
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[1,1,2,2]
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,0,65535,65535,65535,65535,0]
-; SSE-NEXT: pand %xmm10, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pandn %xmm4, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,1,2,2]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,65535,65535,0]
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm10, %xmm8
+; SSE-NEXT: movdqa %xmm2, %xmm8
; SSE-NEXT: pandn %xmm6, %xmm8
-; SSE-NEXT: por %xmm3, %xmm8
+; SSE-NEXT: por %xmm4, %xmm8
; SSE-NEXT: pand %xmm1, %xmm8
; SSE-NEXT: por %xmm5, %xmm8
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,65535,65535,65535,65535]
-; SSE-NEXT: pand %xmm2, %xmm8
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm15, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,65535,65535,0,65535,65535,65535,65535]
+; SSE-NEXT: pand %xmm9, %xmm8
+; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: pandn %xmm14, %xmm0
; SSE-NEXT: por %xmm8, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, %xmm3
-; SSE-NEXT: pandn %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm10, %xmm4
+; SSE-NEXT: pandn %xmm3, %xmm4
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm11[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm9, %xmm5
-; SSE-NEXT: por %xmm3, %xmm5
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: pandn %xmm5, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm13[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm10, %xmm6
+; SSE-NEXT: pand %xmm10, %xmm5
+; SSE-NEXT: por %xmm4, %xmm5
+; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pandn %xmm5, %xmm6
-; SSE-NEXT: movdqa 16(%rdx), %xmm5
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm5[1,1,2,2]
-; SSE-NEXT: pand %xmm10, %xmm8
-; SSE-NEXT: por %xmm8, %xmm6
-; SSE-NEXT: pand %xmm1, %xmm6
-; SSE-NEXT: por %xmm3, %xmm6
-; SSE-NEXT: movdqa 16(%r8), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pand %xmm2, %xmm6
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: por %xmm6, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm12[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: pandn %xmm4, %xmm5
+; SSE-NEXT: movdqa 16(%rdx), %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm3[1,1,2,2]
+; SSE-NEXT: pand %xmm2, %xmm8
+; SSE-NEXT: por %xmm8, %xmm5
+; SSE-NEXT: pand %xmm1, %xmm5
+; SSE-NEXT: por %xmm6, %xmm5
+; SSE-NEXT: movdqa 16(%r8), %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm9, %xmm5
+; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: pandn %xmm4, %xmm0
+; SSE-NEXT: por %xmm5, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 32(%rdi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm10, %xmm5
+; SSE-NEXT: pandn %xmm0, %xmm5
; SSE-NEXT: movdqa 32(%rsi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm9, %xmm6
-; SSE-NEXT: por %xmm3, %xmm6
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: pandn %xmm6, %xmm3
+; SSE-NEXT: pand %xmm10, %xmm6
+; SSE-NEXT: por %xmm5, %xmm6
+; SSE-NEXT: movdqa %xmm1, %xmm8
+; SSE-NEXT: pandn %xmm6, %xmm8
; SSE-NEXT: movdqa 32(%rcx), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm10, %xmm8
-; SSE-NEXT: pandn %xmm6, %xmm8
-; SSE-NEXT: movdqa 32(%rdx), %xmm6
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm6[1,1,2,2]
-; SSE-NEXT: pand %xmm10, %xmm11
-; SSE-NEXT: por %xmm11, %xmm8
-; SSE-NEXT: pand %xmm1, %xmm8
-; SSE-NEXT: por %xmm3, %xmm8
-; SSE-NEXT: pand %xmm2, %xmm8
-; SSE-NEXT: movdqa 32(%r8), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: por %xmm8, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,4]
+; SSE-NEXT: movdqa %xmm2, %xmm6
+; SSE-NEXT: pandn %xmm5, %xmm6
+; SSE-NEXT: movdqa 32(%rdx), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm0[1,1,2,2]
+; SSE-NEXT: pand %xmm2, %xmm11
+; SSE-NEXT: por %xmm11, %xmm6
+; SSE-NEXT: pand %xmm1, %xmm6
+; SSE-NEXT: por %xmm8, %xmm6
+; SSE-NEXT: pand %xmm9, %xmm6
+; SSE-NEXT: movdqa 32(%r8), %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: pandn %xmm4, %xmm0
+; SSE-NEXT: por %xmm6, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 48(%rdi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm10, %xmm6
+; SSE-NEXT: pandn %xmm0, %xmm6
; SSE-NEXT: movdqa 48(%rsi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm0[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm9, %xmm8
-; SSE-NEXT: por %xmm3, %xmm8
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: pandn %xmm8, %xmm3
+; SSE-NEXT: pand %xmm10, %xmm8
+; SSE-NEXT: por %xmm6, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm11
+; SSE-NEXT: pandn %xmm8, %xmm11
; SSE-NEXT: movdqa 48(%rcx), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm0[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm10, %xmm11
-; SSE-NEXT: pandn %xmm8, %xmm11
-; SSE-NEXT: movdqa 48(%rdx), %xmm8
-; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm8[1,1,2,2]
-; SSE-NEXT: pand %xmm10, %xmm15
-; SSE-NEXT: por %xmm15, %xmm11
-; SSE-NEXT: pand %xmm1, %xmm11
-; SSE-NEXT: por %xmm3, %xmm11
-; SSE-NEXT: pand %xmm2, %xmm11
-; SSE-NEXT: movdqa 48(%r8), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: por %xmm11, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
+; SSE-NEXT: movdqa %xmm2, %xmm8
+; SSE-NEXT: pandn %xmm6, %xmm8
+; SSE-NEXT: movdqa 48(%rdx), %xmm6
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm6[1,1,2,2]
+; SSE-NEXT: pand %xmm2, %xmm12
+; SSE-NEXT: por %xmm12, %xmm8
+; SSE-NEXT: pand %xmm1, %xmm8
+; SSE-NEXT: por %xmm11, %xmm8
+; SSE-NEXT: pand %xmm9, %xmm8
+; SSE-NEXT: movdqa 48(%r8), %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: pandn %xmm4, %xmm0
+; SSE-NEXT: por %xmm8, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 64(%rdi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm10, %xmm8
+; SSE-NEXT: pandn %xmm0, %xmm8
; SSE-NEXT: movdqa 64(%rsi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm0[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm9, %xmm11
-; SSE-NEXT: por %xmm3, %xmm11
-; SSE-NEXT: movdqa %xmm1, %xmm15
-; SSE-NEXT: pandn %xmm11, %xmm15
+; SSE-NEXT: pand %xmm10, %xmm11
+; SSE-NEXT: por %xmm8, %xmm11
+; SSE-NEXT: movdqa %xmm1, %xmm12
+; SSE-NEXT: pandn %xmm11, %xmm12
; SSE-NEXT: movdqa 64(%rcx), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm10, %xmm11
-; SSE-NEXT: pandn %xmm3, %xmm11
-; SSE-NEXT: movdqa 64(%rdx), %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
-; SSE-NEXT: pand %xmm10, %xmm0
-; SSE-NEXT: por %xmm0, %xmm11
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,4,4]
+; SSE-NEXT: movdqa %xmm2, %xmm11
+; SSE-NEXT: pandn %xmm8, %xmm11
+; SSE-NEXT: movdqa 64(%rdx), %xmm8
+; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm8[1,1,2,2]
+; SSE-NEXT: pand %xmm2, %xmm14
+; SSE-NEXT: por %xmm14, %xmm11
; SSE-NEXT: pand %xmm1, %xmm11
-; SSE-NEXT: por %xmm15, %xmm11
-; SSE-NEXT: pand %xmm2, %xmm11
-; SSE-NEXT: movdqa 64(%r8), %xmm3
-; SSE-NEXT: movdqa %xmm3, (%rsp) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: por %xmm12, %xmm11
+; SSE-NEXT: pand %xmm9, %xmm11
+; SSE-NEXT: movdqa 64(%r8), %xmm4
+; SSE-NEXT: movdqa %xmm4, (%rsp) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: pandn %xmm4, %xmm0
; SSE-NEXT: por %xmm11, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 80(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: movdqa 80(%rsi), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm3[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm9, %xmm11
-; SSE-NEXT: por %xmm0, %xmm11
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: pandn %xmm11, %xmm0
-; SSE-NEXT: movdqa 80(%rcx), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm3[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa 80(%rdi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm10, %xmm11
+; SSE-NEXT: pandn %xmm0, %xmm11
+; SSE-NEXT: movdqa 80(%rsi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pand %xmm10, %xmm12
+; SSE-NEXT: por %xmm11, %xmm12
+; SSE-NEXT: movdqa %xmm1, %xmm14
+; SSE-NEXT: pandn %xmm12, %xmm14
+; SSE-NEXT: movdqa 80(%rcx), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm0[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm10, %xmm15
-; SSE-NEXT: pandn %xmm11, %xmm15
-; SSE-NEXT: movdqa 80(%rdx), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm3[1,1,2,2]
-; SSE-NEXT: pand %xmm10, %xmm11
-; SSE-NEXT: por %xmm11, %xmm15
-; SSE-NEXT: pand %xmm1, %xmm15
-; SSE-NEXT: por %xmm0, %xmm15
-; SSE-NEXT: pand %xmm2, %xmm15
-; SSE-NEXT: movdqa 80(%r8), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: por %xmm15, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm12
+; SSE-NEXT: pandn %xmm11, %xmm12
+; SSE-NEXT: movdqa 80(%rdx), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 96(%rdi), %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm0[1,1,2,2]
+; SSE-NEXT: pand %xmm2, %xmm15
+; SSE-NEXT: por %xmm15, %xmm12
+; SSE-NEXT: pand %xmm1, %xmm12
+; SSE-NEXT: por %xmm14, %xmm12
+; SSE-NEXT: pand %xmm9, %xmm12
+; SSE-NEXT: movdqa 80(%r8), %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm9, %xmm0
; SSE-NEXT: pandn %xmm4, %xmm0
-; SSE-NEXT: movdqa 96(%rsi), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm3[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm9, %xmm11
-; SSE-NEXT: por %xmm0, %xmm11
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: pandn %xmm11, %xmm0
-; SSE-NEXT: movdqa 96(%rcx), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm3[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm10, %xmm15
-; SSE-NEXT: pandn %xmm11, %xmm15
-; SSE-NEXT: movdqa 96(%rdx), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm3[1,1,2,2]
-; SSE-NEXT: pand %xmm10, %xmm11
-; SSE-NEXT: por %xmm11, %xmm15
-; SSE-NEXT: pand %xmm1, %xmm15
-; SSE-NEXT: por %xmm0, %xmm15
-; SSE-NEXT: pand %xmm2, %xmm15
-; SSE-NEXT: movdqa 96(%r8), %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: por %xmm15, %xmm0
+; SSE-NEXT: por %xmm12, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 96(%rdi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm10, %xmm12
+; SSE-NEXT: pandn %xmm0, %xmm12
+; SSE-NEXT: movdqa 96(%rsi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pand %xmm10, %xmm14
+; SSE-NEXT: por %xmm12, %xmm14
+; SSE-NEXT: movdqa %xmm1, %xmm12
+; SSE-NEXT: pandn %xmm14, %xmm12
+; SSE-NEXT: movdqa 96(%rcx), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 112(%rdi), %xmm4
+; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,4,4,4]
+; SSE-NEXT: movdqa %xmm2, %xmm15
+; SSE-NEXT: pandn %xmm14, %xmm15
+; SSE-NEXT: movdqa 96(%rdx), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm0[1,1,2,2]
+; SSE-NEXT: pand %xmm2, %xmm14
+; SSE-NEXT: por %xmm14, %xmm15
+; SSE-NEXT: pand %xmm1, %xmm15
+; SSE-NEXT: por %xmm12, %xmm15
+; SSE-NEXT: pand %xmm9, %xmm15
+; SSE-NEXT: movdqa 96(%r8), %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm9, %xmm0
; SSE-NEXT: pandn %xmm4, %xmm0
-; SSE-NEXT: movdqa 112(%rsi), %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm4[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm9, %xmm11
-; SSE-NEXT: por %xmm0, %xmm11
-; SSE-NEXT: movdqa 112(%rcx), %xmm0
+; SSE-NEXT: por %xmm15, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm10, %xmm15
-; SSE-NEXT: pandn %xmm0, %xmm15
-; SSE-NEXT: movdqa 112(%rdx), %xmm0
+; SSE-NEXT: movdqa 112(%rdi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
-; SSE-NEXT: pand %xmm10, %xmm0
-; SSE-NEXT: por %xmm0, %xmm15
-; SSE-NEXT: pand %xmm1, %xmm15
-; SSE-NEXT: pandn %xmm11, %xmm1
-; SSE-NEXT: por %xmm15, %xmm1
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: movdqa 112(%r8), %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm12
+; SSE-NEXT: pandn %xmm0, %xmm12
+; SSE-NEXT: movdqa 112(%rsi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm7, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,0,0,65535,65535,65535,0]
-; SSE-NEXT: movdqa %xmm1, %xmm11
-; SSE-NEXT: pandn %xmm0, %xmm11
-; SSE-NEXT: movdqa %xmm14, %xmm0
-; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm0[0,1,3,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm15[0,1,1,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pand %xmm10, %xmm14
+; SSE-NEXT: por %xmm12, %xmm14
+; SSE-NEXT: movdqa 112(%rcx), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,4,4,4]
+; SSE-NEXT: movdqa %xmm2, %xmm15
+; SSE-NEXT: pandn %xmm12, %xmm15
+; SSE-NEXT: movdqa 112(%rdx), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm0[1,1,2,2]
+; SSE-NEXT: pand %xmm2, %xmm12
+; SSE-NEXT: por %xmm12, %xmm15
; SSE-NEXT: pand %xmm1, %xmm15
-; SSE-NEXT: por %xmm11, %xmm15
-; SSE-NEXT: movdqa %xmm9, %xmm13
-; SSE-NEXT: pand %xmm9, %xmm15
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm4[0,1,0,1]
-; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: pandn %xmm11, %xmm1
+; SSE-NEXT: pandn %xmm14, %xmm1
; SSE-NEXT: por %xmm15, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE-NEXT: movdqa %xmm2, %xmm15
-; SSE-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm7[0],xmm15[1],xmm7[1],xmm15[2],xmm7[2],xmm15[3],xmm7[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm15[2,2,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,4,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,0,65535,65535,65535,65535,0,65535]
-; SSE-NEXT: movdqa %xmm15, %xmm1
-; SSE-NEXT: pandn %xmm11, %xmm1
-; SSE-NEXT: pand %xmm15, %xmm0
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm2[4],xmm7[5],xmm2[5],xmm7[6],xmm2[6],xmm7[7],xmm2[7]
-; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,65535,65535,0,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm9, %xmm11
-; SSE-NEXT: pandn %xmm1, %xmm11
-; SSE-NEXT: movdqa %xmm14, %xmm1
-; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,3,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
; SSE-NEXT: pand %xmm9, %xmm1
-; SSE-NEXT: por %xmm11, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,65535,65535,65535,65535,0,65535,65535]
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
-; SSE-NEXT: movdqa %xmm3, %xmm11
-; SSE-NEXT: pandn %xmm0, %xmm11
-; SSE-NEXT: por %xmm1, %xmm11
-; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: psrlq $48, %xmm12
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm14 = xmm14[1],xmm12[1]
-; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: pandn %xmm14, %xmm1
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,7,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,1,3,3]
-; SSE-NEXT: pand %xmm9, %xmm7
-; SSE-NEXT: por %xmm1, %xmm7
+; SSE-NEXT: movdqa 112(%r8), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: por %xmm1, %xmm9
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm7, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm1[0,0,2,1]
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,0,0,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm11, %xmm14
+; SSE-NEXT: pandn %xmm12, %xmm14
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm4, %xmm12
+; SSE-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm12[0,1,3,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm15[0,1,1,1]
+; SSE-NEXT: pand %xmm11, %xmm15
+; SSE-NEXT: por %xmm14, %xmm15
+; SSE-NEXT: pand %xmm10, %xmm15
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,1,0,1]
; SSE-NEXT: movdqa %xmm10, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: pand %xmm10, %xmm7
-; SSE-NEXT: por %xmm7, %xmm1
+; SSE-NEXT: por %xmm15, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,6,6]
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[2,3,2,3]
+; SSE-NEXT: movdqa %xmm5, %xmm14
+; SSE-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm7[0],xmm14[1],xmm7[1],xmm14[2],xmm7[2],xmm14[3],xmm7[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm14[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,4,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm14[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm12[0],xmm15[1],xmm12[1]
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: pand %xmm12, %xmm15
+; SSE-NEXT: por %xmm15, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm7[0,1,2,3,4,5,7,6]
+; SSE-NEXT: pslldq {{.*#+}} xmm7 = zero,zero,xmm7[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,0,65535,65535,65535]
+; SSE-NEXT: movdqa %xmm1, %xmm12
+; SSE-NEXT: pandn %xmm7, %xmm12
+; SSE-NEXT: movdqa %xmm4, %xmm7
+; SSE-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,2,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,2,2]
+; SSE-NEXT: pand %xmm1, %xmm7
+; SSE-NEXT: por %xmm12, %xmm7
+; SSE-NEXT: movdqa {{.*#+}} xmm14 = [0,65535,65535,65535,65535,0,65535,65535]
+; SSE-NEXT: pand %xmm14, %xmm7
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,3,2,3]
+; SSE-NEXT: movdqa %xmm14, %xmm9
+; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: por %xmm7, %xmm9
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: psrlq $48, %xmm13
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm4 = xmm4[1],xmm13[1]
+; SSE-NEXT: movdqa %xmm1, %xmm7
+; SSE-NEXT: pandn %xmm4, %xmm7
+; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm15[2,1,3,3]
+; SSE-NEXT: pand %xmm1, %xmm13
+; SSE-NEXT: por %xmm7, %xmm13
+; SSE-NEXT: movdqa %xmm2, %xmm7
+; SSE-NEXT: pandn %xmm0, %xmm7
+; SSE-NEXT: pand %xmm2, %xmm13
+; SSE-NEXT: por %xmm13, %xmm7
+; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm3, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,0,65535,65535,65535,0]
-; SSE-NEXT: movdqa %xmm4, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
+; SSE-NEXT: movdqa %xmm11, %xmm12
+; SSE-NEXT: movdqa %xmm11, %xmm7
+; SSE-NEXT: pandn %xmm0, %xmm7
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm15, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm0[0,1,3,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm13[0,1,1,1]
+; SSE-NEXT: pand %xmm11, %xmm13
+; SSE-NEXT: por %xmm7, %xmm13
+; SSE-NEXT: pand %xmm10, %xmm13
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,1,0,1]
+; SSE-NEXT: movdqa %xmm10, %xmm9
+; SSE-NEXT: pandn %xmm7, %xmm9
+; SSE-NEXT: por %xmm13, %xmm9
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,6]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE-NEXT: movdqa %xmm2, %xmm13
+; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm3[0],xmm13[1],xmm3[1],xmm13[2],xmm3[2],xmm13[3],xmm3[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm13[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,5,4,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm13[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: pandn %xmm7, %xmm0
+; SSE-NEXT: pand %xmm11, %xmm13
+; SSE-NEXT: por %xmm13, %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm3[0,1,2,3,4,5,7,6]
+; SSE-NEXT: pslldq {{.*#+}} xmm3 = zero,zero,xmm3[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE-NEXT: movdqa %xmm1, %xmm7
+; SSE-NEXT: pandn %xmm3, %xmm7
+; SSE-NEXT: movdqa %xmm15, %xmm3
+; SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,2]
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: pand %xmm14, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm5[2,3,2,3]
+; SSE-NEXT: movdqa %xmm14, %xmm9
+; SSE-NEXT: pandn %xmm7, %xmm9
+; SSE-NEXT: por %xmm3, %xmm9
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: psrlq $48, %xmm4
+; SSE-NEXT: movdqa %xmm15, %xmm9
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm9 = xmm9[1],xmm4[1]
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm9, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: por %xmm3, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,65535,65535,0]
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: pandn %xmm7, %xmm3
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
+; SSE-NEXT: movdqa %xmm12, %xmm3
+; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm10, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm0[0,1,3,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,1,1]
-; SSE-NEXT: pand %xmm4, %xmm7
-; SSE-NEXT: por %xmm1, %xmm7
-; SSE-NEXT: pand %xmm13, %xmm7
+; SSE-NEXT: pand %xmm12, %xmm7
+; SSE-NEXT: por %xmm3, %xmm7
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,65535,65535,65535,0,65535,65535,65535]
+; SSE-NEXT: pand %xmm12, %xmm7
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,1,0,1]
-; SSE-NEXT: movdqa %xmm13, %xmm11
-; SSE-NEXT: pandn %xmm1, %xmm11
-; SSE-NEXT: por %xmm7, %xmm11
-; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,1,0,1]
+; SSE-NEXT: movdqa %xmm12, %xmm2
+; SSE-NEXT: pandn %xmm3, %xmm2
+; SSE-NEXT: por %xmm7, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE-NEXT: movdqa %xmm2, %xmm7
+; SSE-NEXT: movdqa %xmm15, %xmm7
; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; SSE-NEXT: movdqa %xmm5, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,4,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: pand %xmm15, %xmm7
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: pand %xmm11, %xmm7
; SSE-NEXT: por %xmm7, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
-; SSE-NEXT: movdqa %xmm5, %xmm0
-; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm12, %xmm11
-; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: movdqa %xmm14, %xmm12
-; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,3,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
-; SSE-NEXT: pand %xmm9, %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: pand %xmm3, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,2,3]
-; SSE-NEXT: movdqa %xmm3, %xmm7
-; SSE-NEXT: movdqa %xmm3, %xmm14
-; SSE-NEXT: pandn %xmm1, %xmm7
-; SSE-NEXT: por %xmm0, %xmm7
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: psrlq $48, %xmm0
-; SSE-NEXT: movdqa %xmm11, %xmm3
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm0[1]
-; SSE-NEXT: movdqa %xmm9, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,7,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,1,3,3]
-; SSE-NEXT: pand %xmm9, %xmm5
-; SSE-NEXT: por %xmm0, %xmm5
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm5
+; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5,7,6]
+; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,xmm2[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: movdqa %xmm10, %xmm5
+; SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm13[4],xmm5[5],xmm13[5],xmm5[6],xmm13[6],xmm5[7],xmm13[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,2]
+; SSE-NEXT: pand %xmm1, %xmm5
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: pand %xmm14, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[2,3,2,3]
+; SSE-NEXT: movdqa %xmm14, %xmm10
+; SSE-NEXT: pandn %xmm3, %xmm10
+; SSE-NEXT: por %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm13, %xmm5
+; SSE-NEXT: psrlq $48, %xmm5
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm5[1]
+; SSE-NEXT: movdqa %xmm1, %xmm5
+; SSE-NEXT: pandn %xmm2, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: pandn %xmm3, %xmm2
+; SSE-NEXT: pand %xmm9, %xmm0
+; SSE-NEXT: movdqa %xmm9, %xmm13
+; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,0,65535,65535,65535,0]
-; SSE-NEXT: movdqa %xmm4, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm7, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[0,1,3,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,1,1]
; SSE-NEXT: pand %xmm4, %xmm5
-; SSE-NEXT: por %xmm1, %xmm5
-; SSE-NEXT: movdqa %xmm13, %xmm2
-; SSE-NEXT: pand %xmm13, %xmm5
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[0,1,0,1]
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: por %xmm5, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm4, %xmm15
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: pand %xmm12, %xmm5
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm10[0,1,0,1]
+; SSE-NEXT: movdqa %xmm12, %xmm4
+; SSE-NEXT: pandn %xmm3, %xmm4
+; SSE-NEXT: por %xmm5, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE-NEXT: movdqa %xmm12, %xmm5
+; SSE-NEXT: movdqa %xmm9, %xmm5
; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,4,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: pand %xmm15, %xmm5
-; SSE-NEXT: por %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm7, %xmm3
-; SSE-NEXT: movdqa %xmm7, %xmm0
-; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm11[4],xmm0[5],xmm11[5],xmm0[6],xmm11[6],xmm0[7],xmm11[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,3,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
-; SSE-NEXT: pand %xmm9, %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: pand %xmm14, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[2,3,2,3]
-; SSE-NEXT: movdqa %xmm14, %xmm7
-; SSE-NEXT: pandn %xmm1, %xmm7
-; SSE-NEXT: por %xmm0, %xmm7
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: psrlq $48, %xmm0
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm0[1]
-; SSE-NEXT: movdqa %xmm9, %xmm0
; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm6[0,1,2,3,4,5,7,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,1,3,3]
-; SSE-NEXT: pand %xmm9, %xmm5
-; SSE-NEXT: por %xmm0, %xmm5
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm5
+; SSE-NEXT: pand %xmm11, %xmm5
; SSE-NEXT: por %xmm5, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm8, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: movdqa %xmm4, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[0,1,3,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,1,1]
-; SSE-NEXT: pand %xmm4, %xmm5
-; SSE-NEXT: por %xmm1, %xmm5
-; SSE-NEXT: pand %xmm2, %xmm5
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,1,0,1]
-; SSE-NEXT: movdqa %xmm2, %xmm12
-; SSE-NEXT: pandn %xmm1, %xmm12
-; SSE-NEXT: por %xmm5, %xmm12
-; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,4,5,7,6]
+; SSE-NEXT: pslldq {{.*#+}} xmm6 = zero,zero,xmm6[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm6, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,2]
+; SSE-NEXT: pand %xmm1, %xmm5
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: pand %xmm14, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm10[2,3,2,3]
+; SSE-NEXT: movdqa %xmm14, %xmm4
+; SSE-NEXT: pandn %xmm3, %xmm4
+; SSE-NEXT: por %xmm5, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm7, %xmm5
-; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,2,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,4,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: pand %xmm15, %xmm5
-; SSE-NEXT: por %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; SSE-NEXT: movdqa %xmm8, %xmm0
-; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: movdqa %xmm6, %xmm5
-; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,3,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
-; SSE-NEXT: pand %xmm9, %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: pand %xmm14, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[2,3,2,3]
-; SSE-NEXT: movdqa %xmm14, %xmm6
-; SSE-NEXT: pandn %xmm1, %xmm6
-; SSE-NEXT: por %xmm0, %xmm6
-; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm5, %xmm0
-; SSE-NEXT: psrlq $48, %xmm0
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm0[1]
-; SSE-NEXT: movdqa %xmm9, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm8[0,1,2,3,4,5,7,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,1,3,3]
-; SSE-NEXT: pand %xmm9, %xmm5
-; SSE-NEXT: por %xmm0, %xmm5
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm5
-; SSE-NEXT: por %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: movdqa %xmm4, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: psrlq $48, %xmm5
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm5[1]
+; SSE-NEXT: movdqa %xmm1, %xmm5
+; SSE-NEXT: pandn %xmm2, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: por %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm13, %xmm2
+; SSE-NEXT: pandn %xmm3, %xmm2
+; SSE-NEXT: pand %xmm13, %xmm0
+; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm8, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
+; SSE-NEXT: movdqa %xmm15, %xmm3
+; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[0,1,3,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,1,1]
-; SSE-NEXT: pand %xmm4, %xmm5
-; SSE-NEXT: por %xmm1, %xmm5
-; SSE-NEXT: pand %xmm2, %xmm5
-; SSE-NEXT: movdqa (%rsp), %xmm8 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[0,1,0,1]
-; SSE-NEXT: movdqa %xmm2, %xmm12
-; SSE-NEXT: pandn %xmm1, %xmm12
-; SSE-NEXT: por %xmm5, %xmm12
-; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm15, %xmm5
+; SSE-NEXT: movdqa %xmm15, %xmm4
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: pand %xmm12, %xmm5
+; SSE-NEXT: movdqa (%rsp), %xmm9 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm9[0,1,0,1]
+; SSE-NEXT: movdqa %xmm12, %xmm10
+; SSE-NEXT: pandn %xmm3, %xmm10
+; SSE-NEXT: por %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE-NEXT: movdqa %xmm6, %xmm5
-; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3]
+; SSE-NEXT: movdqa %xmm7, %xmm5
+; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,4,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
-; SSE-NEXT: movdqa %xmm15, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: pand %xmm15, %xmm5
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: pand %xmm11, %xmm5
; SSE-NEXT: por %xmm5, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm11, %xmm1
-; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm6
-; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: movdqa %xmm7, %xmm5
-; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,3,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
-; SSE-NEXT: pand %xmm9, %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: pand %xmm14, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[2,3,2,3]
+; SSE-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,4,5,7,6]
+; SSE-NEXT: pslldq {{.*#+}} xmm8 = zero,zero,xmm8[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm8, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,2]
+; SSE-NEXT: pand %xmm1, %xmm5
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: pand %xmm14, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm9[2,3,2,3]
; SSE-NEXT: movdqa %xmm14, %xmm7
-; SSE-NEXT: pandn %xmm1, %xmm7
-; SSE-NEXT: por %xmm0, %xmm7
+; SSE-NEXT: pandn %xmm3, %xmm7
+; SSE-NEXT: por %xmm5, %xmm7
; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm5, %xmm0
-; SSE-NEXT: psrlq $48, %xmm0
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm0[1]
+; SSE-NEXT: movdqa %xmm6, %xmm5
+; SSE-NEXT: psrlq $48, %xmm5
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm5[1]
+; SSE-NEXT: movdqa %xmm1, %xmm5
+; SSE-NEXT: pandn %xmm2, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: por %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm13, %xmm2
+; SSE-NEXT: pandn %xmm3, %xmm2
+; SSE-NEXT: pand %xmm13, %xmm0
+; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
; SSE-NEXT: movdqa %xmm9, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm6[0,1,2,3,4,5,7,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,3]
-; SSE-NEXT: pand %xmm9, %xmm3
-; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm3
-; SSE-NEXT: por %xmm3, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: movdqa %xmm4, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm7, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,1,3,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,1,1,1]
-; SSE-NEXT: pand %xmm4, %xmm5
-; SSE-NEXT: por %xmm1, %xmm5
-; SSE-NEXT: pand %xmm2, %xmm5
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,1,0,1]
+; SSE-NEXT: movdqa %xmm15, %xmm3
+; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: por %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; SSE-NEXT: movdqa %xmm6, %xmm5
-; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3]
-; SSE-NEXT: movdqa %xmm12, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[0,1,3,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,1,1]
+; SSE-NEXT: pand %xmm15, %xmm5
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: pand %xmm12, %xmm5
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[0,1,0,1]
+; SSE-NEXT: movdqa %xmm12, %xmm10
+; SSE-NEXT: pandn %xmm3, %xmm10
+; SSE-NEXT: por %xmm5, %xmm10
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,6]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE-NEXT: movdqa %xmm7, %xmm5
+; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,4,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
-; SSE-NEXT: movdqa %xmm15, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: pand %xmm15, %xmm5
-; SSE-NEXT: movdqa %xmm15, %xmm13
-; SSE-NEXT: por %xmm5, %xmm3
-; SSE-NEXT: movdqa %xmm3, (%rsp) # 16-byte Spill
-; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm6
-; SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: pand %xmm11, %xmm5
+; SSE-NEXT: movdqa %xmm11, %xmm10
+; SSE-NEXT: por %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movdqa %xmm9, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm7, %xmm0
-; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,3,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
-; SSE-NEXT: pand %xmm9, %xmm1
-; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: pand %xmm14, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm11[2,3,2,3]
+; SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm3[0,1,2,3,4,5,7,6]
+; SSE-NEXT: pslldq {{.*#+}} xmm3 = zero,zero,xmm3[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE-NEXT: movdqa %xmm3, %xmm5
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm5, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,2]
+; SSE-NEXT: pand %xmm1, %xmm5
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: pand %xmm14, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[2,3,2,3]
; SSE-NEXT: movdqa %xmm14, %xmm7
; SSE-NEXT: pandn %xmm3, %xmm7
-; SSE-NEXT: por %xmm1, %xmm7
+; SSE-NEXT: por %xmm5, %xmm7
; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm8, %xmm1
-; SSE-NEXT: psrlq $48, %xmm1
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm6[0,1,2,3,4,5,7,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm5[2,1,3,3]
-; SSE-NEXT: pand %xmm9, %xmm8
-; SSE-NEXT: por %xmm1, %xmm8
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm8
-; SSE-NEXT: por %xmm8, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm11, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,1]
-; SSE-NEXT: movdqa %xmm4, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm5, %xmm8
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm12[0],xmm8[1],xmm12[1],xmm8[2],xmm12[2],xmm8[3],xmm12[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm8[0,1,3,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm1[0,1,1,1]
-; SSE-NEXT: pand %xmm4, %xmm14
-; SSE-NEXT: por %xmm3, %xmm14
-; SSE-NEXT: pand %xmm2, %xmm14
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[0,1,0,1]
-; SSE-NEXT: movdqa %xmm2, %xmm15
-; SSE-NEXT: pandn %xmm3, %xmm15
-; SSE-NEXT: por %xmm14, %xmm15
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,6,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[2,3,2,3]
-; SSE-NEXT: movdqa %xmm0, %xmm14
-; SSE-NEXT: movdqa %xmm0, %xmm6
-; SSE-NEXT: movdqa %xmm11, %xmm1
-; SSE-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm11[0],xmm14[1],xmm11[1],xmm14[2],xmm11[2],xmm14[3],xmm11[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm14[2,2,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,4,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
-; SSE-NEXT: movdqa %xmm13, %xmm14
-; SSE-NEXT: pandn %xmm3, %xmm14
+; SSE-NEXT: movdqa %xmm6, %xmm5
+; SSE-NEXT: psrlq $48, %xmm5
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm5[1]
+; SSE-NEXT: movdqa %xmm1, %xmm5
+; SSE-NEXT: pandn %xmm2, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: por %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm13, %xmm2
+; SSE-NEXT: pandn %xmm3, %xmm2
; SSE-NEXT: pand %xmm13, %xmm0
-; SSE-NEXT: por %xmm0, %xmm14
-; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm6
+; SSE-NEXT: movdqa %xmm13, %xmm15
+; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
+; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm2, %xmm7
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm7[0,1,3,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm0[0,1,1,1]
+; SSE-NEXT: pand %xmm4, %xmm13
+; SSE-NEXT: por %xmm3, %xmm13
+; SSE-NEXT: movdqa %xmm12, %xmm11
+; SSE-NEXT: pand %xmm12, %xmm13
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[0,1,0,1]
+; SSE-NEXT: pandn %xmm3, %xmm11
+; SSE-NEXT: por %xmm13, %xmm11
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,6,6]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
+; SSE-NEXT: movdqa %xmm5, %xmm13
+; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1],xmm13[2],xmm9[2],xmm13[3],xmm9[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm13[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,5,4,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
+; SSE-NEXT: movdqa %xmm10, %xmm13
+; SSE-NEXT: pandn %xmm3, %xmm13
+; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: por %xmm0, %xmm13
+; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm0[0,1,2,3,4,5,7,6]
; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm5, %xmm0
-; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,3,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
-; SSE-NEXT: pand %xmm9, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm7
+; SSE-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,2,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,1,2,2]
+; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm3, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm11 = [0,65535,65535,65535,65535,0,65535,65535]
-; SSE-NEXT: pand %xmm11, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[2,3,2,3]
-; SSE-NEXT: pandn %xmm1, %xmm11
-; SSE-NEXT: por %xmm0, %xmm11
-; SSE-NEXT: movdqa %xmm12, %xmm0
+; SSE-NEXT: pand %xmm14, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[2,3,2,3]
+; SSE-NEXT: movdqa %xmm14, %xmm12
+; SSE-NEXT: pandn %xmm3, %xmm12
+; SSE-NEXT: por %xmm0, %xmm12
+; SSE-NEXT: movdqa %xmm6, %xmm0
; SSE-NEXT: psrlq $48, %xmm0
-; SSE-NEXT: movdqa %xmm5, %xmm3
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm0[1]
-; SSE-NEXT: movdqa %xmm9, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm6[0,1,2,3,4,5,7,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,3]
-; SSE-NEXT: pand %xmm9, %xmm3
-; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm10, %xmm12
-; SSE-NEXT: pandn %xmm1, %xmm12
-; SSE-NEXT: pand %xmm10, %xmm3
-; SSE-NEXT: por %xmm3, %xmm12
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,1]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm8, %xmm3
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1]
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,1,3,3]
+; SSE-NEXT: pand %xmm1, %xmm5
+; SSE-NEXT: por %xmm0, %xmm5
+; SSE-NEXT: movdqa %xmm15, %xmm10
+; SSE-NEXT: pandn %xmm3, %xmm10
+; SSE-NEXT: pand %xmm15, %xmm5
+; SSE-NEXT: por %xmm5, %xmm10
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm3[0,1,3,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm13[0,1,1,1]
-; SSE-NEXT: pand %xmm4, %xmm13
-; SSE-NEXT: pandn %xmm1, %xmm4
-; SSE-NEXT: por %xmm13, %xmm4
-; SSE-NEXT: pand %xmm2, %xmm4
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,1,0,1]
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: por %xmm4, %xmm2
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; SSE-NEXT: movdqa %xmm7, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm13
-; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[2,2,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE-NEXT: movdqa %xmm6, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,2,1]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm8, %xmm5
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm5[0,1,3,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm15[0,1,1,1]
+; SSE-NEXT: pand %xmm4, %xmm15
+; SSE-NEXT: pandn %xmm3, %xmm4
+; SSE-NEXT: por %xmm15, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,0,65535,65535,65535]
; SSE-NEXT: pand %xmm0, %xmm4
-; SSE-NEXT: pandn %xmm1, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[0,1,0,1]
+; SSE-NEXT: pandn %xmm3, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm13, %xmm3
-; SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm3, %xmm7
-; SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSE-NEXT: movdqa %xmm9, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm8, %xmm1
-; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,3,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
-; SSE-NEXT: pand %xmm9, %xmm1
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm5[0,1,2,3,4,5,6,6]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
+; SSE-NEXT: movdqa %xmm2, %xmm15
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,4,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm2
+; SSE-NEXT: por %xmm5, %xmm2
+; SSE-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm6[0,1,2,3,4,5,7,6]
+; SSE-NEXT: pslldq {{.*#+}} xmm6 = zero,zero,xmm6[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pandn %xmm6, %xmm4
+; SSE-NEXT: movdqa %xmm8, %xmm5
+; SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,2]
+; SSE-NEXT: pand %xmm1, %xmm5
+; SSE-NEXT: por %xmm4, %xmm5
+; SSE-NEXT: pand %xmm14, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[2,3,2,3]
+; SSE-NEXT: pandn %xmm4, %xmm14
+; SSE-NEXT: por %xmm5, %xmm14
+; SSE-NEXT: psrlq $48, %xmm9
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm8 = xmm8[1],xmm9[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,3]
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm8, %xmm1
; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,65535,65535,65535,65535,0,65535,65535]
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,65535,65535,0]
; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[2,3,2,3]
-; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: pandn %xmm4, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: psrlq $48, %xmm6
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm8 = xmm8[1],xmm6[1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm7[0,1,2,3,4,5,7,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
-; SSE-NEXT: pand %xmm9, %xmm1
-; SSE-NEXT: pandn %xmm8, %xmm9
-; SSE-NEXT: por %xmm1, %xmm9
-; SSE-NEXT: pand %xmm10, %xmm9
-; SSE-NEXT: pandn %xmm3, %xmm10
-; SSE-NEXT: por %xmm9, %xmm10
-; SSE-NEXT: movdqa %xmm10, 624(%r9)
-; SSE-NEXT: movdqa %xmm0, 608(%r9)
+; SSE-NEXT: movdqa %xmm0, 624(%r9)
+; SSE-NEXT: movdqa %xmm14, 608(%r9)
+; SSE-NEXT: movdqa %xmm2, 576(%r9)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, 576(%r9)
-; SSE-NEXT: movdqa %xmm2, 560(%r9)
-; SSE-NEXT: movdqa %xmm12, 544(%r9)
-; SSE-NEXT: movdqa %xmm11, 528(%r9)
-; SSE-NEXT: movdqa %xmm14, 496(%r9)
-; SSE-NEXT: movdqa %xmm15, 480(%r9)
+; SSE-NEXT: movaps %xmm0, 560(%r9)
+; SSE-NEXT: movdqa %xmm10, 544(%r9)
+; SSE-NEXT: movdqa %xmm12, 528(%r9)
+; SSE-NEXT: movdqa %xmm13, 496(%r9)
+; SSE-NEXT: movdqa %xmm11, 480(%r9)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 464(%r9)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
index 79cc8e49f1fdb1..ea1e1724193b52 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
@@ -906,21 +906,21 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-LABEL: store_i16_stride7_vf8:
; SSE: # %bb.0:
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT: movdqa (%rdi), %xmm3
+; SSE-NEXT: movdqa (%rdi), %xmm2
; SSE-NEXT: movdqa (%rsi), %xmm8
; SSE-NEXT: movdqa (%rdx), %xmm5
; SSE-NEXT: movdqa (%rcx), %xmm11
; SSE-NEXT: movdqa (%r8), %xmm4
; SSE-NEXT: movdqa (%r9), %xmm10
-; SSE-NEXT: movdqa (%rax), %xmm2
+; SSE-NEXT: movdqa (%rax), %xmm3
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm11[4],xmm0[5],xmm11[5],xmm0[6],xmm11[6],xmm0[7],xmm11[7]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
; SSE-NEXT: movdqa %xmm6, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,2],xmm0[2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm2[2,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm3[2,3,2,3]
; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,0,65535,65535,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm4, %xmm7
; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3]
@@ -933,7 +933,7 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: por %xmm13, %xmm12
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,2],xmm12[0,3]
; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,65535,65535,0,65535,65535,65535,65535]
-; SSE-NEXT: pandn %xmm2, %xmm9
+; SSE-NEXT: pandn %xmm3, %xmm9
; SSE-NEXT: movdqa %xmm7, %xmm12
; SSE-NEXT: movdqa %xmm7, %xmm13
; SSE-NEXT: psrldq {{.*#+}} xmm13 = xmm13[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -952,7 +952,7 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: andps %xmm9, %xmm14
; SSE-NEXT: andnps %xmm13, %xmm9
; SSE-NEXT: orps %xmm14, %xmm9
-; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm2[0,1,0,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm3[0,1,0,1]
; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,65535,65535,65535,65535,0,65535,65535]
; SSE-NEXT: pslldq {{.*#+}} xmm12 = zero,zero,xmm12[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
; SSE-NEXT: pand %xmm14, %xmm12
@@ -964,7 +964,7 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,0,0,65535,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm8, %xmm15
-; SSE-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm3[0],xmm15[1],xmm3[1],xmm15[2],xmm3[2],xmm15[3],xmm3[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm2[0],xmm15[1],xmm2[1],xmm15[2],xmm2[2],xmm15[3],xmm2[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm15[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,5,5,4]
; SSE-NEXT: pand %xmm12, %xmm15
@@ -994,12 +994,12 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: por %xmm13, %xmm12
; SSE-NEXT: movdqa {{.*#+}} xmm10 = [0,65535,65535,65535,65535,65535,65535,0]
; SSE-NEXT: pand %xmm10, %xmm12
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm2[3,3,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm3[3,3,3,3]
; SSE-NEXT: pandn %xmm11, %xmm10
; SSE-NEXT: por %xmm12, %xmm10
-; SSE-NEXT: movdqa %xmm3, %xmm12
-; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm3[1,1,1,1,4,5,6,7]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; SSE-NEXT: movdqa %xmm2, %xmm12
+; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm2[1,1,1,1,4,5,6,7]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
; SSE-NEXT: psrld $16, %xmm8
; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm8[0],xmm12[1],xmm8[1]
; SSE-NEXT: movdqa %xmm0, %xmm8
@@ -1011,7 +1011,7 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,0,0,0,65535,65535,65535]
; SSE-NEXT: pand %xmm8, %xmm12
; SSE-NEXT: movdqa %xmm7, %xmm13
-; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[2,2],xmm2[1,1]
+; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[2,2],xmm3[1,1]
; SSE-NEXT: pandn %xmm13, %xmm8
; SSE-NEXT: por %xmm12, %xmm8
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
@@ -1020,19 +1020,18 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: pand %xmm0, %xmm6
; SSE-NEXT: pandn %xmm12, %xmm0
; SSE-NEXT: por %xmm6, %xmm0
-; SSE-NEXT: movaps %xmm2, %xmm6
-; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,3],xmm14[0,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,1,1]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,3],xmm14[0,1]
; SSE-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
-; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm4[2,1]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,1]
; SSE-NEXT: movaps {{.*#+}} xmm4 = [65535,65535,65535,0,0,0,0,65535]
-; SSE-NEXT: andps %xmm4, %xmm6
+; SSE-NEXT: andps %xmm4, %xmm3
; SSE-NEXT: andnps %xmm0, %xmm4
-; SSE-NEXT: orps %xmm6, %xmm4
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm3[0]
+; SSE-NEXT: orps %xmm3, %xmm4
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm2[0]
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm11[2,1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,1,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; SSE-NEXT: movaps {{.*#+}} xmm2 = [65535,65535,65535,65535,0,0,0,65535]
; SSE-NEXT: andps %xmm2, %xmm5
; SSE-NEXT: andnps %xmm0, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index 0495e240ba968a..c72b8518900e58 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -690,74 +690,74 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; SSE: # %bb.0:
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE-NEXT: movq {{.*#+}} xmm3 = mem[0],zero
-; SSE-NEXT: movq {{.*#+}} xmm10 = mem[0],zero
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movq {{.*#+}} xmm9 = mem[0],zero
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
-; SSE-NEXT: movq {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: movq {{.*#+}} xmm5 = mem[0],zero
; SSE-NEXT: movq {{.*#+}} xmm14 = mem[0],zero
; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: movq {{.*#+}} xmm5 = mem[0],zero
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
+; SSE-NEXT: movq {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,0,0,0,4,5,6,7]
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
; SSE-NEXT: pand %xmm6, %xmm0
-; SSE-NEXT: movdqa %xmm4, %xmm7
-; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3],xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
+; SSE-NEXT: movdqa %xmm5, %xmm7
+; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm7[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,1,1,3]
; SSE-NEXT: pandn %xmm8, %xmm6
; SSE-NEXT: por %xmm0, %xmm6
-; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255]
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
; SSE-NEXT: pand %xmm8, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm10[0,2,1,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,1,1,0]
-; SSE-NEXT: movdqa %xmm8, %xmm12
-; SSE-NEXT: pandn %xmm11, %xmm12
-; SSE-NEXT: por %xmm0, %xmm12
-; SSE-NEXT: pand %xmm9, %xmm12
-; SSE-NEXT: pandn %xmm6, %xmm9
-; SSE-NEXT: por %xmm12, %xmm9
+; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm9[0,2,1,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,1,0]
+; SSE-NEXT: movdqa %xmm8, %xmm11
+; SSE-NEXT: pandn %xmm10, %xmm11
+; SSE-NEXT: por %xmm0, %xmm11
+; SSE-NEXT: pand %xmm12, %xmm11
+; SSE-NEXT: pandn %xmm6, %xmm12
+; SSE-NEXT: por %xmm11, %xmm12
; SSE-NEXT: pxor %xmm0, %xmm0
-; SSE-NEXT: movdqa %xmm5, %xmm12
-; SSE-NEXT: movdqa %xmm5, %xmm15
+; SSE-NEXT: movdqa %xmm4, %xmm11
+; SSE-NEXT: movdqa %xmm4, %xmm15
; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm0[8],xmm15[9],xmm0[9],xmm15[10],xmm0[10],xmm15[11],xmm0[11],xmm15[12],xmm0[12],xmm15[13],xmm0[13],xmm15[14],xmm0[14],xmm15[15],xmm0[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3],xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
-; SSE-NEXT: movdqa %xmm12, %xmm13
+; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3],xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
+; SSE-NEXT: movdqa %xmm11, %xmm13
; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm15[0],xmm13[1],xmm15[1],xmm13[2],xmm15[2],xmm13[3],xmm15[3]
; SSE-NEXT: movdqa %xmm13, %xmm0
; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
; SSE-NEXT: movdqa %xmm13, %xmm6
; SSE-NEXT: packuswb %xmm0, %xmm6
-; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255]
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm14[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: movdqa %xmm11, %xmm14
+; SSE-NEXT: movdqa %xmm10, %xmm14
; SSE-NEXT: pandn %xmm0, %xmm14
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,3]
-; SSE-NEXT: pand %xmm11, %xmm0
+; SSE-NEXT: pand %xmm10, %xmm0
; SSE-NEXT: por %xmm0, %xmm14
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
-; SSE-NEXT: pand %xmm6, %xmm9
+; SSE-NEXT: pand %xmm6, %xmm12
; SSE-NEXT: pandn %xmm14, %xmm6
-; SSE-NEXT: por %xmm9, %xmm6
+; SSE-NEXT: por %xmm12, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,2,3,3]
-; SSE-NEXT: movdqa %xmm11, %xmm9
-; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: movdqa %xmm10, %xmm12
+; SSE-NEXT: pandn %xmm0, %xmm12
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[2,1,3,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm11, %xmm0
-; SSE-NEXT: por %xmm9, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; SSE-NEXT: movdqa %xmm1, %xmm9
-; SSE-NEXT: pandn %xmm0, %xmm9
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm10[0,1,2,3,5,6,6,7]
+; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: por %xmm12, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm1
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm9[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,2,2]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
; SSE-NEXT: movdqa %xmm0, %xmm14
@@ -766,85 +766,85 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: por %xmm2, %xmm14
-; SSE-NEXT: pand %xmm1, %xmm14
-; SSE-NEXT: por %xmm9, %xmm14
-; SSE-NEXT: movdqa %xmm15, %xmm2
-; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
-; SSE-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,2,2,2,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,4]
-; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm12[0,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
-; SSE-NEXT: packuswb %xmm2, %xmm9
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm12, %xmm9
+; SSE-NEXT: pand %xmm12, %xmm14
+; SSE-NEXT: por %xmm1, %xmm14
+; SSE-NEXT: movdqa %xmm15, %xmm1
+; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
+; SSE-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,4]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm11[0,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
+; SSE-NEXT: packuswb %xmm1, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm1, %xmm2
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm15[2,2,2,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; SSE-NEXT: pandn %xmm2, %xmm12
-; SSE-NEXT: por %xmm9, %xmm12
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,0,0,0,255,255,255,0,0,0,0,255,255]
-; SSE-NEXT: pand %xmm2, %xmm12
-; SSE-NEXT: pandn %xmm14, %xmm2
-; SSE-NEXT: por %xmm2, %xmm12
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm13[2,2,3,3]
-; SSE-NEXT: psrldq {{.*#+}} xmm13 = xmm13[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: packuswb %xmm13, %xmm2
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm15[1,1,1,1,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,0,0,0]
-; SSE-NEXT: pandn %xmm9, %xmm0
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[0,1,2,2]
-; SSE-NEXT: movdqa %xmm3, %xmm10
-; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm3[1,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,0,2,1]
-; SSE-NEXT: pand %xmm11, %xmm9
-; SSE-NEXT: pandn %xmm2, %xmm11
-; SSE-NEXT: por %xmm9, %xmm11
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[1,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
-; SSE-NEXT: pand %xmm8, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,5,6,4]
-; SSE-NEXT: pandn %xmm7, %xmm8
-; SSE-NEXT: por %xmm2, %xmm8
-; SSE-NEXT: pand %xmm1, %xmm8
+; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm15[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,0,0,0]
; SSE-NEXT: pandn %xmm11, %xmm1
-; SSE-NEXT: por %xmm8, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm2
; SSE-NEXT: por %xmm2, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm4[0,1,2,3,7,7,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3],xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7]
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,7,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,2,2,2]
-; SSE-NEXT: pand %xmm0, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,255,255,0,0,0,0,255,255,255,0,0,0,0,255,255]
+; SSE-NEXT: pand %xmm11, %xmm1
+; SSE-NEXT: pandn %xmm14, %xmm11
+; SSE-NEXT: por %xmm1, %xmm11
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[2,2,3,3]
+; SSE-NEXT: psrldq {{.*#+}} xmm13 = xmm13[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: packuswb %xmm13, %xmm1
+; SSE-NEXT: pand %xmm0, %xmm1
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm15[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm5[3,1,2,3]
-; SSE-NEXT: por %xmm4, %xmm0
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm15[3,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm4, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7]
-; SSE-NEXT: pandn %xmm2, %xmm4
-; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm4, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[0,1,2,2]
+; SSE-NEXT: movdqa %xmm3, %xmm9
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[1,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
+; SSE-NEXT: pand %xmm10, %xmm2
+; SSE-NEXT: pandn %xmm1, %xmm10
+; SSE-NEXT: por %xmm2, %xmm10
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[1,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,1]
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[1,1,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,4]
+; SSE-NEXT: pandn %xmm2, %xmm8
+; SSE-NEXT: por %xmm1, %xmm8
+; SSE-NEXT: pand %xmm12, %xmm8
+; SSE-NEXT: pandn %xmm10, %xmm12
+; SSE-NEXT: por %xmm8, %xmm12
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
+; SSE-NEXT: pand %xmm7, %xmm12
+; SSE-NEXT: pandn %xmm0, %xmm7
+; SSE-NEXT: por %xmm12, %xmm7
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3],xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,2,2,2]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1],xmm2[2],xmm9[2],xmm2[3],xmm9[3],xmm2[4],xmm9[4],xmm2[5],xmm9[5],xmm2[6],xmm9[6],xmm2[7],xmm9[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,2,2,2]
+; SSE-NEXT: pand %xmm0, %xmm2
+; SSE-NEXT: pandn %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,1,2,3]
+; SSE-NEXT: por %xmm2, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm15[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm3, %xmm2
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,3,2,3,4,5,6,7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,1,4,5,6,7]
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: por %xmm2, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: pandn %xmm3, %xmm1
+; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT: movq %xmm2, 48(%rax)
-; SSE-NEXT: movdqa %xmm1, 16(%rax)
-; SSE-NEXT: movdqa %xmm12, 32(%rax)
+; SSE-NEXT: movq %xmm1, 48(%rax)
+; SSE-NEXT: movdqa %xmm7, 16(%rax)
+; SSE-NEXT: movdqa %xmm11, 32(%rax)
; SSE-NEXT: movdqa %xmm6, (%rax)
; SSE-NEXT: retq
;
@@ -1380,113 +1380,110 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: movdqa (%rsi), %xmm4
; SSE-NEXT: movdqa (%rdx), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa (%rcx), %xmm5
+; SSE-NEXT: movdqa (%rcx), %xmm6
; SSE-NEXT: movdqa (%r8), %xmm7
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa (%r9), %xmm8
+; SSE-NEXT: movdqa (%r9), %xmm13
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
-; SSE-NEXT: pand %xmm13, %xmm0
-; SSE-NEXT: movdqa %xmm5, %xmm1
-; SSE-NEXT: movdqa %xmm5, %xmm6
-; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
+; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: movdqa %xmm6, %xmm1
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: movdqa %xmm13, %xmm2
+; SSE-NEXT: movdqa %xmm12, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pandn %xmm2, %xmm1
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,7,7,7,7]
-; SSE-NEXT: movdqa %xmm3, %xmm12
+; SSE-NEXT: movdqa %xmm3, %xmm11
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,2,2,2]
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; SSE-NEXT: pand %xmm10, %xmm3
-; SSE-NEXT: movdqa %xmm4, %xmm9
-; SSE-NEXT: movdqa %xmm4, %xmm5
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: movdqa %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm4, %xmm14
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm4[8],xmm9[9],xmm4[9],xmm9[10],xmm4[10],xmm9[11],xmm4[11],xmm9[12],xmm4[12],xmm9[13],xmm4[13],xmm9[14],xmm4[14],xmm9[15],xmm4[15]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm9[0,1,2,3,4,6,6,7]
-; SSE-NEXT: movdqa %xmm9, (%rsp) # 16-byte Spill
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,3,3]
+; SSE-NEXT: movdqa %xmm8, %xmm10
; SSE-NEXT: pandn %xmm4, %xmm10
; SSE-NEXT: por %xmm3, %xmm10
; SSE-NEXT: pand %xmm0, %xmm10
; SSE-NEXT: por %xmm1, %xmm10
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm7[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255]
-; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255]
+; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: pandn %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm8, %xmm1
-; SSE-NEXT: movdqa %xmm8, %xmm3
-; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
+; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: movdqa %xmm13, %xmm15
+; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm13[8],xmm1[9],xmm13[9],xmm1[10],xmm13[10],xmm1[11],xmm13[11],xmm1[12],xmm13[12],xmm1[13],xmm13[13],xmm1[14],xmm13[14],xmm1[15],xmm13[15]
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,3]
-; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm5, %xmm1
; SSE-NEXT: por %xmm4, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
; SSE-NEXT: pand %xmm4, %xmm10
; SSE-NEXT: pandn %xmm1, %xmm4
-; SSE-NEXT: movdqa (%rax), %xmm7
+; SSE-NEXT: movdqa (%rax), %xmm9
; SSE-NEXT: por %xmm10, %xmm4
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm7[0,1,2,3,6,7,7,7]
-; SSE-NEXT: movdqa %xmm7, %xmm15
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm9[0,1,2,3,6,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,2]
-; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
-; SSE-NEXT: movdqa %xmm11, %xmm7
-; SSE-NEXT: pandn %xmm1, %xmm7
-; SSE-NEXT: pand %xmm11, %xmm4
-; SSE-NEXT: por %xmm4, %xmm7
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,1,2,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
+; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: pand %xmm10, %xmm4
+; SSE-NEXT: por %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,0,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm10, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm13, %xmm4
; SSE-NEXT: pandn %xmm1, %xmm4
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm12[3,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm12, %xmm5
+; SSE-NEXT: movdqa %xmm11, %xmm2
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm11[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm10, %xmm1
+; SSE-NEXT: pand %xmm13, %xmm1
; SSE-NEXT: por %xmm1, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT: movdqa %xmm12, %xmm14
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm11, %xmm14
; SSE-NEXT: pandn %xmm1, %xmm14
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm8[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm12, %xmm1
+; SSE-NEXT: pand %xmm11, %xmm1
; SSE-NEXT: por %xmm1, %xmm14
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255]
; SSE-NEXT: pand %xmm1, %xmm4
; SSE-NEXT: pandn %xmm14, %xmm1
; SSE-NEXT: por %xmm4, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,1,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm15[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,0]
-; SSE-NEXT: movdqa %xmm2, %xmm14
+; SSE-NEXT: movdqa %xmm5, %xmm14
; SSE-NEXT: pandn %xmm4, %xmm14
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm6[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm7[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm7, %xmm15
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm4
; SSE-NEXT: por %xmm4, %xmm14
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255]
; SSE-NEXT: pand %xmm4, %xmm14
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm15[3,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm15, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm9[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
; SSE-NEXT: pandn %xmm7, %xmm4
; SSE-NEXT: por %xmm14, %xmm4
@@ -1495,72 +1492,69 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: pandn %xmm4, %xmm7
; SSE-NEXT: por %xmm1, %xmm7
; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm9[0,1,2,3,4,5,5,7]
+; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: # xmm1 = mem[0,1,2,3,4,5,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: movdqa %xmm13, %xmm4
+; SSE-NEXT: movdqa %xmm12, %xmm4
; SSE-NEXT: pandn %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm5, %xmm15
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,6,6,6,6]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,6,6,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
-; SSE-NEXT: pand %xmm13, %xmm1
+; SSE-NEXT: pand %xmm12, %xmm1
; SSE-NEXT: por %xmm1, %xmm4
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,6,6,6,6]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,6,6,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
-; SSE-NEXT: movdqa %xmm2, %xmm7
+; SSE-NEXT: movdqa %xmm5, %xmm7
; SSE-NEXT: pandn %xmm1, %xmm7
; SSE-NEXT: pshufd $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm5, %xmm1
; SSE-NEXT: por %xmm7, %xmm1
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: pandn %xmm4, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pshufd $229, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[1,1,2,3]
-; SSE-NEXT: movdqa %xmm10, %xmm4
+; SSE-NEXT: movdqa %xmm13, %xmm4
; SSE-NEXT: pandn %xmm1, %xmm4
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,5,6,6,7]
-; SSE-NEXT: movdqa %xmm6, %xmm9
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm15[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,2]
-; SSE-NEXT: pand %xmm10, %xmm1
+; SSE-NEXT: pand %xmm13, %xmm1
; SSE-NEXT: por %xmm4, %xmm1
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,4,5,6,6]
-; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm9[0,1,2,3,4,5,6,6]
+; SSE-NEXT: movdqa %xmm9, %xmm14
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,3,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm5, %xmm7
+; SSE-NEXT: movdqa %xmm8, %xmm7
; SSE-NEXT: pandn %xmm4, %xmm7
-; SSE-NEXT: pand %xmm5, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm1
; SSE-NEXT: por %xmm1, %xmm7
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0]
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: pandn %xmm7, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: movdqa %xmm8, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[0,0,0,0,4,5,6,7]
-; SSE-NEXT: movdqa %xmm8, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: pand %xmm8, %xmm0
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
-; SSE-NEXT: movdqa %xmm11, %xmm7
+; SSE-NEXT: movdqa %xmm10, %xmm7
; SSE-NEXT: pandn %xmm0, %xmm7
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm15[0,0,2,1,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm11, %xmm0
+; SSE-NEXT: pand %xmm10, %xmm0
; SSE-NEXT: por %xmm0, %xmm7
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255]
; SSE-NEXT: pand %xmm0, %xmm7
@@ -1571,41 +1565,40 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,1]
-; SSE-NEXT: movdqa %xmm13, %xmm7
+; SSE-NEXT: movdqa %xmm12, %xmm7
; SSE-NEXT: pandn %xmm1, %xmm7
-; SSE-NEXT: movdqa %xmm9, %xmm5
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[0,0,0,0,4,5,6,7]
+; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm15[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE-NEXT: pand %xmm13, %xmm1
+; SSE-NEXT: pand %xmm12, %xmm1
; SSE-NEXT: por %xmm1, %xmm7
-; SSE-NEXT: movdqa %xmm6, %xmm8
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: movdqa %xmm5, %xmm9
; SSE-NEXT: pandn %xmm1, %xmm9
-; SSE-NEXT: pand %xmm2, %xmm7
+; SSE-NEXT: pand %xmm5, %xmm7
; SSE-NEXT: por %xmm7, %xmm9
-; SSE-NEXT: movdqa {{.*#+}} xmm14 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
-; SSE-NEXT: pand %xmm14, %xmm0
-; SSE-NEXT: pandn %xmm9, %xmm14
-; SSE-NEXT: por %xmm0, %xmm14
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: pandn %xmm9, %xmm8
+; SSE-NEXT: por %xmm0, %xmm8
; SSE-NEXT: pshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[0,1,1,3]
-; SSE-NEXT: movdqa %xmm10, %xmm7
+; SSE-NEXT: movdqa %xmm13, %xmm7
; SSE-NEXT: pandn %xmm0, %xmm7
-; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm3[0,1,2,3,4,5,5,7]
+; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,1,3,2]
-; SSE-NEXT: pand %xmm10, %xmm9
+; SSE-NEXT: pand %xmm13, %xmm9
; SSE-NEXT: por %xmm7, %xmm9
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm15[0,1,2,3,5,5,5,5]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,5,5,5,5]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm2, %xmm7
+; SSE-NEXT: movdqa %xmm5, %xmm7
; SSE-NEXT: pandn %xmm0, %xmm7
-; SSE-NEXT: pshuflw $233, (%rsp), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: pshuflw $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm2, %xmm6
+; SSE-NEXT: pand %xmm5, %xmm6
; SSE-NEXT: por %xmm7, %xmm6
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0]
; SSE-NEXT: pand %xmm0, %xmm6
@@ -1614,29 +1607,28 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: pshuflw $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: # xmm3 = mem[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
-; SSE-NEXT: movdqa %xmm12, %xmm6
+; SSE-NEXT: movdqa %xmm11, %xmm6
; SSE-NEXT: pandn %xmm3, %xmm6
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm5[0,1,2,3,4,4,6,5]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm15[0,1,2,3,4,4,6,5]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,3]
-; SSE-NEXT: pand %xmm12, %xmm3
+; SSE-NEXT: pand %xmm11, %xmm3
; SSE-NEXT: por %xmm3, %xmm6
-; SSE-NEXT: pand %xmm13, %xmm6
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,4,5,5,7]
+; SSE-NEXT: pand %xmm12, %xmm6
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm14[0,1,2,3,4,5,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSE-NEXT: pandn %xmm3, %xmm13
-; SSE-NEXT: por %xmm6, %xmm13
+; SSE-NEXT: pandn %xmm3, %xmm12
+; SSE-NEXT: por %xmm6, %xmm12
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
; SSE-NEXT: pand %xmm3, %xmm0
-; SSE-NEXT: pandn %xmm13, %xmm3
+; SSE-NEXT: pandn %xmm12, %xmm3
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[2,2,3,3]
-; SSE-NEXT: movdqa %xmm2, %xmm6
+; SSE-NEXT: movdqa %xmm5, %xmm6
; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: movdqa %xmm1, %xmm13
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[2,1,3,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pand %xmm5, %xmm0
; SSE-NEXT: por %xmm6, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
; SSE-NEXT: movdqa %xmm6, %xmm7
@@ -1644,73 +1636,75 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm1[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm12, %xmm0
+; SSE-NEXT: movdqa %xmm11, %xmm0
; SSE-NEXT: pandn %xmm9, %xmm0
-; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm15[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm2[0,2,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm2, %xmm15
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,1,1,3]
-; SSE-NEXT: pand %xmm12, %xmm9
+; SSE-NEXT: pand %xmm11, %xmm9
; SSE-NEXT: por %xmm9, %xmm0
; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: por %xmm7, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm5[0,1,2,3,4,6,5,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm4[0,1,2,3,4,6,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,1,3,2]
-; SSE-NEXT: movdqa %xmm11, %xmm9
+; SSE-NEXT: movdqa %xmm10, %xmm9
; SSE-NEXT: pandn %xmm7, %xmm9
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm4[2,2,2,3,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm2[2,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,0,2,1]
-; SSE-NEXT: pand %xmm11, %xmm7
+; SSE-NEXT: pand %xmm10, %xmm7
; SSE-NEXT: por %xmm7, %xmm9
-; SSE-NEXT: pand %xmm10, %xmm9
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm8[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pand %xmm13, %xmm9
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm14[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,0,0,0]
-; SSE-NEXT: pandn %xmm7, %xmm10
-; SSE-NEXT: por %xmm9, %xmm10
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,0,0,0,0,255,255,255,0,0,0,0,255,255]
-; SSE-NEXT: pand %xmm7, %xmm10
-; SSE-NEXT: pandn %xmm0, %xmm7
-; SSE-NEXT: por %xmm7, %xmm10
+; SSE-NEXT: pandn %xmm7, %xmm13
+; SSE-NEXT: por %xmm9, %xmm13
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,0,0,0,0,255,255,255,0,0,0,0,255,255]
+; SSE-NEXT: pand %xmm12, %xmm13
+; SSE-NEXT: pandn %xmm0, %xmm12
+; SSE-NEXT: por %xmm13, %xmm12
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,2,2]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm15[1,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,0,2,1]
-; SSE-NEXT: pand %xmm2, %xmm7
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: por %xmm7, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[1,1,2,2,4,5,6,7]
+; SSE-NEXT: pand %xmm5, %xmm7
+; SSE-NEXT: pandn %xmm0, %xmm5
+; SSE-NEXT: por %xmm7, %xmm5
+; SSE-NEXT: pshuflw $165, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: # xmm0 = mem[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm11, %xmm0
+; SSE-NEXT: pand %xmm10, %xmm0
; SSE-NEXT: pshufd $101, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
; SSE-NEXT: # xmm7 = mem[1,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,5,6,4]
-; SSE-NEXT: pandn %xmm7, %xmm11
-; SSE-NEXT: por %xmm0, %xmm11
-; SSE-NEXT: pand %xmm6, %xmm11
-; SSE-NEXT: pandn %xmm2, %xmm6
-; SSE-NEXT: por %xmm11, %xmm6
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pandn %xmm7, %xmm10
+; SSE-NEXT: por %xmm0, %xmm10
+; SSE-NEXT: pand %xmm6, %xmm10
+; SSE-NEXT: pandn %xmm5, %xmm6
+; SSE-NEXT: por %xmm10, %xmm6
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[1,1,1,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm5[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm4[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
; SSE-NEXT: pandn %xmm2, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: pand %xmm12, %xmm1
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pand %xmm11, %xmm1
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm14[1,1,1,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: pandn %xmm0, %xmm12
-; SSE-NEXT: por %xmm1, %xmm12
+; SSE-NEXT: pandn %xmm0, %xmm11
+; SSE-NEXT: por %xmm1, %xmm11
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
; SSE-NEXT: pand %xmm0, %xmm6
-; SSE-NEXT: pandn %xmm12, %xmm0
+; SSE-NEXT: pandn %xmm11, %xmm0
; SSE-NEXT: por %xmm6, %xmm0
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE-NEXT: movdqa %xmm0, 16(%rax)
-; SSE-NEXT: movdqa %xmm10, 32(%rax)
+; SSE-NEXT: movdqa %xmm12, 32(%rax)
; SSE-NEXT: movdqa %xmm3, 64(%rax)
-; SSE-NEXT: movdqa %xmm14, (%rax)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm8, (%rax)
+; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 80(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 48(%rax)
@@ -2685,72 +2679,70 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %in.vecptr5, ptr %in.vecptr6, ptr %out.vec) nounwind {
; SSE-LABEL: store_i8_stride7_vf32:
; SSE: # %bb.0:
-; SSE-NEXT: subq $360, %rsp # imm = 0x168
-; SSE-NEXT: movdqa 16(%rdi), %xmm1
+; SSE-NEXT: subq $344, %rsp # imm = 0x158
+; SSE-NEXT: movdqa 16(%rdi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 16(%rsi), %xmm4
; SSE-NEXT: movdqa 16(%rdx), %xmm3
-; SSE-NEXT: movdqa 16(%rcx), %xmm7
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 16(%r8), %xmm6
+; SSE-NEXT: movdqa 16(%rcx), %xmm14
+; SSE-NEXT: movdqa 16(%r8), %xmm7
; SSE-NEXT: movdqa 16(%r9), %xmm5
; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm1[0,1,2,3,6,6,6,6]
-; SSE-NEXT: movdqa %xmm1, %xmm15
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
-; SSE-NEXT: pand %xmm10, %xmm0
-; SSE-NEXT: movdqa %xmm4, %xmm8
-; SSE-NEXT: movdqa %xmm4, %xmm13
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
+; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm9
+; SSE-NEXT: movdqa %xmm4, %xmm15
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm4[8],xmm8[9],xmm4[9],xmm8[10],xmm4[10],xmm8[11],xmm4[11],xmm8[12],xmm4[12],xmm8[13],xmm4[13],xmm8[14],xmm4[14],xmm8[15],xmm4[15]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,4,5,5,7]
-; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm4[8],xmm9[9],xmm4[9],xmm9[10],xmm4[10],xmm9[11],xmm4[11],xmm9[12],xmm4[12],xmm9[13],xmm4[13],xmm9[14],xmm4[14],xmm9[15],xmm4[15]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm9[0,1,2,3,4,5,5,7]
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: movdqa %xmm12, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm11
+; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pandn %xmm2, %xmm0
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,6,6,6,6]
; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
-; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255]
-; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255]
+; SSE-NEXT: movdqa %xmm13, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
+; SSE-NEXT: movdqa %xmm14, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm14[8],xmm2[9],xmm14[9],xmm2[10],xmm14[10],xmm2[11],xmm14[11],xmm2[12],xmm14[12],xmm2[13],xmm14[13],xmm2[14],xmm14[14],xmm2[15],xmm14[15]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,2,3]
-; SSE-NEXT: movdqa %xmm2, %xmm7
+; SSE-NEXT: movdqa %xmm2, %xmm8
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; SSE-NEXT: pand %xmm9, %xmm1
+; SSE-NEXT: pand %xmm13, %xmm1
; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: pand %xmm11, %xmm1
+; SSE-NEXT: pand %xmm6, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
; SSE-NEXT: pand %xmm11, %xmm1
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,5,6,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,2]
; SSE-NEXT: movdqa %xmm11, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm3
; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
; SSE-NEXT: movdqa %xmm5, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE-NEXT: movdqa %xmm1, %xmm5
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT: pand %xmm12, %xmm3
+; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: movdqa 16(%rax), %xmm14
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm14[0,1,2,3,4,5,6,6]
+; SSE-NEXT: movdqa 16(%rax), %xmm10
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm10[0,1,2,3,4,5,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
; SSE-NEXT: movdqa %xmm3, %xmm2
@@ -2758,34 +2750,36 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,6,5,7,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,6,5,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: movdqa %xmm12, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm4[0,1,2,3,7,7,7,7]
+; SSE-NEXT: movdqa %xmm4, %xmm6
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: pand %xmm12, %xmm0
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm9[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm15[0,1,2,3,7,7,7,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
; SSE-NEXT: pand %xmm4, %xmm1
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,7,7,7,7]
-; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,7,7,7,7]
+; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: movdqa %xmm13, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: pand %xmm9, %xmm3
+; SSE-NEXT: pand %xmm13, %xmm3
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,2,2,3]
@@ -2794,114 +2788,116 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: pandn %xmm3, %xmm4
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: por %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm14[0,1,2,3,6,7,7,7]
+; SSE-NEXT: movdqa %xmm10, %xmm5
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm10[0,1,2,3,6,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,2]
; SSE-NEXT: movdqa %xmm11, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: pand %xmm11, %xmm4
; SSE-NEXT: por %xmm4, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[2,1,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,0,3]
-; SSE-NEXT: movdqa %xmm12, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm10, %xmm4
; SSE-NEXT: pandn %xmm1, %xmm4
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm15[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm8[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm12, %xmm1
+; SSE-NEXT: pand %xmm10, %xmm1
; SSE-NEXT: por %xmm1, %xmm4
-; SSE-NEXT: pshufd $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: # xmm1 = mem[2,1,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
; SSE-NEXT: movdqa {{.*#+}} xmm15 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT: movdqa %xmm15, %xmm7
-; SSE-NEXT: pandn %xmm1, %xmm7
-; SSE-NEXT: pshuflw $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: # xmm1 = mem[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm15, %xmm8
+; SSE-NEXT: pandn %xmm1, %xmm8
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
; SSE-NEXT: pand %xmm15, %xmm1
-; SSE-NEXT: por %xmm1, %xmm7
+; SSE-NEXT: por %xmm1, %xmm8
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255]
; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: pandn %xmm7, %xmm3
+; SSE-NEXT: pandn %xmm8, %xmm3
; SSE-NEXT: pand %xmm1, %xmm4
; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: pshufd $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,0]
-; SSE-NEXT: movdqa %xmm9, %xmm7
-; SSE-NEXT: pandn %xmm4, %xmm7
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm6[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm13, %xmm8
+; SSE-NEXT: pandn %xmm4, %xmm8
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm7[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm9, %xmm4
-; SSE-NEXT: por %xmm4, %xmm7
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm14[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pand %xmm13, %xmm4
+; SSE-NEXT: por %xmm4, %xmm8
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm5[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm0, %xmm8
-; SSE-NEXT: pandn %xmm4, %xmm8
-; SSE-NEXT: pand %xmm0, %xmm7
-; SSE-NEXT: por %xmm7, %xmm8
+; SSE-NEXT: movdqa %xmm0, %xmm9
+; SSE-NEXT: pandn %xmm4, %xmm9
+; SSE-NEXT: pand %xmm0, %xmm8
+; SSE-NEXT: por %xmm8, %xmm9
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255]
; SSE-NEXT: movdqa %xmm5, %xmm2
-; SSE-NEXT: pandn %xmm8, %xmm2
+; SSE-NEXT: pandn %xmm9, %xmm2
; SSE-NEXT: pand %xmm5, %xmm3
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsi), %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,1,2,3]
-; SSE-NEXT: movdqa %xmm2, %xmm6
+; SSE-NEXT: movdqa %xmm2, %xmm7
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[3,1,0,3]
-; SSE-NEXT: movdqa %xmm12, %xmm3
+; SSE-NEXT: movdqa %xmm10, %xmm3
; SSE-NEXT: pandn %xmm4, %xmm3
-; SSE-NEXT: movdqa (%rdi), %xmm13
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm13[3,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa (%rdi), %xmm2
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm2, %xmm6
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm12, %xmm4
+; SSE-NEXT: pand %xmm10, %xmm4
; SSE-NEXT: por %xmm4, %xmm3
-; SSE-NEXT: movdqa (%rcx), %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,1,2,3]
-; SSE-NEXT: movdqa %xmm2, %xmm14
-; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa (%rcx), %xmm11
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm11[2,1,2,3]
+; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm15, %xmm7
-; SSE-NEXT: pandn %xmm4, %xmm7
-; SSE-NEXT: movdqa (%rdx), %xmm8
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm8[3,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm8, (%rsp) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm15, %xmm8
+; SSE-NEXT: pandn %xmm4, %xmm8
+; SSE-NEXT: movdqa (%rdx), %xmm2
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm2, %xmm12
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
; SSE-NEXT: pand %xmm15, %xmm4
-; SSE-NEXT: por %xmm4, %xmm7
+; SSE-NEXT: por %xmm4, %xmm8
; SSE-NEXT: pand %xmm1, %xmm3
-; SSE-NEXT: pandn %xmm7, %xmm1
+; SSE-NEXT: pandn %xmm8, %xmm1
; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: movdqa (%r9), %xmm11
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm11[2,1,2,3]
-; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa (%r9), %xmm10
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm10[2,1,2,3]
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,0]
-; SSE-NEXT: movdqa %xmm9, %xmm4
+; SSE-NEXT: movdqa %xmm13, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: movdqa (%r8), %xmm7
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[3,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa (%r8), %xmm2
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm2, %xmm8
+; SSE-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm9, %xmm3
+; SSE-NEXT: pand %xmm13, %xmm3
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: pand %xmm0, %xmm4
-; SSE-NEXT: movdqa (%rax), %xmm10
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm10[3,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa (%rax), %xmm9
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm9[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
; SSE-NEXT: pandn %xmm3, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
@@ -2909,45 +2905,45 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: pandn %xmm0, %xmm5
; SSE-NEXT: por %xmm1, %xmm5
; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15]
+; SSE-NEXT: movdqa %xmm7, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm7[8],xmm0[9],xmm7[9],xmm0[10],xmm7[10],xmm0[11],xmm7[11],xmm0[12],xmm7[12],xmm0[13],xmm7[13],xmm0[14],xmm7[14],xmm0[15],xmm7[15]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm13[0,1,2,3,6,6,6,6]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,6,6,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,6,6,6,6]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm12[0,1,2,3,6,6,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: movdqa %xmm13, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm14, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm14[8],xmm0[9],xmm14[9],xmm0[10],xmm14[10],xmm0[11],xmm14[11],xmm0[12],xmm14[12],xmm0[13],xmm14[13],xmm0[14],xmm14[14],xmm0[15],xmm14[15]
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
+; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
-; SSE-NEXT: pand %xmm9, %xmm0
+; SSE-NEXT: pand %xmm13, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm3, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[1,1,2,3]
-; SSE-NEXT: movdqa %xmm12, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[1,1,2,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm7[0,1,2,3,5,6,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,2]
-; SSE-NEXT: pand %xmm12, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm10[0,1,2,3,4,5,6,6]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm9[0,1,2,3,4,5,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,3]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
; SSE-NEXT: movdqa %xmm2, %xmm4
@@ -2959,19 +2955,19 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: pandn %xmm4, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[2,2,3,3]
-; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,3,3]
+; SSE-NEXT: movdqa %xmm14, %xmm10
+; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm13, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm7[2,1,3,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm9, %xmm0
+; SSE-NEXT: pand %xmm13, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -2985,22 +2981,24 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSE-NEXT: pand %xmm15, %xmm0
; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: pand %xmm13, %xmm3
+; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,0,0,0,255,255,255,0,0,0,0,255,255]
+; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,0,0,0,0,255,255,255,0,0,0,0,255,255]
+; SSE-NEXT: movdqa %xmm9, %xmm0
; SSE-NEXT: pandn %xmm3, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,4,6,5,7]
; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,2]
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
-; SSE-NEXT: movdqa %xmm12, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
+; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm10[2,2,2,3,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm12[2,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,1]
-; SSE-NEXT: pand %xmm12, %xmm1
+; SSE-NEXT: pand %xmm5, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm11
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm5[2,2,2,2,4,5,6,7]
@@ -3010,37 +3008,38 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: pandn %xmm1, %xmm14
; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: por %xmm3, %xmm14
-; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm14
+; SSE-NEXT: pand %xmm9, %xmm14
; SSE-NEXT: por %xmm0, %xmm14
; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,1,2,2]
-; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: movdqa %xmm13, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[1,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm9, %xmm0
+; SSE-NEXT: pand %xmm13, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; SSE-NEXT: movdqa %xmm9, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[1,1,2,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,7,5,6,4]
-; SSE-NEXT: movdqa %xmm12, %xmm0
+; SSE-NEXT: movdqa %xmm11, %xmm0
; SSE-NEXT: pandn %xmm3, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,2,1]
-; SSE-NEXT: pand %xmm12, %xmm3
-; SSE-NEXT: movdqa %xmm12, %xmm11
+; SSE-NEXT: pand %xmm11, %xmm3
; SSE-NEXT: por %xmm3, %xmm0
-; SSE-NEXT: pand %xmm13, %xmm0
+; SSE-NEXT: pand %xmm9, %xmm0
+; SSE-NEXT: movdqa %xmm9, %xmm6
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm8[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm13, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm10[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm12[1,1,1,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE-NEXT: pand %xmm13, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm5[1,1,1,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
@@ -3056,208 +3055,208 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[0,1,1,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: movdqa (%rsp), %xmm8 # 16-byte Reload
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,4,5,5,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm12[0,1,2,3,4,5,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,2]
-; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0]
-; SSE-NEXT: movdqa %xmm6, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0]
+; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,5,5,5,5]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm14[0,1,2,3,5,5,5,5]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm9, %xmm4
+; SSE-NEXT: movdqa %xmm13, %xmm4
; SSE-NEXT: pandn %xmm0, %xmm4
; SSE-NEXT: pshuflw $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm9, %xmm0
+; SSE-NEXT: pand %xmm13, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
-; SSE-NEXT: pand %xmm6, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: pshuflw $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: # xmm3 = mem[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-NEXT: movdqa %xmm15, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm13[0,1,2,3,4,4,6,5]
+; SSE-NEXT: movdqa (%rsp), %xmm8 # 16-byte Reload
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,4,4,6,5]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,3]
; SSE-NEXT: pand %xmm15, %xmm3
; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm12[0,1,2,3,4,5,5,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm10[0,1,2,3,4,5,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
-; SSE-NEXT: movdqa %xmm1, %xmm10
-; SSE-NEXT: pandn %xmm3, %xmm10
-; SSE-NEXT: pand %xmm1, %xmm4
-; SSE-NEXT: por %xmm4, %xmm10
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: pandn %xmm10, %xmm1
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[0,1,2,2]
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pandn %xmm3, %xmm9
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: por %xmm4, %xmm9
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
+; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: pandn %xmm9, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,2,2]
+; SSE-NEXT: movdqa %xmm2, %xmm7
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm13, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm5[1,1,2,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm14[1,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm9, %xmm0
+; SSE-NEXT: pand %xmm13, %xmm0
; SSE-NEXT: por %xmm3, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: movdqa %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,2,1]
-; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,2,1]
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm0[0,1,2,3,7,5,6,4]
; SSE-NEXT: movdqa %xmm11, %xmm0
; SSE-NEXT: pandn %xmm4, %xmm0
-; SSE-NEXT: movdqa %xmm8, %xmm1
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm8[1,1,2,2,4,5,6,7]
+; SSE-NEXT: movdqa %xmm12, %xmm5
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm12[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,0,2,1]
; SSE-NEXT: pand %xmm11, %xmm4
; SSE-NEXT: por %xmm4, %xmm0
-; SSE-NEXT: pand %xmm5, %xmm0
+; SSE-NEXT: pand %xmm9, %xmm0
; SSE-NEXT: por %xmm3, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[0,2,2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[0,2,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm2, %xmm12
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,1,3]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm13, %xmm5
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm13[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[1,1,1,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm12[1,1,1,1,4,5,6,7]
+; SSE-NEXT: movdqa %xmm10, %xmm2
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm10[1,1,1,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
-; SSE-NEXT: movdqa %xmm15, %xmm10
-; SSE-NEXT: pandn %xmm3, %xmm10
+; SSE-NEXT: movdqa %xmm15, %xmm9
+; SSE-NEXT: pandn %xmm3, %xmm9
; SSE-NEXT: pand %xmm15, %xmm4
-; SSE-NEXT: por %xmm4, %xmm10
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
-; SSE-NEXT: pand %xmm3, %xmm0
-; SSE-NEXT: pandn %xmm10, %xmm3
-; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm7[0,0,2,1,4,5,6,7]
+; SSE-NEXT: por %xmm4, %xmm9
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
+; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: pandn %xmm9, %xmm10
+; SSE-NEXT: por %xmm0, %xmm10
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm6[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm4, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm5[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pand %xmm4, %xmm0
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255]
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm14[0,2,1,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm3[0,1,1,0]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[0,2,1,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm3[0,1,1,0]
; SSE-NEXT: movdqa %xmm11, %xmm3
-; SSE-NEXT: pandn %xmm10, %xmm3
-; SSE-NEXT: pshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; SSE-NEXT: # xmm10 = mem[0,0,2,1,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,0,2,1]
-; SSE-NEXT: pand %xmm11, %xmm10
-; SSE-NEXT: por %xmm10, %xmm3
+; SSE-NEXT: pandn %xmm9, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm14[0,0,2,1,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,0,2,1]
+; SSE-NEXT: pand %xmm11, %xmm9
+; SSE-NEXT: por %xmm9, %xmm3
; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: por %xmm4, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm8[0,1,1,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm12[0,1,1,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,0,2,1]
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
-; SSE-NEXT: movdqa %xmm13, %xmm10
-; SSE-NEXT: pandn %xmm4, %xmm10
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm5[0,0,0,0,4,5,6,7]
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
+; SSE-NEXT: movdqa %xmm6, %xmm9
+; SSE-NEXT: pandn %xmm4, %xmm9
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm8[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,0,0,0]
-; SSE-NEXT: pand %xmm13, %xmm4
-; SSE-NEXT: por %xmm4, %xmm10
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm12[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pand %xmm6, %xmm4
+; SSE-NEXT: por %xmm4, %xmm9
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,0,0,0]
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm13, %xmm2
; SSE-NEXT: pandn %xmm4, %xmm2
-; SSE-NEXT: pand %xmm9, %xmm10
-; SSE-NEXT: por %xmm10, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
-; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: pandn %xmm2, %xmm1
-; SSE-NEXT: pand %xmm7, %xmm3
-; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm13, %xmm9
+; SSE-NEXT: por %xmm9, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
+; SSE-NEXT: movdqa %xmm4, %xmm12
+; SSE-NEXT: pandn %xmm2, %xmm12
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: por %xmm3, %xmm12
; SSE-NEXT: pshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = mem[0,1,1,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,4,5,5,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm7[0,1,2,3,4,5,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,2]
-; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pand %xmm5, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm12[0,1,2,3,5,5,5,5]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm10[0,1,2,3,5,5,5,5]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,2,2,2]
-; SSE-NEXT: movdqa %xmm9, %xmm5
+; SSE-NEXT: movdqa %xmm13, %xmm5
; SSE-NEXT: pandn %xmm3, %xmm5
; SSE-NEXT: pshuflw $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: # xmm3 = mem[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,2,1]
-; SSE-NEXT: pand %xmm9, %xmm3
+; SSE-NEXT: pand %xmm13, %xmm3
; SSE-NEXT: por %xmm5, %xmm3
-; SSE-NEXT: pand %xmm6, %xmm3
-; SSE-NEXT: pandn %xmm2, %xmm6
-; SSE-NEXT: por %xmm3, %xmm6
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm2, %xmm1
+; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: pshuflw $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = mem[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-NEXT: movdqa %xmm15, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,4,4,6,5]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm9[0,1,2,3,4,4,6,5]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
; SSE-NEXT: pand %xmm15, %xmm2
; SSE-NEXT: por %xmm2, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm10[0,1,2,3,4,5,5,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,4,5,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE-NEXT: movdqa %xmm13, %xmm5
+; SSE-NEXT: movdqa %xmm6, %xmm5
; SSE-NEXT: pandn %xmm2, %xmm5
-; SSE-NEXT: pand %xmm13, %xmm3
+; SSE-NEXT: pand %xmm6, %xmm3
; SSE-NEXT: por %xmm3, %xmm5
; SSE-NEXT: movdqa {{.*#+}} xmm14 = [255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
-; SSE-NEXT: pand %xmm14, %xmm6
+; SSE-NEXT: pand %xmm14, %xmm1
; SSE-NEXT: pandn %xmm5, %xmm14
-; SSE-NEXT: por %xmm6, %xmm14
+; SSE-NEXT: por %xmm1, %xmm14
; SSE-NEXT: pshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm5, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm4[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm7[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm1
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: pshuflw $216, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[0,2,1,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,1,0]
; SSE-NEXT: movdqa %xmm11, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm12[0,0,2,1,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm10[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,1]
; SSE-NEXT: pand %xmm11, %xmm1
; SSE-NEXT: por %xmm1, %xmm3
@@ -3267,35 +3266,35 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: pshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[0,1,1,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,1]
-; SSE-NEXT: movdqa %xmm13, %xmm2
+; SSE-NEXT: movdqa %xmm6, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm8[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE-NEXT: pand %xmm13, %xmm1
+; SSE-NEXT: pand %xmm6, %xmm1
; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm10[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm8[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: movdqa %xmm13, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: pand %xmm9, %xmm2
+; SSE-NEXT: pand %xmm13, %xmm2
; SSE-NEXT: por %xmm2, %xmm3
-; SSE-NEXT: pand %xmm7, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm7
-; SSE-NEXT: por %xmm0, %xmm7
-; SSE-NEXT: movdqa (%rsp), %xmm5 # 16-byte Reload
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: pandn %xmm3, %xmm4
+; SSE-NEXT: por %xmm0, %xmm4
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: pand %xmm13, %xmm0
+; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: pshufhw $246, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[0,1,2,3,6,5,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pandn %xmm1, %xmm13
-; SSE-NEXT: por %xmm0, %xmm13
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pandn %xmm1, %xmm6
+; SSE-NEXT: por %xmm0, %xmm6
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm4, %xmm3
-; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: pand %xmm5, %xmm0
; SSE-NEXT: pshufhw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
@@ -3303,21 +3302,21 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
; SSE-NEXT: pand %xmm0, %xmm3
-; SSE-NEXT: pandn %xmm13, %xmm0
+; SSE-NEXT: pandn %xmm6, %xmm0
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm4[0,1,2,3,7,7,7,7]
+; SSE-NEXT: movdqa (%rsp), %xmm5 # 16-byte Reload
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm9, %xmm1
+; SSE-NEXT: movdqa %xmm13, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
; SSE-NEXT: pshufhw $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,3]
-; SSE-NEXT: pand %xmm9, %xmm0
+; SSE-NEXT: pand %xmm13, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm13[0,1,2,3,6,7,7,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,6,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,2]
; SSE-NEXT: movdqa %xmm11, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
@@ -3329,12 +3328,12 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[2,2,3,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm5[2,1,3,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[2,1,3,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
-; SSE-NEXT: pand %xmm9, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm9
-; SSE-NEXT: por %xmm2, %xmm9
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: pandn %xmm1, %xmm13
+; SSE-NEXT: por %xmm2, %xmm13
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm7[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
; SSE-NEXT: pand %xmm15, %xmm1
; SSE-NEXT: pshufhw $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
@@ -3344,10 +3343,10 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: por %xmm1, %xmm15
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
; SSE-NEXT: pand %xmm1, %xmm15
-; SSE-NEXT: pandn %xmm9, %xmm1
+; SSE-NEXT: pandn %xmm13, %xmm1
; SSE-NEXT: por %xmm15, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm4[2,2,2,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm5[2,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,1]
; SSE-NEXT: pand %xmm11, %xmm1
; SSE-NEXT: pshufhw $216, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
@@ -3355,23 +3354,22 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,2]
; SSE-NEXT: pandn %xmm2, %xmm11
; SSE-NEXT: por %xmm1, %xmm11
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm4, %xmm11
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm13[2,2,2,2,4,5,6,7]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm11
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE-NEXT: pandn %xmm1, %xmm4
-; SSE-NEXT: por %xmm11, %xmm4
+; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: por %xmm11, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,0,0,0,0,255,255,255,0,0,0,0,255,255]
-; SSE-NEXT: pand %xmm1, %xmm4
+; SSE-NEXT: pand %xmm1, %xmm2
; SSE-NEXT: pandn %xmm3, %xmm1
-; SSE-NEXT: por %xmm1, %xmm4
+; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT: movdqa %xmm4, 32(%rax)
+; SSE-NEXT: movdqa %xmm1, 32(%rax)
; SSE-NEXT: movdqa %xmm0, 96(%rax)
-; SSE-NEXT: movdqa %xmm7, 112(%rax)
+; SSE-NEXT: movdqa %xmm4, 112(%rax)
; SSE-NEXT: movdqa %xmm14, 176(%rax)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, (%rax)
+; SSE-NEXT: movdqa %xmm12, (%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
@@ -3390,7 +3388,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: movaps %xmm0, 208(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 192(%rax)
-; SSE-NEXT: addq $360, %rsp # imm = 0x168
+; SSE-NEXT: addq $344, %rsp # imm = 0x158
; SSE-NEXT: retq
;
; AVX-LABEL: store_i8_stride7_vf32:
@@ -5415,193 +5413,192 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE: # %bb.0:
; SSE-NEXT: subq $648, %rsp # imm = 0x288
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT: movdqa 48(%rdi), %xmm14
+; SSE-NEXT: movdqa 48(%rdi), %xmm9
; SSE-NEXT: movdqa 48(%rsi), %xmm2
; SSE-NEXT: movdqa 48(%rdx), %xmm3
-; SSE-NEXT: movdqa 48(%rcx), %xmm10
+; SSE-NEXT: movdqa 48(%rcx), %xmm4
; SSE-NEXT: movdqa 48(%r8), %xmm5
; SSE-NEXT: movdqa 48(%r9), %xmm8
; SSE-NEXT: movdqa 48(%rax), %xmm13
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm14[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm9[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,2,3]
-; SSE-NEXT: movdqa %xmm2, %xmm11
+; SSE-NEXT: movdqa %xmm2, %xmm12
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,0,3]
; SSE-NEXT: movdqa %xmm6, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255]
-; SSE-NEXT: pand %xmm12, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255]
+; SSE-NEXT: pand %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm7
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[3,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm3, %xmm6
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm10[2,1,2,3]
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: pand %xmm10, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[2,1,2,3]
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm10, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm4
; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: movdqa %xmm12, %xmm0
+; SSE-NEXT: movdqa %xmm7, %xmm0
; SSE-NEXT: pandn %xmm4, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
-; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
+; SSE-NEXT: pand %xmm4, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm5[3,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm5, %xmm9
+; SSE-NEXT: movdqa %xmm5, %xmm10
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,2]
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm1, %xmm5
+; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255]
; SSE-NEXT: pand %xmm7, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,1,2,3]
-; SSE-NEXT: movdqa %xmm8, %xmm1
+; SSE-NEXT: movdqa %xmm8, %xmm15
; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0]
; SSE-NEXT: movdqa %xmm7, %xmm4
; SSE-NEXT: pandn %xmm0, %xmm4
; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255]
-; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255]
+; SSE-NEXT: pand %xmm11, %xmm4
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm11, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm3
; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm11[8],xmm0[9],xmm11[9],xmm0[10],xmm11[10],xmm0[11],xmm11[11],xmm0[12],xmm11[12],xmm0[13],xmm11[13],xmm0[14],xmm11[14],xmm0[15],xmm11[15]
+; SSE-NEXT: movdqa %xmm12, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm12[8],xmm0[9],xmm12[9],xmm0[10],xmm12[10],xmm0[11],xmm12[11],xmm0[12],xmm12[12],xmm0[13],xmm12[13],xmm0[14],xmm12[14],xmm0[15],xmm12[15]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,4,5,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE-NEXT: movdqa %xmm5, %xmm4
-; SSE-NEXT: pandn %xmm3, %xmm5
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm14[0,1,2,3,6,6,6,6]
+; SSE-NEXT: pandn %xmm3, %xmm4
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm9[0,1,2,3,6,6,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,2,2,2]
-; SSE-NEXT: pand %xmm4, %xmm3
-; SSE-NEXT: por %xmm3, %xmm5
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
-; SSE-NEXT: movdqa %xmm4, %xmm3
-; SSE-NEXT: pandn %xmm5, %xmm3
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm6[0,1,2,3,6,6,6,6]
-; SSE-NEXT: movdqa %xmm6, %xmm15
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,2,2,2]
+; SSE-NEXT: pand %xmm5, %xmm3
+; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
+; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: pandn %xmm4, %xmm3
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm6[0,1,2,3,6,6,6,6]
+; SSE-NEXT: movdqa %xmm6, %xmm14
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,2,2,2]
; SSE-NEXT: movdqa %xmm7, %xmm6
-; SSE-NEXT: pandn %xmm5, %xmm6
-; SSE-NEXT: movdqa %xmm10, %xmm5
-; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm10[8],xmm5[9],xmm10[9],xmm5[10],xmm10[10],xmm5[11],xmm10[11],xmm5[12],xmm10[12],xmm5[13],xmm10[13],xmm5[14],xmm10[14],xmm5[15],xmm10[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm5[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,1,2,0,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
+; SSE-NEXT: pandn %xmm4, %xmm6
+; SSE-NEXT: movdqa %xmm1, %xmm5
+; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,1,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,2,0,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm4[0,1,2,3,5,5,5,5]
; SSE-NEXT: pand %xmm7, %xmm8
; SSE-NEXT: por %xmm6, %xmm8
-; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: pand %xmm2, %xmm8
; SSE-NEXT: por %xmm3, %xmm8
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm9[0,1,2,3,5,6,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm10[0,1,2,3,5,6,6,7]
+; SSE-NEXT: movdqa %xmm10, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,2]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
; SSE-NEXT: movdqa %xmm4, %xmm6
; SSE-NEXT: pandn %xmm3, %xmm6
; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: por %xmm8, %xmm6
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
+; SSE-NEXT: movdqa %xmm15, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm15[8],xmm3[9],xmm15[9],xmm3[10],xmm15[10],xmm3[11],xmm15[11],xmm3[12],xmm15[12],xmm3[13],xmm15[13],xmm3[14],xmm15[14],xmm3[15],xmm15[15]
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm3[1,1,2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm4, %xmm11
-; SSE-NEXT: pandn %xmm8, %xmm11
-; SSE-NEXT: pand %xmm4, %xmm6
-; SSE-NEXT: por %xmm6, %xmm11
-; SSE-NEXT: movdqa %xmm13, %xmm10
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm1, %xmm12
+; SSE-NEXT: pandn %xmm8, %xmm12
+; SSE-NEXT: pand %xmm1, %xmm6
+; SSE-NEXT: por %xmm6, %xmm12
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm13[0,1,2,3,4,5,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,1,3,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm1, %xmm8
+; SSE-NEXT: movdqa {{.*#+}} xmm15 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm15, %xmm8
; SSE-NEXT: pandn %xmm6, %xmm8
-; SSE-NEXT: pand %xmm1, %xmm11
-; SSE-NEXT: por %xmm11, %xmm8
+; SSE-NEXT: pand %xmm15, %xmm12
+; SSE-NEXT: por %xmm12, %xmm8
; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,1,1,3]
-; SSE-NEXT: movdqa %xmm4, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm8
; SSE-NEXT: pandn %xmm6, %xmm8
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm15[0,1,2,3,4,5,5,7]
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm14[0,1,2,3,4,5,5,7]
+; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,1,3,2]
-; SSE-NEXT: pand %xmm4, %xmm6
+; SSE-NEXT: pand %xmm1, %xmm6
; SSE-NEXT: por %xmm8, %xmm6
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0]
; SSE-NEXT: movdqa %xmm1, %xmm8
; SSE-NEXT: pandn %xmm6, %xmm8
-; SSE-NEXT: movdqa %xmm14, %xmm13
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm14[0,1,2,3,5,5,5,5]
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm9[0,1,2,3,5,5,5,5]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,2,2,2]
-; SSE-NEXT: movdqa %xmm7, %xmm11
-; SSE-NEXT: pandn %xmm6, %xmm11
+; SSE-NEXT: movdqa %xmm7, %xmm12
+; SSE-NEXT: pandn %xmm6, %xmm12
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,2,1]
; SSE-NEXT: pand %xmm7, %xmm6
-; SSE-NEXT: por %xmm11, %xmm6
+; SSE-NEXT: por %xmm12, %xmm6
; SSE-NEXT: pand %xmm1, %xmm6
; SSE-NEXT: por %xmm8, %xmm6
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm9[0,1,2,3,4,4,6,5]
-; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm10[0,1,2,3,4,4,6,5]
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[2,1,3,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm4, %xmm11
-; SSE-NEXT: pandn %xmm8, %xmm11
-; SSE-NEXT: pand %xmm4, %xmm6
-; SSE-NEXT: por %xmm6, %xmm11
+; SSE-NEXT: movdqa %xmm15, %xmm12
+; SSE-NEXT: pandn %xmm8, %xmm12
+; SSE-NEXT: pand %xmm15, %xmm6
+; SSE-NEXT: por %xmm6, %xmm12
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm3[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,0,0]
-; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT: movdqa %xmm9, %xmm8
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm1, %xmm8
; SSE-NEXT: pandn %xmm6, %xmm8
-; SSE-NEXT: pand %xmm9, %xmm11
-; SSE-NEXT: movdqa %xmm9, %xmm14
-; SSE-NEXT: por %xmm11, %xmm8
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm10[0,1,2,3,4,5,5,7]
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm1, %xmm12
+; SSE-NEXT: por %xmm12, %xmm8
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm13[0,1,2,3,4,5,5,7]
+; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
-; SSE-NEXT: movdqa %xmm11, %xmm9
-; SSE-NEXT: pandn %xmm6, %xmm9
-; SSE-NEXT: pand %xmm11, %xmm8
-; SSE-NEXT: por %xmm8, %xmm9
-; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
+; SSE-NEXT: movdqa %xmm1, %xmm10
+; SSE-NEXT: pandn %xmm6, %xmm10
+; SSE-NEXT: pand %xmm1, %xmm8
+; SSE-NEXT: por %xmm8, %xmm10
+; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
-; SSE-NEXT: movdqa %xmm11, %xmm6
+; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pandn %xmm5, %xmm6
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm15[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm14[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,2,2,2]
-; SSE-NEXT: pand %xmm11, %xmm5
+; SSE-NEXT: pand %xmm1, %xmm5
; SSE-NEXT: por %xmm5, %xmm6
-; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
-; SSE-NEXT: movdqa %xmm8, %xmm5
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
+; SSE-NEXT: movdqa %xmm1, %xmm5
; SSE-NEXT: pandn %xmm6, %xmm5
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
-; SSE-NEXT: movdqa %xmm4, %xmm6
+; SSE-NEXT: movdqa %xmm15, %xmm6
; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm13[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm9[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: pand %xmm15, %xmm0
; SSE-NEXT: por %xmm0, %xmm6
-; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: pand %xmm1, %xmm6
; SSE-NEXT: por %xmm5, %xmm6
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm1[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
; SSE-NEXT: movdqa %xmm7, %xmm5
; SSE-NEXT: pandn %xmm0, %xmm5
@@ -5609,216 +5606,216 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: por %xmm6, %xmm5
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm3[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,3]
-; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm11, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: pand %xmm11, %xmm5
; SSE-NEXT: por %xmm5, %xmm3
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm10[0,1,2,3,6,7,7,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm13[0,1,2,3,6,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,2]
-; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
-; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: pand %xmm5, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,0,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm4, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,0,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm9, %xmm6
+; SSE-NEXT: pandn %xmm3, %xmm6
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: movdqa %xmm4, %xmm11
-; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pand %xmm9, %xmm3
+; SSE-NEXT: por %xmm3, %xmm6
; SSE-NEXT: movdqa (%rcx), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm14, %xmm5
-; SSE-NEXT: pandn %xmm0, %xmm5
-; SSE-NEXT: movdqa (%rdx), %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm14, %xmm0
-; SSE-NEXT: movdqa %xmm14, %xmm9
-; SSE-NEXT: por %xmm0, %xmm5
-; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: pandn %xmm5, %xmm0
-; SSE-NEXT: pand %xmm12, %xmm3
-; SSE-NEXT: por %xmm3, %xmm0
-; SSE-NEXT: movdqa (%r9), %xmm15
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm15[2,1,2,3]
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,0]
-; SSE-NEXT: movdqa %xmm7, %xmm6
-; SSE-NEXT: pandn %xmm3, %xmm6
-; SSE-NEXT: movdqa (%r8), %xmm1
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[3,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm8
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm7, %xmm3
-; SSE-NEXT: por %xmm3, %xmm6
-; SSE-NEXT: movdqa (%rax), %xmm4
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm4[3,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm10, %xmm8
+; SSE-NEXT: pandn %xmm3, %xmm8
+; SSE-NEXT: movdqa (%rdx), %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm2, %xmm14
-; SSE-NEXT: pandn %xmm3, %xmm14
-; SSE-NEXT: pand %xmm2, %xmm6
-; SSE-NEXT: por %xmm6, %xmm14
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255]
-; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: pand %xmm10, %xmm3
+; SSE-NEXT: por %xmm3, %xmm8
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: pandn %xmm8, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm6
+; SSE-NEXT: por %xmm6, %xmm3
+; SSE-NEXT: movdqa (%r9), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,1,2,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,2,0]
+; SSE-NEXT: movdqa %xmm7, %xmm12
+; SSE-NEXT: pandn %xmm8, %xmm12
+; SSE-NEXT: movdqa (%r8), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: por %xmm8, %xmm12
+; SSE-NEXT: movdqa (%rax), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,4,4]
+; SSE-NEXT: movdqa %xmm11, %xmm14
+; SSE-NEXT: pandn %xmm8, %xmm14
+; SSE-NEXT: pand %xmm11, %xmm12
+; SSE-NEXT: por %xmm12, %xmm14
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255]
+; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: pandn %xmm14, %xmm1
-; SSE-NEXT: pand %xmm10, %xmm0
-; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm3
+; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 16(%rsi), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,0,3]
-; SSE-NEXT: movdqa %xmm11, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: movdqa 16(%rdi), %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm11, %xmm0
-; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: movdqa 16(%rcx), %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm9, %xmm6
-; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: movdqa 16(%rdx), %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm9, %xmm0
-; SSE-NEXT: por %xmm0, %xmm6
-; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: pandn %xmm6, %xmm0
-; SSE-NEXT: pand %xmm12, %xmm3
-; SSE-NEXT: por %xmm3, %xmm0
-; SSE-NEXT: movdqa 16(%r9), %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,1,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,0]
-; SSE-NEXT: movdqa %xmm7, %xmm6
-; SSE-NEXT: pandn %xmm3, %xmm6
-; SSE-NEXT: movdqa 16(%r8), %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm7, %xmm3
-; SSE-NEXT: por %xmm3, %xmm6
-; SSE-NEXT: movdqa 16(%rax), %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,0,3]
+; SSE-NEXT: movdqa %xmm9, %xmm8
+; SSE-NEXT: pandn %xmm3, %xmm8
+; SSE-NEXT: movdqa 16(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm2, %xmm14
-; SSE-NEXT: pandn %xmm3, %xmm14
-; SSE-NEXT: pand %xmm2, %xmm6
-; SSE-NEXT: por %xmm6, %xmm14
-; SSE-NEXT: movdqa %xmm10, %xmm1
-; SSE-NEXT: pandn %xmm14, %xmm1
-; SSE-NEXT: pand %xmm10, %xmm0
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 32(%rsi), %xmm0
+; SSE-NEXT: pand %xmm9, %xmm3
+; SSE-NEXT: por %xmm3, %xmm8
+; SSE-NEXT: movdqa 16(%rcx), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,1,0,3]
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: movdqa 32(%rdi), %xmm1
-; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm11, %xmm3
-; SSE-NEXT: por %xmm3, %xmm0
-; SSE-NEXT: movdqa 32(%rcx), %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,1,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT: movdqa %xmm9, %xmm5
-; SSE-NEXT: movdqa %xmm9, %xmm6
-; SSE-NEXT: pandn %xmm3, %xmm6
-; SSE-NEXT: movdqa 32(%rdx), %xmm9
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm9[3,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm10, %xmm12
+; SSE-NEXT: pandn %xmm3, %xmm12
+; SSE-NEXT: movdqa 16(%rdx), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm5, %xmm3
-; SSE-NEXT: por %xmm3, %xmm6
-; SSE-NEXT: pand %xmm12, %xmm0
-; SSE-NEXT: pandn %xmm6, %xmm12
-; SSE-NEXT: por %xmm0, %xmm12
-; SSE-NEXT: movdqa 32(%r9), %xmm0
+; SSE-NEXT: pand %xmm10, %xmm3
+; SSE-NEXT: por %xmm3, %xmm12
+; SSE-NEXT: movdqa %xmm4, %xmm3
+; SSE-NEXT: pandn %xmm12, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: por %xmm8, %xmm3
+; SSE-NEXT: movdqa 16(%r9), %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0]
-; SSE-NEXT: movdqa %xmm7, %xmm3
-; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: movdqa 32(%r8), %xmm11
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[3,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pand %xmm7, %xmm0
-; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,1,2,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,2,0]
+; SSE-NEXT: movdqa %xmm7, %xmm12
+; SSE-NEXT: pandn %xmm8, %xmm12
+; SSE-NEXT: movdqa 16(%r8), %xmm0
+; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pand %xmm7, %xmm8
+; SSE-NEXT: por %xmm8, %xmm12
+; SSE-NEXT: movdqa 16(%rax), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm0[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,4,4]
+; SSE-NEXT: movdqa %xmm11, %xmm14
+; SSE-NEXT: pandn %xmm8, %xmm14
+; SSE-NEXT: pand %xmm11, %xmm12
+; SSE-NEXT: por %xmm12, %xmm14
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: pandn %xmm14, %xmm1
; SSE-NEXT: pand %xmm2, %xmm3
-; SSE-NEXT: movdqa 32(%rax), %xmm13
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[3,3,3,3,4,5,6,7]
+; SSE-NEXT: por %xmm3, %xmm1
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 32(%rsi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm3[3,1,0,3]
+; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: pandn %xmm8, %xmm3
+; SSE-NEXT: movdqa 32(%rdi), %xmm1
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm1[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm1, %xmm14
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pand %xmm9, %xmm8
+; SSE-NEXT: por %xmm8, %xmm3
+; SSE-NEXT: movdqa 32(%rcx), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,1,2,3]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,0,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,4,4]
+; SSE-NEXT: movdqa %xmm10, %xmm12
+; SSE-NEXT: pandn %xmm8, %xmm12
+; SSE-NEXT: movdqa 32(%rdx), %xmm6
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm6[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pand %xmm10, %xmm8
+; SSE-NEXT: por %xmm8, %xmm12
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pandn %xmm12, %xmm0
+; SSE-NEXT: por %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm9
+; SSE-NEXT: movdqa 32(%r9), %xmm13
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[2,1,2,3]
; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: pand %xmm10, %xmm12
-; SSE-NEXT: pandn %xmm2, %xmm10
-; SSE-NEXT: por %xmm12, %xmm10
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,0]
+; SSE-NEXT: movdqa %xmm7, %xmm8
+; SSE-NEXT: pandn %xmm3, %xmm8
+; SSE-NEXT: movdqa 32(%r8), %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[3,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
-; SSE-NEXT: movdqa %xmm12, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: pshufhw $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: # xmm0 = mem[0,1,2,3,6,6,6,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: pand %xmm12, %xmm0
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm14 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
-; SSE-NEXT: movdqa %xmm14, %xmm0
-; SSE-NEXT: pandn %xmm1, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,6,6,6,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
-; SSE-NEXT: movdqa %xmm7, %xmm2
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pand %xmm7, %xmm3
+; SSE-NEXT: por %xmm3, %xmm8
+; SSE-NEXT: pand %xmm11, %xmm8
+; SSE-NEXT: movdqa 32(%rax), %xmm4
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm4[3,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
+; SSE-NEXT: pandn %xmm3, %xmm11
+; SSE-NEXT: por %xmm8, %xmm11
+; SSE-NEXT: pand %xmm2, %xmm9
+; SSE-NEXT: pandn %xmm11, %xmm2
+; SSE-NEXT: por %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
+; SSE-NEXT: movdqa %xmm10, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: pshufhw $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: # xmm1 = mem[0,1,2,3,6,6,6,6]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
+; SSE-NEXT: pand %xmm10, %xmm1
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
+; SSE-NEXT: movdqa %xmm11, %xmm3
+; SSE-NEXT: pandn %xmm2, %xmm3
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,6,6,6,6]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
+; SSE-NEXT: movdqa %xmm7, %xmm8
+; SSE-NEXT: pandn %xmm1, %xmm8
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; SSE-NEXT: movdqa %xmm5, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
@@ -5827,587 +5824,585 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; SSE-NEXT: pand %xmm7, %xmm1
-; SSE-NEXT: por %xmm2, %xmm1
-; SSE-NEXT: pand %xmm14, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm15[1,1,2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,5,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,2]
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,4,5,6,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm4, %xmm15
-; SSE-NEXT: pandn %xmm3, %xmm15
-; SSE-NEXT: pand %xmm4, %xmm2
-; SSE-NEXT: por %xmm2, %xmm15
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0]
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm15, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm1
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,7]
+; SSE-NEXT: por %xmm8, %xmm1
+; SSE-NEXT: pand %xmm11, %xmm1
+; SSE-NEXT: por %xmm3, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,2,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm2, %xmm8
+; SSE-NEXT: pandn %xmm3, %xmm8
+; SSE-NEXT: pshufhw $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE-NEXT: # xmm3 = mem[0,1,2,3,5,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,2]
+; SSE-NEXT: pand %xmm2, %xmm3
+; SSE-NEXT: por %xmm8, %xmm3
+; SSE-NEXT: pshufhw $164, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; SSE-NEXT: # xmm8 = mem[0,1,2,3,4,5,6,6]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[2,1,3,3]
+; SSE-NEXT: movdqa %xmm15, %xmm12
+; SSE-NEXT: pandn %xmm8, %xmm12
+; SSE-NEXT: pand %xmm15, %xmm3
+; SSE-NEXT: por %xmm3, %xmm12
+; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0]
+; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: pandn %xmm12, %xmm3
+; SSE-NEXT: pand %xmm9, %xmm1
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: movdqa %xmm12, %xmm8
-; SSE-NEXT: movdqa %xmm12, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm10, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
; SSE-NEXT: pshufhw $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[0,1,2,3,6,6,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
-; SSE-NEXT: pand %xmm12, %xmm1
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm14, %xmm12
-; SSE-NEXT: movdqa %xmm14, %xmm3
-; SSE-NEXT: pandn %xmm2, %xmm3
+; SSE-NEXT: pand %xmm10, %xmm1
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm11, %xmm8
+; SSE-NEXT: pandn %xmm3, %xmm8
; SSE-NEXT: pshufhw $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[0,1,2,3,6,6,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
-; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,1,2,3]
+; SSE-NEXT: movdqa %xmm7, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; SSE-NEXT: pand %xmm7, %xmm1
-; SSE-NEXT: por %xmm2, %xmm1
-; SSE-NEXT: pand %xmm14, %xmm1
; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm14 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm14, %xmm3
-; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: pshufhw $233, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: # xmm2 = mem[0,1,2,3,5,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,2]
-; SSE-NEXT: pand %xmm14, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: pshufhw $164, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT: # xmm3 = mem[0,1,2,3,4,5,6,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,3]
-; SSE-NEXT: movdqa %xmm4, %xmm15
-; SSE-NEXT: pandn %xmm3, %xmm15
-; SSE-NEXT: pand %xmm4, %xmm2
-; SSE-NEXT: por %xmm2, %xmm15
-; SSE-NEXT: movdqa %xmm10, %xmm0
-; SSE-NEXT: pandn %xmm15, %xmm0
-; SSE-NEXT: pand %xmm10, %xmm1
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,7]
+; SSE-NEXT: pand %xmm11, %xmm1
+; SSE-NEXT: por %xmm8, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; SSE-NEXT: movdqa %xmm2, %xmm8
+; SSE-NEXT: pandn %xmm3, %xmm8
+; SSE-NEXT: pshufhw $233, (%rsp), %xmm3 # 16-byte Folded Reload
+; SSE-NEXT: # xmm3 = mem[0,1,2,3,5,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,2]
+; SSE-NEXT: pand %xmm2, %xmm3
+; SSE-NEXT: por %xmm8, %xmm3
+; SSE-NEXT: pshufhw $164, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; SSE-NEXT: # xmm8 = mem[0,1,2,3,4,5,6,6]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[2,1,3,3]
+; SSE-NEXT: movdqa %xmm15, %xmm12
+; SSE-NEXT: pandn %xmm8, %xmm12
+; SSE-NEXT: pand %xmm15, %xmm3
+; SSE-NEXT: por %xmm3, %xmm12
+; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: pandn %xmm12, %xmm3
+; SSE-NEXT: pand %xmm9, %xmm1
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: movdqa %xmm8, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: pshufhw $170, (%rsp), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: # xmm1 = mem[0,1,2,3,6,6,6,6]
+; SSE-NEXT: movdqa %xmm10, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm14[0,1,2,3,6,6,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
-; SSE-NEXT: pand %xmm8, %xmm1
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm12, %xmm3
-; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm9[0,1,2,3,6,6,6,6]
+; SSE-NEXT: pand %xmm10, %xmm1
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm11, %xmm8
+; SSE-NEXT: pandn %xmm3, %xmm8
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,6,6,6,6]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
-; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,1,2,3]
+; SSE-NEXT: movdqa %xmm7, %xmm3
+; SSE-NEXT: pandn %xmm1, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; SSE-NEXT: pand %xmm7, %xmm1
-; SSE-NEXT: por %xmm2, %xmm1
-; SSE-NEXT: pand %xmm12, %xmm1
; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; SSE-NEXT: movdqa %xmm14, %xmm3
-; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm11[0,1,2,3,5,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,2]
-; SSE-NEXT: pand %xmm14, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm13[0,1,2,3,4,5,6,6]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,3]
-; SSE-NEXT: movdqa %xmm4, %xmm15
-; SSE-NEXT: pandn %xmm3, %xmm15
-; SSE-NEXT: pand %xmm4, %xmm2
-; SSE-NEXT: por %xmm2, %xmm15
-; SSE-NEXT: pand %xmm10, %xmm1
-; SSE-NEXT: pandn %xmm15, %xmm10
-; SSE-NEXT: por %xmm1, %xmm10
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pand %xmm11, %xmm1
+; SSE-NEXT: por %xmm8, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[1,1,2,3]
+; SSE-NEXT: movdqa %xmm2, %xmm8
+; SSE-NEXT: pandn %xmm3, %xmm8
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,5,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,2]
+; SSE-NEXT: pand %xmm2, %xmm3
+; SSE-NEXT: por %xmm8, %xmm3
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm4[0,1,2,3,4,5,6,6]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[2,1,3,3]
+; SSE-NEXT: movdqa %xmm15, %xmm12
+; SSE-NEXT: pandn %xmm8, %xmm12
+; SSE-NEXT: pand %xmm15, %xmm3
+; SSE-NEXT: por %xmm3, %xmm12
+; SSE-NEXT: pand %xmm9, %xmm1
+; SSE-NEXT: pandn %xmm12, %xmm9
+; SSE-NEXT: por %xmm1, %xmm9
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm5[0,0,2,1,4,5,6,7]
-; SSE-NEXT: movdqa %xmm5, %xmm14
+; SSE-NEXT: movdqa %xmm5, %xmm11
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm6, %xmm8
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[0,0,0,0,4,5,6,7]
+; SSE-NEXT: movdqa %xmm15, %xmm8
+; SSE-NEXT: pandn %xmm1, %xmm8
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm5[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE-NEXT: pand %xmm4, %xmm1
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm0, %xmm15
-; SSE-NEXT: pandn %xmm2, %xmm15
+; SSE-NEXT: pand %xmm15, %xmm1
+; SSE-NEXT: por %xmm1, %xmm8
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm1, %xmm12
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pandn %xmm8, %xmm12
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,2,1,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm1[0,1,1,0]
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
+; SSE-NEXT: movdqa %xmm3, %xmm1
+; SSE-NEXT: pandn %xmm8, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm4[0,2,1,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,1,0]
-; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
-; SSE-NEXT: movdqa %xmm9, %xmm1
-; SSE-NEXT: pandn %xmm2, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,0,2,1,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
-; SSE-NEXT: pand %xmm9, %xmm2
-; SSE-NEXT: movdqa %xmm9, %xmm6
-; SSE-NEXT: por %xmm2, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: por %xmm15, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm5[0,1,1,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
-; SSE-NEXT: movdqa %xmm0, %xmm15
-; SSE-NEXT: pandn %xmm2, %xmm15
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm4[0,0,2,1,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,0,2,1]
+; SSE-NEXT: pand %xmm3, %xmm8
+; SSE-NEXT: movdqa %xmm3, %xmm9
+; SSE-NEXT: por %xmm8, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: por %xmm12, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm6[0,1,1,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,0,2,1]
+; SSE-NEXT: movdqa %xmm10, %xmm12
+; SSE-NEXT: pandn %xmm8, %xmm12
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm13[0,0,0,0,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: por %xmm2, %xmm15
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm11[0,0,0,0,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm13[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,0,0,0]
+; SSE-NEXT: pand %xmm10, %xmm8
+; SSE-NEXT: por %xmm8, %xmm12
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm2[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,0,0,0]
; SSE-NEXT: movdqa %xmm7, %xmm10
-; SSE-NEXT: pandn %xmm2, %xmm10
-; SSE-NEXT: pand %xmm7, %xmm15
-; SSE-NEXT: por %xmm15, %xmm10
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pandn %xmm10, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,1,2,2]
-; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: pandn %xmm1, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[1,1,2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm3, %xmm9
+; SSE-NEXT: pandn %xmm8, %xmm10
+; SSE-NEXT: pand %xmm7, %xmm12
+; SSE-NEXT: por %xmm12, %xmm10
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: pandn %xmm10, %xmm8
+; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: por %xmm1, %xmm8
+; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,2,2]
+; SSE-NEXT: movdqa %xmm7, %xmm8
+; SSE-NEXT: pandn %xmm1, %xmm8
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm4[1,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm1[0,0,2,1]
; SSE-NEXT: pand %xmm7, %xmm10
-; SSE-NEXT: por %xmm2, %xmm10
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; SSE-NEXT: movdqa %xmm0, %xmm15
-; SSE-NEXT: movdqa %xmm0, %xmm12
-; SSE-NEXT: pandn %xmm10, %xmm15
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm14[1,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm2[0,1,2,3,7,5,6,4]
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: movdqa %xmm6, %xmm2
-; SSE-NEXT: pandn %xmm10, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm8[1,1,2,2,4,5,6,7]
+; SSE-NEXT: por %xmm8, %xmm10
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; SSE-NEXT: movdqa %xmm1, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm14
+; SSE-NEXT: pandn %xmm10, %xmm8
+; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm11[1,1,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,7,5,6,4]
+; SSE-NEXT: movdqa %xmm9, %xmm12
+; SSE-NEXT: pandn %xmm10, %xmm12
+; SSE-NEXT: movdqa %xmm5, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm5[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,0,2,1]
-; SSE-NEXT: pand %xmm0, %xmm10
-; SSE-NEXT: por %xmm10, %xmm2
-; SSE-NEXT: pand %xmm12, %xmm2
-; SSE-NEXT: por %xmm15, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm5[0,2,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,1,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm0, %xmm15
-; SSE-NEXT: pandn %xmm10, %xmm15
-; SSE-NEXT: movdqa %xmm13, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm13[1,1,1,1,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,0,0,0]
-; SSE-NEXT: pand %xmm0, %xmm10
-; SSE-NEXT: por %xmm10, %xmm15
-; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm11[1,1,1,1,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,0,0,0]
-; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT: movdqa %xmm13, %xmm0
-; SSE-NEXT: pandn %xmm10, %xmm0
-; SSE-NEXT: pand %xmm13, %xmm15
-; SSE-NEXT: por %xmm15, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
-; SSE-NEXT: movdqa %xmm10, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: pand %xmm10, %xmm2
-; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: pand %xmm9, %xmm10
+; SSE-NEXT: por %xmm10, %xmm12
+; SSE-NEXT: pand %xmm1, %xmm12
+; SSE-NEXT: por %xmm8, %xmm12
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm6[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,1,1,3]
+; SSE-NEXT: movdqa %xmm15, %xmm10
+; SSE-NEXT: pandn %xmm8, %xmm10
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm13[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,0,0,0]
+; SSE-NEXT: pand %xmm15, %xmm8
+; SSE-NEXT: por %xmm8, %xmm10
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm2[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,0,0,0]
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm1, %xmm9
+; SSE-NEXT: pandn %xmm8, %xmm9
+; SSE-NEXT: pand %xmm1, %xmm10
+; SSE-NEXT: movdqa %xmm1, %xmm8
+; SSE-NEXT: por %xmm10, %xmm9
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: pandn %xmm9, %xmm1
+; SSE-NEXT: pand %xmm5, %xmm12
+; SSE-NEXT: por %xmm12, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,3,3]
-; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[2,1,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm8, %xmm10
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm7, %xmm0
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm12, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm4[0,1,2,3,5,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm11[2,2,3,3]
+; SSE-NEXT: movdqa %xmm7, %xmm9
+; SSE-NEXT: pandn %xmm5, %xmm9
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[2,1,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm3, %xmm11
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,0,2,1]
+; SSE-NEXT: pand %xmm7, %xmm5
+; SSE-NEXT: por %xmm9, %xmm5
+; SSE-NEXT: movdqa %xmm14, %xmm9
+; SSE-NEXT: pandn %xmm5, %xmm9
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm13, %xmm15
-; SSE-NEXT: movdqa %xmm13, %xmm8
-; SSE-NEXT: pandn %xmm0, %xmm8
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm9[0,2,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm8, %xmm10
+; SSE-NEXT: pandn %xmm0, %xmm10
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSE-NEXT: pand %xmm13, %xmm0
-; SSE-NEXT: por %xmm0, %xmm8
-; SSE-NEXT: pand %xmm12, %xmm8
-; SSE-NEXT: por %xmm2, %xmm8
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: por %xmm0, %xmm10
+; SSE-NEXT: pand %xmm14, %xmm10
+; SSE-NEXT: por %xmm9, %xmm10
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,0,0,0,0,255,255,255,0,0,0,0,255,255]
; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm6
-; SSE-NEXT: pandn %xmm8, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm5[0,1,2,3,4,6,5,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,2]
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm10, %xmm0
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,1,3,2]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
-; SSE-NEXT: movdqa %xmm1, %xmm5
-; SSE-NEXT: pandn %xmm2, %xmm5
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[2,2,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
-; SSE-NEXT: pand %xmm1, %xmm2
-; SSE-NEXT: por %xmm2, %xmm5
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm11[2,2,2,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE-NEXT: movdqa %xmm1, %xmm9
+; SSE-NEXT: pandn %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm13, %xmm12
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm13[2,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,2,1]
+; SSE-NEXT: pand %xmm1, %xmm6
+; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: movdqa %xmm2, %xmm10
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm2[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,0,0]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm1, %xmm8
-; SSE-NEXT: pandn %xmm2, %xmm8
-; SSE-NEXT: pand %xmm1, %xmm5
-; SSE-NEXT: por %xmm5, %xmm8
-; SSE-NEXT: pand %xmm6, %xmm8
-; SSE-NEXT: por %xmm0, %xmm8
-; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,1,1,3]
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm10[0,1,2,3,4,5,5,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,2]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0]
; SSE-NEXT: movdqa %xmm1, %xmm5
-; SSE-NEXT: pandn %xmm0, %xmm5
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm9[0,1,2,3,5,5,5,5]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm7, %xmm8
-; SSE-NEXT: pandn %xmm0, %xmm8
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm14[1,2,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm7, %xmm2
-; SSE-NEXT: por %xmm8, %xmm2
-; SSE-NEXT: pand %xmm1, %xmm2
-; SSE-NEXT: por %xmm5, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[1,2,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: movdqa %xmm15, %xmm5
-; SSE-NEXT: pandn %xmm0, %xmm5
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm3[0,1,2,3,4,4,6,5]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
-; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: pandn %xmm6, %xmm5
+; SSE-NEXT: pand %xmm1, %xmm9
+; SSE-NEXT: por %xmm9, %xmm5
+; SSE-NEXT: pand %xmm3, %xmm5
; SSE-NEXT: por %xmm0, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
+; SSE-NEXT: movdqa %xmm1, %xmm6
+; SSE-NEXT: pandn %xmm0, %xmm6
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,4,5,5,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,2]
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: por %xmm6, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0]
+; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: pandn %xmm0, %xmm6
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm4[0,1,2,3,5,5,5,5]
+; SSE-NEXT: movdqa %xmm4, %xmm8
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
+; SSE-NEXT: movdqa %xmm7, %xmm9
+; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[1,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
+; SSE-NEXT: pand %xmm7, %xmm0
+; SSE-NEXT: por %xmm9, %xmm0
+; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: por %xmm6, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm3[1,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,0,0]
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm4, %xmm9
+; SSE-NEXT: pandn %xmm6, %xmm9
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm13[0,1,2,3,4,4,6,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,1,3,3]
+; SSE-NEXT: pand %xmm4, %xmm6
+; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm10[0,1,2,3,4,5,5,7]
+; SSE-NEXT: movdqa %xmm10, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
+; SSE-NEXT: movdqa %xmm13, %xmm10
+; SSE-NEXT: pandn %xmm6, %xmm10
+; SSE-NEXT: pand %xmm13, %xmm9
+; SSE-NEXT: por %xmm9, %xmm10
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
+; SSE-NEXT: movdqa %xmm6, %xmm4
+; SSE-NEXT: pandn %xmm10, %xmm4
+; SSE-NEXT: pand %xmm6, %xmm0
+; SSE-NEXT: por %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,6,5,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
-; SSE-NEXT: movdqa %xmm12, %xmm8
-; SSE-NEXT: pandn %xmm0, %xmm8
-; SSE-NEXT: pand %xmm12, %xmm5
-; SSE-NEXT: por %xmm5, %xmm8
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: pandn %xmm8, %xmm0
-; SSE-NEXT: pand %xmm3, %xmm2
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,6,5,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: movdqa %xmm12, %xmm2
+; SSE-NEXT: movdqa %xmm13, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm10[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: pand %xmm13, %xmm0
; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
-; SSE-NEXT: movdqa %xmm5, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
+; SSE-NEXT: movdqa %xmm4, %xmm6
+; SSE-NEXT: pandn %xmm2, %xmm6
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,1,3,3]
+; SSE-NEXT: movdqa %xmm15, %xmm0
; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm14[0,1,2,3,4,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[2,1,3,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: pandn %xmm6, %xmm2
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm9[0,1,2,3,7,7,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,2,2,2]
-; SSE-NEXT: pand %xmm3, %xmm6
-; SSE-NEXT: movdqa %xmm3, %xmm10
-; SSE-NEXT: por %xmm6, %xmm2
-; SSE-NEXT: pand %xmm5, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm4[0,1,2,3,7,7,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm7, %xmm6
-; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm13[0,1,2,3,5,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,3]
-; SSE-NEXT: pand %xmm7, %xmm0
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,2,2,2]
+; SSE-NEXT: pand %xmm15, %xmm2
+; SSE-NEXT: por %xmm2, %xmm0
+; SSE-NEXT: pand %xmm4, %xmm0
; SSE-NEXT: por %xmm6, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm11[0,1,2,3,6,7,7,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,2,2,2]
+; SSE-NEXT: movdqa %xmm7, %xmm6
+; SSE-NEXT: pandn %xmm2, %xmm6
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,5,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,2,2,3]
+; SSE-NEXT: pand %xmm7, %xmm2
+; SSE-NEXT: por %xmm6, %xmm2
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm5[0,1,2,3,6,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,1,3,2]
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
-; SSE-NEXT: movdqa %xmm1, %xmm8
-; SSE-NEXT: pandn %xmm6, %xmm8
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: por %xmm0, %xmm8
+; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
+; SSE-NEXT: movdqa %xmm13, %xmm9
+; SSE-NEXT: pandn %xmm6, %xmm9
+; SSE-NEXT: pand %xmm13, %xmm2
+; SSE-NEXT: movdqa %xmm13, %xmm8
+; SSE-NEXT: por %xmm2, %xmm9
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: pandn %xmm8, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm2
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm9[0,0,2,1,4,5,6,7]
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pandn %xmm9, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm15, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: pand %xmm15, %xmm0
; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pandn %xmm2, %xmm6
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm5[0,2,1,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,0]
-; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm8, %xmm0
; SSE-NEXT: pandn %xmm2, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,0,2,1,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm10[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
-; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm2
; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm6, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,1,1,3,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm12[0,1,1,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
-; SSE-NEXT: movdqa %xmm12, %xmm6
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
+; SSE-NEXT: movdqa %xmm3, %xmm6
; SSE-NEXT: pandn %xmm2, %xmm6
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm13[0,0,0,0,4,5,6,7]
+; SSE-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; SSE-NEXT: pand %xmm12, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: por %xmm2, %xmm6
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm14[0,0,0,0,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm4[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; SSE-NEXT: movdqa %xmm7, %xmm8
-; SSE-NEXT: pandn %xmm2, %xmm8
+; SSE-NEXT: movdqa %xmm7, %xmm9
+; SSE-NEXT: pandn %xmm2, %xmm9
; SSE-NEXT: pand %xmm7, %xmm6
-; SSE-NEXT: por %xmm6, %xmm8
+; SSE-NEXT: por %xmm6, %xmm9
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
-; SSE-NEXT: movdqa %xmm2, %xmm6
-; SSE-NEXT: pandn %xmm8, %xmm6
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: pandn %xmm9, %xmm3
; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: por %xmm0, %xmm6
-; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,1,2,2]
; SSE-NEXT: movdqa %xmm7, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[1,1,2,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[1,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
; SSE-NEXT: pand %xmm7, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; SSE-NEXT: movdqa %xmm12, %xmm2
+; SSE-NEXT: movdqa %xmm14, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,2,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[1,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm0[0,1,2,3,7,5,6,4]
-; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm8, %xmm0
; SSE-NEXT: pandn %xmm6, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm11[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,2,1]
-; SSE-NEXT: pand %xmm4, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm6
; SSE-NEXT: por %xmm6, %xmm0
-; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: pand %xmm14, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm12[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
-; SSE-NEXT: movdqa %xmm10, %xmm6
+; SSE-NEXT: movdqa %xmm15, %xmm6
; SSE-NEXT: pandn %xmm2, %xmm6
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm13[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[1,1,1,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; SSE-NEXT: pand %xmm10, %xmm2
+; SSE-NEXT: pand %xmm15, %xmm2
; SSE-NEXT: por %xmm2, %xmm6
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm14[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm4[1,1,1,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; SSE-NEXT: movdqa %xmm15, %xmm8
-; SSE-NEXT: pandn %xmm2, %xmm8
-; SSE-NEXT: pand %xmm15, %xmm6
-; SSE-NEXT: movdqa %xmm15, %xmm10
-; SSE-NEXT: por %xmm6, %xmm8
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
-; SSE-NEXT: movdqa %xmm6, %xmm2
-; SSE-NEXT: pandn %xmm8, %xmm2
-; SSE-NEXT: pand %xmm6, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm8, %xmm9
+; SSE-NEXT: pandn %xmm2, %xmm9
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: pandn %xmm9, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,2,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,2,3,3]
; SSE-NEXT: movdqa %xmm7, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[2,1,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm11, %xmm14
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
; SSE-NEXT: pand %xmm7, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm12, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm15, %xmm6
+; SSE-NEXT: movdqa %xmm8, %xmm10
+; SSE-NEXT: movdqa %xmm8, %xmm6
; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: pand %xmm8, %xmm0
; SSE-NEXT: por %xmm0, %xmm6
-; SSE-NEXT: pand %xmm12, %xmm6
+; SSE-NEXT: pand %xmm3, %xmm6
; SSE-NEXT: por %xmm2, %xmm6
-; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,0,0,0,0,255,255,255,0,0,0,0,255,255]
-; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,0,0,0,0,255,255,255,0,0,0,0,255,255]
+; SSE-NEXT: movdqa %xmm3, %xmm0
; SSE-NEXT: pandn %xmm6, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,6,5,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,4,6,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,2]
-; SSE-NEXT: movdqa %xmm4, %xmm12
-; SSE-NEXT: movdqa %xmm4, %xmm6
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
+; SSE-NEXT: movdqa %xmm5, %xmm6
; SSE-NEXT: pandn %xmm2, %xmm6
-; SSE-NEXT: movdqa %xmm13, %xmm5
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm13[2,2,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm1, %xmm8
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,1]
-; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: pand %xmm5, %xmm2
; SSE-NEXT: por %xmm2, %xmm6
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm14[2,2,2,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm2[0,0,0,0]
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: pandn %xmm8, %xmm4
-; SSE-NEXT: pand %xmm2, %xmm6
-; SSE-NEXT: por %xmm6, %xmm4
-; SSE-NEXT: pand %xmm9, %xmm4
-; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[0,1,1,3]
-; SSE-NEXT: movdqa %xmm2, %xmm6
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm4[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm2[0,0,0,0]
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pandn %xmm9, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm6
+; SSE-NEXT: por %xmm6, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm2
+; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,1,3]
+; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,4,5,5,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm14[0,1,2,3,4,5,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,2]
-; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm6, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0]
-; SSE-NEXT: movdqa %xmm2, %xmm6
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0]
+; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm1[0,1,2,3,5,5,5,5]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,5,5,5,5]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm7, %xmm8
-; SSE-NEXT: pandn %xmm0, %xmm8
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm9[1,2,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm7, %xmm9
+; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm5[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
; SSE-NEXT: pand %xmm7, %xmm0
-; SSE-NEXT: por %xmm8, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: por %xmm9, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm6, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm13[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,0,0]
-; SSE-NEXT: movdqa %xmm10, %xmm8
-; SSE-NEXT: pandn %xmm6, %xmm8
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm5[0,1,2,3,4,4,6,5]
+; SSE-NEXT: movdqa %xmm10, %xmm9
+; SSE-NEXT: pandn %xmm6, %xmm9
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm8[0,1,2,3,4,4,6,5]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,1,3,3]
; SSE-NEXT: pand %xmm10, %xmm6
-; SSE-NEXT: por %xmm6, %xmm8
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm14[0,1,2,3,4,5,5,7]
-; SSE-NEXT: movdqa %xmm14, %xmm2
+; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm4[0,1,2,3,4,5,5,7]
+; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
-; SSE-NEXT: movdqa %xmm3, %xmm10
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
+; SSE-NEXT: movdqa %xmm2, %xmm10
; SSE-NEXT: pandn %xmm6, %xmm10
-; SSE-NEXT: pand %xmm3, %xmm8
-; SSE-NEXT: movdqa %xmm3, %xmm6
-; SSE-NEXT: por %xmm8, %xmm10
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
-; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm9
+; SSE-NEXT: movdqa %xmm2, %xmm12
+; SSE-NEXT: por %xmm9, %xmm10
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
+; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: pandn %xmm10, %xmm4
-; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm15[0,1,2,3,6,5,7,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm3[0,1,2,3,6,5,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: movdqa %xmm6, %xmm3
+; SSE-NEXT: movdqa %xmm12, %xmm6
; SSE-NEXT: pandn %xmm0, %xmm6
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm14[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: pand %xmm3, %xmm0
-; SSE-NEXT: movdqa %xmm3, %xmm14
+; SSE-NEXT: pand %xmm12, %xmm0
; SSE-NEXT: por %xmm0, %xmm6
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
-; SSE-NEXT: movdqa %xmm3, %xmm8
-; SSE-NEXT: pandn %xmm6, %xmm8
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm9[0,1,2,3,4,6,6,7]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pandn %xmm6, %xmm9
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[2,1,3,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm15, %xmm0
; SSE-NEXT: pandn %xmm6, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm1[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm11[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,2,2,2]
-; SSE-NEXT: pand %xmm4, %xmm6
-; SSE-NEXT: movdqa %xmm4, %xmm11
+; SSE-NEXT: pand %xmm15, %xmm6
; SSE-NEXT: por %xmm6, %xmm0
-; SSE-NEXT: pand %xmm3, %xmm0
-; SSE-NEXT: por %xmm8, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm5[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: por %xmm9, %xmm0
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm8[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,2,2,2]
-; SSE-NEXT: movdqa %xmm7, %xmm8
-; SSE-NEXT: pandn %xmm6, %xmm8
+; SSE-NEXT: movdqa %xmm7, %xmm9
+; SSE-NEXT: pandn %xmm6, %xmm9
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm13[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,2,2,3]
; SSE-NEXT: pand %xmm7, %xmm6
-; SSE-NEXT: por %xmm8, %xmm6
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm2[0,1,2,3,6,7,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[2,1,3,2]
-; SSE-NEXT: movdqa %xmm12, %xmm10
-; SSE-NEXT: pandn %xmm8, %xmm10
-; SSE-NEXT: pand %xmm12, %xmm6
-; SSE-NEXT: movdqa %xmm12, %xmm4
+; SSE-NEXT: por %xmm9, %xmm6
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm1[0,1,2,3,6,7,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[2,1,3,2]
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
+; SSE-NEXT: movdqa %xmm4, %xmm10
+; SSE-NEXT: pandn %xmm9, %xmm10
+; SSE-NEXT: pand %xmm4, %xmm6
+; SSE-NEXT: movdqa %xmm4, %xmm11
; SSE-NEXT: por %xmm6, %xmm10
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0]
; SSE-NEXT: movdqa %xmm1, %xmm2
@@ -6415,278 +6410,273 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm9[0,0,2,1,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSE-NEXT: movdqa %xmm11, %xmm1
-; SSE-NEXT: movdqa %xmm11, %xmm8
-; SSE-NEXT: pandn %xmm0, %xmm8
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[0,0,0,0,4,5,6,7]
+; SSE-NEXT: movdqa %xmm15, %xmm6
+; SSE-NEXT: pandn %xmm0, %xmm6
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: por %xmm0, %xmm8
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm3, %xmm10
-; SSE-NEXT: pandn %xmm8, %xmm10
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm5[0,2,1,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm0[0,1,1,0]
-; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: pandn %xmm8, %xmm0
-; SSE-NEXT: movdqa (%rsp), %xmm13 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm13[0,0,2,1,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,0,2,1]
-; SSE-NEXT: pand %xmm12, %xmm8
-; SSE-NEXT: por %xmm8, %xmm0
-; SSE-NEXT: pand %xmm3, %xmm0
-; SSE-NEXT: por %xmm10, %xmm0
+; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: por %xmm0, %xmm6
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm1, %xmm9
+; SSE-NEXT: pandn %xmm6, %xmm9
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm3[0,1,1,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,0,2,1]
-; SSE-NEXT: movdqa %xmm14, %xmm10
-; SSE-NEXT: pandn %xmm8, %xmm10
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm1[0,0,0,0,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,0,0,0]
-; SSE-NEXT: pand %xmm14, %xmm8
-; SSE-NEXT: por %xmm8, %xmm10
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm6[0,0,0,0,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,0,0,0]
-; SSE-NEXT: movdqa %xmm7, %xmm15
-; SSE-NEXT: pandn %xmm8, %xmm15
-; SSE-NEXT: pand %xmm7, %xmm10
-; SSE-NEXT: por %xmm10, %xmm15
-; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
-; SSE-NEXT: movdqa %xmm8, %xmm10
-; SSE-NEXT: pandn %xmm15, %xmm10
-; SSE-NEXT: pand %xmm8, %xmm0
-; SSE-NEXT: por %xmm0, %xmm10
-; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,1,2,2]
-; SSE-NEXT: movdqa %xmm7, %xmm8
-; SSE-NEXT: pandn %xmm0, %xmm8
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[1,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm7, %xmm0
-; SSE-NEXT: por %xmm8, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; SSE-NEXT: movdqa %xmm12, %xmm8
-; SSE-NEXT: pandn %xmm0, %xmm8
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,2,1]
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm0[0,1,2,3,7,5,6,4]
-; SSE-NEXT: movdqa %xmm4, %xmm14
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,2,1,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[0,1,1,0]
; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: pandn %xmm10, %xmm0
-; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm11[1,1,2,2,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,0,2,1]
-; SSE-NEXT: pand %xmm4, %xmm10
-; SSE-NEXT: por %xmm10, %xmm0
-; SSE-NEXT: pand %xmm12, %xmm0
-; SSE-NEXT: movdqa %xmm12, %xmm4
-; SSE-NEXT: por %xmm8, %xmm0
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm3[0,2,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,1,1,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm2, %xmm10
-; SSE-NEXT: pandn %xmm8, %xmm10
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm1[1,1,1,1,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,0,0,0]
-; SSE-NEXT: pand %xmm2, %xmm8
-; SSE-NEXT: por %xmm8, %xmm10
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm6[1,1,1,1,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,0,0,0]
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT: movdqa %xmm2, %xmm15
-; SSE-NEXT: pandn %xmm8, %xmm15
-; SSE-NEXT: pand %xmm2, %xmm10
-; SSE-NEXT: movdqa %xmm2, %xmm12
-; SSE-NEXT: por %xmm10, %xmm15
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
-; SSE-NEXT: movdqa %xmm2, %xmm8
-; SSE-NEXT: pandn %xmm15, %xmm8
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: por %xmm0, %xmm8
-; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,2,3,3]
+; SSE-NEXT: pandn %xmm6, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm2[0,0,2,1,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,2,1]
+; SSE-NEXT: pand %xmm4, %xmm6
+; SSE-NEXT: por %xmm6, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: por %xmm9, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[0,1,1,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,2,1]
+; SSE-NEXT: movdqa %xmm12, %xmm9
+; SSE-NEXT: pandn %xmm6, %xmm9
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm1[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,0,0]
+; SSE-NEXT: pand %xmm12, %xmm6
+; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm4[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,0,0]
; SSE-NEXT: movdqa %xmm7, %xmm10
-; SSE-NEXT: pandn %xmm0, %xmm10
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[2,1,3,3,4,5,6,7]
+; SSE-NEXT: pandn %xmm6, %xmm10
+; SSE-NEXT: pand %xmm7, %xmm9
+; SSE-NEXT: por %xmm9, %xmm10
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
+; SSE-NEXT: movdqa %xmm6, %xmm9
+; SSE-NEXT: pandn %xmm10, %xmm9
+; SSE-NEXT: pand %xmm6, %xmm0
+; SSE-NEXT: por %xmm0, %xmm9
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,2,2]
+; SSE-NEXT: movdqa %xmm7, %xmm6
+; SSE-NEXT: pandn %xmm0, %xmm6
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[1,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
; SSE-NEXT: pand %xmm7, %xmm0
-; SSE-NEXT: por %xmm10, %xmm0
-; SSE-NEXT: movdqa %xmm4, %xmm10
-; SSE-NEXT: pandn %xmm0, %xmm10
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,5,6,6,7]
+; SSE-NEXT: por %xmm6, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm14 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; SSE-NEXT: movdqa %xmm14, %xmm6
+; SSE-NEXT: pandn %xmm0, %xmm6
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[1,1,2,1]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm0[0,1,2,3,7,5,6,4]
+; SSE-NEXT: movdqa %xmm11, %xmm0
+; SSE-NEXT: pandn %xmm9, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm8[1,1,2,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,0,2,1]
+; SSE-NEXT: pand %xmm11, %xmm9
+; SSE-NEXT: movdqa %xmm11, %xmm12
+; SSE-NEXT: por %xmm9, %xmm0
+; SSE-NEXT: pand %xmm14, %xmm0
+; SSE-NEXT: por %xmm6, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,1,3]
+; SSE-NEXT: movdqa %xmm15, %xmm9
+; SSE-NEXT: pandn %xmm6, %xmm9
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm1[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,0,0]
+; SSE-NEXT: pand %xmm15, %xmm6
+; SSE-NEXT: por %xmm6, %xmm9
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm4[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,0,0]
+; SSE-NEXT: movdqa {{.*#+}} xmm11 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm11, %xmm10
+; SSE-NEXT: pandn %xmm6, %xmm10
+; SSE-NEXT: pand %xmm11, %xmm9
+; SSE-NEXT: por %xmm9, %xmm10
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm6, %xmm9
+; SSE-NEXT: pandn %xmm10, %xmm9
+; SSE-NEXT: pand %xmm6, %xmm0
+; SSE-NEXT: por %xmm0, %xmm9
+; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[2,2,3,3]
+; SSE-NEXT: movdqa %xmm7, %xmm9
+; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[2,1,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
+; SSE-NEXT: pand %xmm7, %xmm0
+; SSE-NEXT: por %xmm9, %xmm0
+; SSE-NEXT: movdqa %xmm14, %xmm9
+; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm3[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm12, %xmm15
-; SSE-NEXT: pandn %xmm0, %xmm15
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[0,2,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm11, %xmm10
+; SSE-NEXT: pandn %xmm0, %xmm10
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSE-NEXT: pand %xmm12, %xmm0
-; SSE-NEXT: por %xmm0, %xmm15
-; SSE-NEXT: pand %xmm4, %xmm15
-; SSE-NEXT: por %xmm10, %xmm15
+; SSE-NEXT: pand %xmm11, %xmm0
+; SSE-NEXT: por %xmm0, %xmm10
+; SSE-NEXT: pand %xmm14, %xmm10
+; SSE-NEXT: por %xmm9, %xmm10
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,0,0,0,255,255,255,0,0,0,0,255,255]
-; SSE-NEXT: movdqa %xmm2, %xmm10
-; SSE-NEXT: pandn %xmm15, %xmm10
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm3[0,1,2,3,4,6,5,7]
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pandn %xmm10, %xmm9
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,4,6,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,2]
-; SSE-NEXT: movdqa %xmm14, %xmm15
-; SSE-NEXT: pandn %xmm0, %xmm15
-; SSE-NEXT: movdqa %xmm1, %xmm8
+; SSE-NEXT: movdqa %xmm12, %xmm10
+; SSE-NEXT: pandn %xmm0, %xmm10
+; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[2,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm14, %xmm0
-; SSE-NEXT: movdqa %xmm14, %xmm4
-; SSE-NEXT: por %xmm0, %xmm15
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm6[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: por %xmm0, %xmm10
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; SSE-NEXT: movdqa %xmm1, %xmm5
+; SSE-NEXT: pandn %xmm0, %xmm5
+; SSE-NEXT: pand %xmm1, %xmm10
+; SSE-NEXT: por %xmm10, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: por %xmm9, %xmm5
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[0,1,1,3]
; SSE-NEXT: movdqa %xmm1, %xmm9
; SSE-NEXT: pandn %xmm0, %xmm9
-; SSE-NEXT: pand %xmm1, %xmm15
-; SSE-NEXT: por %xmm15, %xmm9
-; SSE-NEXT: pand %xmm2, %xmm9
-; SSE-NEXT: por %xmm10, %xmm9
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
-; SSE-NEXT: movdqa %xmm1, %xmm10
-; SSE-NEXT: pandn %xmm0, %xmm10
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,4,5,5,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,4,5,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,2]
; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: por %xmm10, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm13[0,1,2,3,5,5,5,5]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[2,2,2,2]
-; SSE-NEXT: movdqa %xmm7, %xmm15
-; SSE-NEXT: pandn %xmm10, %xmm15
+; SSE-NEXT: por %xmm9, %xmm0
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm3[0,1,2,3,5,5,5,5]
+; SSE-NEXT: movdqa %xmm3, %xmm12
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[2,2,2,2]
+; SSE-NEXT: movdqa %xmm7, %xmm10
+; SSE-NEXT: pandn %xmm9, %xmm10
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm3[1,2,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,0,2,1]
-; SSE-NEXT: pand %xmm7, %xmm10
-; SSE-NEXT: por %xmm15, %xmm10
+; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm3[1,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,0,2,1]
+; SSE-NEXT: pand %xmm7, %xmm9
+; SSE-NEXT: por %xmm10, %xmm9
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0]
-; SSE-NEXT: pand %xmm1, %xmm10
+; SSE-NEXT: pand %xmm1, %xmm9
; SSE-NEXT: pandn %xmm0, %xmm1
-; SSE-NEXT: por %xmm10, %xmm1
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm5[1,2,2,3,4,5,6,7]
+; SSE-NEXT: por %xmm9, %xmm1
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: movdqa %xmm12, %xmm10
-; SSE-NEXT: pandn %xmm0, %xmm10
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,4,4,6,5]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,4,4,6,5]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
-; SSE-NEXT: pand %xmm12, %xmm0
-; SSE-NEXT: por %xmm0, %xmm10
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,4,5,5,7]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: por %xmm0, %xmm9
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm4[0,1,2,3,4,5,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm14 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
-; SSE-NEXT: movdqa %xmm14, %xmm15
-; SSE-NEXT: pandn %xmm0, %xmm15
-; SSE-NEXT: pand %xmm14, %xmm10
-; SSE-NEXT: por %xmm10, %xmm15
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
+; SSE-NEXT: movdqa %xmm2, %xmm10
+; SSE-NEXT: pandn %xmm0, %xmm10
+; SSE-NEXT: pand %xmm2, %xmm9
+; SSE-NEXT: por %xmm9, %xmm10
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255]
; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pandn %xmm15, %xmm0
+; SSE-NEXT: pandn %xmm10, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,6,5,7,7]
+; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,6,5,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: movdqa %xmm14, %xmm10
-; SSE-NEXT: pandn %xmm0, %xmm10
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,7,7,7,7]
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: pand %xmm14, %xmm0
-; SSE-NEXT: por %xmm0, %xmm10
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: por %xmm0, %xmm9
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm3[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; SSE-NEXT: movdqa %xmm1, %xmm15
-; SSE-NEXT: pandn %xmm0, %xmm15
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm13[0,1,2,3,7,7,7,7]
+; SSE-NEXT: movdqa %xmm15, %xmm10
+; SSE-NEXT: pandn %xmm0, %xmm10
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm12[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: por %xmm0, %xmm15
+; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: por %xmm0, %xmm10
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255]
-; SSE-NEXT: pand %xmm0, %xmm15
-; SSE-NEXT: pandn %xmm10, %xmm0
-; SSE-NEXT: por %xmm15, %xmm0
+; SSE-NEXT: pand %xmm0, %xmm10
+; SSE-NEXT: pandn %xmm9, %xmm0
+; SSE-NEXT: por %xmm10, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,7,7,7,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; SSE-NEXT: movdqa %xmm7, %xmm10
-; SSE-NEXT: pandn %xmm0, %xmm10
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,5,6,6,7]
+; SSE-NEXT: movdqa %xmm7, %xmm9
+; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm13[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,3]
; SSE-NEXT: pand %xmm7, %xmm0
-; SSE-NEXT: por %xmm10, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm6[0,1,2,3,6,7,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[2,1,3,2]
-; SSE-NEXT: movdqa %xmm4, %xmm14
-; SSE-NEXT: movdqa %xmm4, %xmm15
-; SSE-NEXT: pandn %xmm10, %xmm15
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: por %xmm0, %xmm15
+; SSE-NEXT: por %xmm9, %xmm0
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm4[0,1,2,3,6,7,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[2,1,3,2]
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0]
+; SSE-NEXT: movdqa %xmm12, %xmm10
+; SSE-NEXT: pandn %xmm9, %xmm10
+; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: por %xmm0, %xmm10
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0]
; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pandn %xmm15, %xmm0
+; SSE-NEXT: pandn %xmm10, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm12[0,0,2,1,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm6[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSE-NEXT: movdqa %xmm2, %xmm10
-; SSE-NEXT: pandn %xmm0, %xmm10
+; SSE-NEXT: movdqa %xmm15, %xmm9
+; SSE-NEXT: pandn %xmm0, %xmm9
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm2, %xmm6
-; SSE-NEXT: por %xmm0, %xmm10
+; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: por %xmm0, %xmm9
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[0,2,1,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
-; SSE-NEXT: movdqa %xmm4, %xmm15
-; SSE-NEXT: pandn %xmm0, %xmm15
+; SSE-NEXT: movdqa %xmm12, %xmm10
+; SSE-NEXT: pandn %xmm0, %xmm10
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: por %xmm0, %xmm15
+; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: por %xmm0, %xmm10
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255]
-; SSE-NEXT: pand %xmm0, %xmm15
-; SSE-NEXT: pandn %xmm10, %xmm0
-; SSE-NEXT: por %xmm15, %xmm0
+; SSE-NEXT: pand %xmm0, %xmm10
+; SSE-NEXT: pandn %xmm9, %xmm0
+; SSE-NEXT: por %xmm10, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,0,0,0,4,5,6,7]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: movdqa {{.*#+}} xmm15 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255]
-; SSE-NEXT: pand %xmm15, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm5[0,1,1,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,0,2,1]
-; SSE-NEXT: pandn %xmm10, %xmm15
-; SSE-NEXT: por %xmm0, %xmm15
+; SSE-NEXT: movdqa %xmm2, %xmm10
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm2[0,1,1,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,0,2,1]
+; SSE-NEXT: pandn %xmm9, %xmm10
+; SSE-NEXT: por %xmm0, %xmm10
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: movdqa %xmm7, %xmm10
-; SSE-NEXT: pandn %xmm0, %xmm10
-; SSE-NEXT: pand %xmm7, %xmm15
-; SSE-NEXT: por %xmm15, %xmm10
+; SSE-NEXT: movdqa %xmm7, %xmm9
+; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: pand %xmm7, %xmm10
+; SSE-NEXT: por %xmm10, %xmm9
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255]
; SSE-NEXT: pand %xmm0, %xmm3
-; SSE-NEXT: pandn %xmm10, %xmm0
+; SSE-NEXT: pandn %xmm9, %xmm0
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[0,1,2,2]
@@ -6696,40 +6686,40 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
; SSE-NEXT: pand %xmm7, %xmm0
; SSE-NEXT: por %xmm3, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; SSE-NEXT: movdqa %xmm4, %xmm10
-; SSE-NEXT: pandn %xmm0, %xmm10
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[1,1,2,1]
+; SSE-NEXT: movdqa {{.*#+}} xmm14 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; SSE-NEXT: movdqa %xmm14, %xmm9
+; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,4]
-; SSE-NEXT: movdqa %xmm14, %xmm3
+; SSE-NEXT: movdqa %xmm12, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm3
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm14, %xmm0
+; SSE-NEXT: pand %xmm12, %xmm0
; SSE-NEXT: por %xmm0, %xmm3
-; SSE-NEXT: pand %xmm4, %xmm3
-; SSE-NEXT: por %xmm10, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pand %xmm14, %xmm3
+; SSE-NEXT: por %xmm9, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[1,1,1,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: pand %xmm6, %xmm0
-; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm5[0,2,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,1,3]
-; SSE-NEXT: pandn %xmm10, %xmm4
-; SSE-NEXT: por %xmm0, %xmm4
+; SSE-NEXT: pand %xmm15, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm2[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,1,1,3]
+; SSE-NEXT: pandn %xmm9, %xmm15
+; SSE-NEXT: por %xmm0, %xmm15
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[1,1,1,1,4,5,6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm15
+; SSE-NEXT: movdqa %xmm1, %xmm10
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
-; SSE-NEXT: movdqa %xmm6, %xmm10
-; SSE-NEXT: pandn %xmm0, %xmm10
-; SSE-NEXT: pand %xmm6, %xmm4
-; SSE-NEXT: por %xmm4, %xmm10
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
-; SSE-NEXT: pand %xmm1, %xmm3
-; SSE-NEXT: pandn %xmm10, %xmm1
-; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,2,3,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255]
+; SSE-NEXT: movdqa %xmm1, %xmm9
+; SSE-NEXT: pandn %xmm0, %xmm9
+; SSE-NEXT: pand %xmm1, %xmm15
+; SSE-NEXT: por %xmm15, %xmm9
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255]
+; SSE-NEXT: pand %xmm0, %xmm3
+; SSE-NEXT: pandn %xmm9, %xmm0
+; SSE-NEXT: por %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm9
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,2,3,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm11[2,1,3,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,2,1]
; SSE-NEXT: pand %xmm7, %xmm3
@@ -6737,43 +6727,44 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; SSE-NEXT: por %xmm3, %xmm7
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSE-NEXT: pand %xmm6, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm6
+; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,2,2,2]
; SSE-NEXT: pandn %xmm3, %xmm6
; SSE-NEXT: por %xmm0, %xmm6
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; SSE-NEXT: pand %xmm0, %xmm6
+; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: pand %xmm14, %xmm6
; SSE-NEXT: pandn %xmm7, %xmm0
; SSE-NEXT: por %xmm6, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[2,2,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[2,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,1]
-; SSE-NEXT: pand %xmm14, %xmm0
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm5[0,1,2,3,4,6,5,7]
+; SSE-NEXT: pand %xmm12, %xmm0
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,4,6,5,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,1,3,2]
-; SSE-NEXT: pandn %xmm3, %xmm14
-; SSE-NEXT: por %xmm0, %xmm14
+; SSE-NEXT: pandn %xmm3, %xmm12
+; SSE-NEXT: por %xmm0, %xmm12
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; SSE-NEXT: pand %xmm3, %xmm14
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm15[2,2,2,2,4,5,6,7]
+; SSE-NEXT: pand %xmm3, %xmm12
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[2,2,2,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: pandn %xmm0, %xmm3
-; SSE-NEXT: por %xmm14, %xmm3
+; SSE-NEXT: por %xmm12, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,0,0,0,255,255,255,0,0,0,0,255,255]
; SSE-NEXT: pand %xmm0, %xmm3
-; SSE-NEXT: pandn %xmm4, %xmm0
-; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: pandn %xmm6, %xmm0
+; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT: movdqa %xmm3, 368(%rax)
-; SSE-NEXT: movdqa %xmm1, 352(%rax)
+; SSE-NEXT: movdqa %xmm0, 368(%rax)
+; SSE-NEXT: movdqa %xmm9, 352(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 336(%rax)
-; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT: movaps %xmm0, 320(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: movaps %xmm0, 320(%rax)
+; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 288(%rax)
-; SSE-NEXT: movdqa %xmm9, 256(%rax)
+; SSE-NEXT: movdqa %xmm5, 256(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 240(%rax)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
diff --git a/llvm/test/CodeGen/X86/vselect-minmax.ll b/llvm/test/CodeGen/X86/vselect-minmax.ll
index cb0542ca7cea8b..d0acfc31a203dd 100644
--- a/llvm/test/CodeGen/X86/vselect-minmax.ll
+++ b/llvm/test/CodeGen/X86/vselect-minmax.ll
@@ -10283,47 +10283,46 @@ entry:
define <8 x i64> @concat_smin_smax(<4 x i64> %a0, <4 x i64> %a1) {
; SSE2-LABEL: concat_smin_smax:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pxor %xmm6, %xmm4
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: pxor %xmm6, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm7
+; SSE2-NEXT: pxor %xmm5, %xmm4
+; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: pxor %xmm5, %xmm6
+; SSE2-NEXT: movdqa %xmm6, %xmm7
; SSE2-NEXT: pcmpgtd %xmm4, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm4, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
-; SSE2-NEXT: pand %xmm8, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
-; SSE2-NEXT: por %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm7
-; SSE2-NEXT: pandn %xmm2, %xmm7
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pand %xmm5, %xmm4
-; SSE2-NEXT: por %xmm7, %xmm4
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: pxor %xmm6, %xmm7
-; SSE2-NEXT: pxor %xmm3, %xmm6
-; SSE2-NEXT: movdqa %xmm6, %xmm8
-; SSE2-NEXT: pcmpgtd %xmm7, %xmm8
-; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm8[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm7, %xmm6
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE2-NEXT: pand %xmm9, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm8[1,1,3,3]
-; SSE2-NEXT: por %xmm6, %xmm7
-; SSE2-NEXT: movdqa %xmm7, %xmm6
-; SSE2-NEXT: pandn %xmm3, %xmm6
-; SSE2-NEXT: movdqa %xmm7, %xmm8
+; SSE2-NEXT: pand %xmm8, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm6
+; SSE2-NEXT: pandn %xmm2, %xmm6
+; SSE2-NEXT: movdqa %xmm4, %xmm7
+; SSE2-NEXT: pandn %xmm0, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: por %xmm6, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: pxor %xmm5, %xmm6
+; SSE2-NEXT: pxor %xmm3, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm8
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm8[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm6, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm9, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm8[1,1,3,3]
+; SSE2-NEXT: por %xmm5, %xmm6
+; SSE2-NEXT: movdqa %xmm6, %xmm5
+; SSE2-NEXT: pandn %xmm3, %xmm5
+; SSE2-NEXT: movdqa %xmm6, %xmm8
; SSE2-NEXT: pandn %xmm1, %xmm8
-; SSE2-NEXT: pand %xmm7, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm5
-; SSE2-NEXT: por %xmm5, %xmm2
-; SSE2-NEXT: pand %xmm7, %xmm3
+; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: por %xmm7, %xmm2
+; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: por %xmm8, %xmm3
-; SSE2-NEXT: movdqa %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: concat_smin_smax:
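For readers skimming the assembly churn above: the IR body of @concat_smin_smax is not reproduced in this excerpt (it lives in llvm/test/CodeGen/X86/vselect-minmax.ll). As a rough sketch only, and judging from the test's name and the SSE2 output, it plausibly concatenates a signed minimum and a signed maximum of the two <4 x i64> inputs, along these lines:

define <8 x i64> @concat_smin_smax(<4 x i64> %a0, <4 x i64> %a1) {
  ; Compare once, then reuse the mask for both the min and the max select.
  %cmp = icmp slt <4 x i64> %a0, %a1
  %min = select <4 x i1> %cmp, <4 x i64> %a0, <4 x i64> %a1
  %max = select <4 x i1> %cmp, <4 x i64> %a1, <4 x i64> %a0
  ; Concatenate: min in the low half, max in the high half.
  %res = shufflevector <4 x i64> %min, <4 x i64> %max, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i64> %res
}

SSE2 has no 64-bit signed compare (pcmpgtq is SSE4.2), so the generated code flips the sign bits with pxor and synthesizes the compare from 32-bit pcmpgtd/pcmpeqd, which is why the diff above is register-allocation churn rather than a change in logic.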