[llvm] 793192d - [X86] Regenerate 32-bit merge-consecutive-loads tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 2 08:30:10 PST 2020


Author: Simon Pilgrim
Date: 2020-12-02T16:29:38Z
New Revision: 793192da7f4a0b89fc42247feeca33d456fc2bda

URL: https://github.com/llvm/llvm-project/commit/793192da7f4a0b89fc42247feeca33d456fc2bda
DIFF: https://github.com/llvm/llvm-project/commit/793192da7f4a0b89fc42247feeca33d456fc2bda.diff

LOG: [X86] Regenerate 32-bit merge-consecutive-loads tests

Avoid use of X32 check prefix - we try to only use that for gnux32 triple tests

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
    llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll
    llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
index 636e18efb862..f26c19b7dc76 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
@@ -6,8 +6,8 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX
 ;
 ; 32-bit SSE tests to make sure we do reasonable things.
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse | FileCheck %s --check-prefixes=X32-SSE,X32-SSE1
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=X32-SSE,X32-SSE41
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse | FileCheck %s --check-prefixes=X86-SSE,X86-SSE1
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=X86-SSE,X86-SSE41
 
 define <2 x double> @merge_2f64_f64_23(double* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_2f64_f64_23:
@@ -20,19 +20,19 @@ define <2 x double> @merge_2f64_f64_23(double* %ptr) nounwind uwtable noinline s
 ; AVX-NEXT:    vmovups 16(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_2f64_f64_23:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    fldl 16(%eax)
-; X32-SSE1-NEXT:    fldl 24(%eax)
-; X32-SSE1-NEXT:    fxch %st(1)
-; X32-SSE1-NEXT:    retl
-;
-; X32-SSE41-LABEL: merge_2f64_f64_23:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movups 16(%eax), %xmm0
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_2f64_f64_23:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    fldl 16(%eax)
+; X86-SSE1-NEXT:    fldl 24(%eax)
+; X86-SSE1-NEXT:    fxch %st(1)
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE41-LABEL: merge_2f64_f64_23:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movups 16(%eax), %xmm0
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 2
   %ptr1 = getelementptr inbounds double, double* %ptr, i64 3
   %val0 = load double, double* %ptr0
@@ -53,35 +53,35 @@ define <2 x i64> @merge_2i64_i64_12(i64* %ptr) nounwind uwtable noinline ssp {
 ; AVX-NEXT:    vmovups 8(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_2i64_i64_12:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    pushl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    pushl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 12
-; X32-SSE1-NEXT:    .cfi_offset %esi, -12
-; X32-SSE1-NEXT:    .cfi_offset %edi, -8
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl 8(%ecx), %edx
-; X32-SSE1-NEXT:    movl 12(%ecx), %esi
-; X32-SSE1-NEXT:    movl 16(%ecx), %edi
-; X32-SSE1-NEXT:    movl 20(%ecx), %ecx
-; X32-SSE1-NEXT:    movl %ecx, 12(%eax)
-; X32-SSE1-NEXT:    movl %edi, 8(%eax)
-; X32-SSE1-NEXT:    movl %esi, 4(%eax)
-; X32-SSE1-NEXT:    movl %edx, (%eax)
-; X32-SSE1-NEXT:    popl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    popl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 4
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_2i64_i64_12:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movups 8(%eax), %xmm0
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_2i64_i64_12:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    pushl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    pushl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 12
+; X86-SSE1-NEXT:    .cfi_offset %esi, -12
+; X86-SSE1-NEXT:    .cfi_offset %edi, -8
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl 8(%ecx), %edx
+; X86-SSE1-NEXT:    movl 12(%ecx), %esi
+; X86-SSE1-NEXT:    movl 16(%ecx), %edi
+; X86-SSE1-NEXT:    movl 20(%ecx), %ecx
+; X86-SSE1-NEXT:    movl %ecx, 12(%eax)
+; X86-SSE1-NEXT:    movl %edi, 8(%eax)
+; X86-SSE1-NEXT:    movl %esi, 4(%eax)
+; X86-SSE1-NEXT:    movl %edx, (%eax)
+; X86-SSE1-NEXT:    popl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    popl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_2i64_i64_12:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movups 8(%eax), %xmm0
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
   %ptr1 = getelementptr inbounds i64, i64* %ptr, i64 2
   %val0 = load i64, i64* %ptr0
@@ -102,11 +102,11 @@ define <4 x float> @merge_4f32_f32_2345(float* %ptr) nounwind uwtable noinline s
 ; AVX-NEXT:    vmovups 8(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
-; X32-SSE-LABEL: merge_4f32_f32_2345:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    movups 8(%eax), %xmm0
-; X32-SSE-NEXT:    retl
+; X86-SSE-LABEL: merge_4f32_f32_2345:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movups 8(%eax), %xmm0
+; X86-SSE-NEXT:    retl
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 2
   %ptr1 = getelementptr inbounds float, float* %ptr, i64 3
   %ptr2 = getelementptr inbounds float, float* %ptr, i64 4
@@ -133,11 +133,11 @@ define <4 x float> @merge_4f32_f32_3zuu(float* %ptr) nounwind uwtable noinline s
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
-; X32-SSE-LABEL: merge_4f32_f32_3zuu:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE-NEXT:    retl
+; X86-SSE-LABEL: merge_4f32_f32_3zuu:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    retl
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 3
   %val0 = load float, float* %ptr0
   %res0 = insertelement <4 x float> undef, float %val0, i32 0
@@ -156,18 +156,18 @@ define <4 x float> @merge_4f32_f32_34uu(float* %ptr) nounwind uwtable noinline s
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4f32_f32_34uu:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    xorps %xmm0, %xmm0
-; X32-SSE1-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
-; X32-SSE1-NEXT:    retl
-;
-; X32-SSE41-LABEL: merge_4f32_f32_34uu:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4f32_f32_34uu:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    xorps %xmm0, %xmm0
+; X86-SSE1-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE41-LABEL: merge_4f32_f32_34uu:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 3
   %ptr1 = getelementptr inbounds float, float* %ptr, i64 4
   %val0 = load float, float* %ptr0
@@ -199,22 +199,22 @@ define <4 x float> @merge_4f32_f32_34z6(float* %ptr) nounwind uwtable noinline s
 ; AVX-NEXT:    vblendps {{.*#+}} xmm0 = mem[0,1],xmm0[2],mem[3]
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4f32_f32_34z6:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movups 12(%eax), %xmm0
-; X32-SSE1-NEXT:    xorps %xmm1, %xmm1
-; X32-SSE1-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
-; X32-SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
-; X32-SSE1-NEXT:    retl
-;
-; X32-SSE41-LABEL: merge_4f32_f32_34z6:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movups 12(%eax), %xmm1
-; X32-SSE41-NEXT:    xorps %xmm0, %xmm0
-; X32-SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3]
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4f32_f32_34z6:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movups 12(%eax), %xmm0
+; X86-SSE1-NEXT:    xorps %xmm1, %xmm1
+; X86-SSE1-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
+; X86-SSE1-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE41-LABEL: merge_4f32_f32_34z6:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movups 12(%eax), %xmm1
+; X86-SSE41-NEXT:    xorps %xmm0, %xmm0
+; X86-SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3]
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 3
   %ptr1 = getelementptr inbounds float, float* %ptr, i64 4
   %ptr3 = getelementptr inbounds float, float* %ptr, i64 6
@@ -238,18 +238,18 @@ define <4 x float> @merge_4f32_f32_45zz(float* %ptr) nounwind uwtable noinline s
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4f32_f32_45zz:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    xorps %xmm0, %xmm0
-; X32-SSE1-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
-; X32-SSE1-NEXT:    retl
-;
-; X32-SSE41-LABEL: merge_4f32_f32_45zz:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4f32_f32_45zz:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    xorps %xmm0, %xmm0
+; X86-SSE1-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE41-LABEL: merge_4f32_f32_45zz:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 4
   %ptr1 = getelementptr inbounds float, float* %ptr, i64 5
   %val0 = load float, float* %ptr0
@@ -279,21 +279,21 @@ define <4 x float> @merge_4f32_f32_012u(float* %ptr) nounwind uwtable noinline s
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4f32_f32_012u:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    xorps %xmm0, %xmm0
-; X32-SSE1-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
-; X32-SSE1-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-SSE1-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE1-NEXT:    retl
-;
-; X32-SSE41-LABEL: merge_4f32_f32_012u:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4f32_f32_012u:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    xorps %xmm0, %xmm0
+; X86-SSE1-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE41-LABEL: merge_4f32_f32_012u:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 0
   %ptr1 = getelementptr inbounds float, float* %ptr, i64 1
   %ptr2 = getelementptr inbounds float, float* %ptr, i64 2
@@ -327,21 +327,21 @@ define <4 x float> @merge_4f32_f32_019u(float* %ptr) nounwind uwtable noinline s
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4f32_f32_019u:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    xorps %xmm0, %xmm0
-; X32-SSE1-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
-; X32-SSE1-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-SSE1-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE1-NEXT:    retl
-;
-; X32-SSE41-LABEL: merge_4f32_f32_019u:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4f32_f32_019u:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    xorps %xmm0, %xmm0
+; X86-SSE1-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE41-LABEL: merge_4f32_f32_019u:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 0
   %ptr1 = getelementptr inbounds float, float* %ptr, i64 1
   %ptr2 = getelementptr inbounds float, float* %ptr, i64 9
@@ -366,28 +366,28 @@ define <4 x i32> @merge_4i32_i32_23u5(i32* %ptr) nounwind uwtable noinline ssp {
 ; AVX-NEXT:    vmovups 8(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4i32_i32_23u5:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    pushl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    .cfi_offset %esi, -8
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl 8(%ecx), %edx
-; X32-SSE1-NEXT:    movl 12(%ecx), %esi
-; X32-SSE1-NEXT:    movl 20(%ecx), %ecx
-; X32-SSE1-NEXT:    movl %esi, 4(%eax)
-; X32-SSE1-NEXT:    movl %edx, (%eax)
-; X32-SSE1-NEXT:    movl %ecx, 12(%eax)
-; X32-SSE1-NEXT:    popl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 4
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_4i32_i32_23u5:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movups 8(%eax), %xmm0
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4i32_i32_23u5:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    pushl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    .cfi_offset %esi, -8
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl 8(%ecx), %edx
+; X86-SSE1-NEXT:    movl 12(%ecx), %esi
+; X86-SSE1-NEXT:    movl 20(%ecx), %ecx
+; X86-SSE1-NEXT:    movl %esi, 4(%eax)
+; X86-SSE1-NEXT:    movl %edx, (%eax)
+; X86-SSE1-NEXT:    movl %ecx, 12(%eax)
+; X86-SSE1-NEXT:    popl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_4i32_i32_23u5:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movups 8(%eax), %xmm0
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 2
   %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 3
   %ptr3 = getelementptr inbounds i32, i32* %ptr, i64 5
@@ -413,36 +413,36 @@ define <4 x i32> @merge_4i32_i32_23u5_inc2(i32* %ptr) nounwind uwtable noinline
 ; AVX-NEXT:    incl 8(%rdi)
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4i32_i32_23u5_inc2:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    pushl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    pushl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 12
-; X32-SSE1-NEXT:    .cfi_offset %esi, -12
-; X32-SSE1-NEXT:    .cfi_offset %edi, -8
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl 8(%ecx), %edx
-; X32-SSE1-NEXT:    movl 12(%ecx), %esi
-; X32-SSE1-NEXT:    leal 1(%edx), %edi
-; X32-SSE1-NEXT:    movl %edi, 8(%ecx)
-; X32-SSE1-NEXT:    movl 20(%ecx), %ecx
-; X32-SSE1-NEXT:    movl %esi, 4(%eax)
-; X32-SSE1-NEXT:    movl %edx, (%eax)
-; X32-SSE1-NEXT:    movl %ecx, 12(%eax)
-; X32-SSE1-NEXT:    popl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    popl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 4
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_4i32_i32_23u5_inc2:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movups 8(%eax), %xmm0
-; X32-SSE41-NEXT:    incl 8(%eax)
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4i32_i32_23u5_inc2:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    pushl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    pushl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 12
+; X86-SSE1-NEXT:    .cfi_offset %esi, -12
+; X86-SSE1-NEXT:    .cfi_offset %edi, -8
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl 8(%ecx), %edx
+; X86-SSE1-NEXT:    movl 12(%ecx), %esi
+; X86-SSE1-NEXT:    leal 1(%edx), %edi
+; X86-SSE1-NEXT:    movl %edi, 8(%ecx)
+; X86-SSE1-NEXT:    movl 20(%ecx), %ecx
+; X86-SSE1-NEXT:    movl %esi, 4(%eax)
+; X86-SSE1-NEXT:    movl %edx, (%eax)
+; X86-SSE1-NEXT:    movl %ecx, 12(%eax)
+; X86-SSE1-NEXT:    popl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    popl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_4i32_i32_23u5_inc2:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movups 8(%eax), %xmm0
+; X86-SSE41-NEXT:    incl 8(%eax)
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 2
   %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 3
   %ptr3 = getelementptr inbounds i32, i32* %ptr, i64 5
@@ -470,36 +470,36 @@ define <4 x i32> @merge_4i32_i32_23u5_inc3(i32* %ptr) nounwind uwtable noinline
 ; AVX-NEXT:    incl 12(%rdi)
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4i32_i32_23u5_inc3:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    pushl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    pushl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 12
-; X32-SSE1-NEXT:    .cfi_offset %esi, -12
-; X32-SSE1-NEXT:    .cfi_offset %edi, -8
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl 8(%ecx), %edx
-; X32-SSE1-NEXT:    movl 12(%ecx), %esi
-; X32-SSE1-NEXT:    leal 1(%esi), %edi
-; X32-SSE1-NEXT:    movl %edi, 12(%ecx)
-; X32-SSE1-NEXT:    movl 20(%ecx), %ecx
-; X32-SSE1-NEXT:    movl %esi, 4(%eax)
-; X32-SSE1-NEXT:    movl %edx, (%eax)
-; X32-SSE1-NEXT:    movl %ecx, 12(%eax)
-; X32-SSE1-NEXT:    popl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    popl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 4
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_4i32_i32_23u5_inc3:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movups 8(%eax), %xmm0
-; X32-SSE41-NEXT:    incl 12(%eax)
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4i32_i32_23u5_inc3:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    pushl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    pushl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 12
+; X86-SSE1-NEXT:    .cfi_offset %esi, -12
+; X86-SSE1-NEXT:    .cfi_offset %edi, -8
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl 8(%ecx), %edx
+; X86-SSE1-NEXT:    movl 12(%ecx), %esi
+; X86-SSE1-NEXT:    leal 1(%esi), %edi
+; X86-SSE1-NEXT:    movl %edi, 12(%ecx)
+; X86-SSE1-NEXT:    movl 20(%ecx), %ecx
+; X86-SSE1-NEXT:    movl %esi, 4(%eax)
+; X86-SSE1-NEXT:    movl %edx, (%eax)
+; X86-SSE1-NEXT:    movl %ecx, 12(%eax)
+; X86-SSE1-NEXT:    popl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    popl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_4i32_i32_23u5_inc3:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movups 8(%eax), %xmm0
+; X86-SSE41-NEXT:    incl 12(%eax)
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 2
   %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 3
   %ptr3 = getelementptr inbounds i32, i32* %ptr, i64 5
@@ -525,20 +525,20 @@ define <4 x i32> @merge_4i32_i32_3zuu(i32* %ptr) nounwind uwtable noinline ssp {
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4i32_i32_3zuu:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl 12(%ecx), %ecx
-; X32-SSE1-NEXT:    movl %ecx, (%eax)
-; X32-SSE1-NEXT:    movl $0, 4(%eax)
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_4i32_i32_3zuu:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4i32_i32_3zuu:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl 12(%ecx), %ecx
+; X86-SSE1-NEXT:    movl %ecx, (%eax)
+; X86-SSE1-NEXT:    movl $0, 4(%eax)
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_4i32_i32_3zuu:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 3
   %val0 = load i32, i32* %ptr0
   %res0 = insertelement <4 x i32> undef, i32 %val0, i32 0
@@ -557,21 +557,21 @@ define <4 x i32> @merge_4i32_i32_34uu(i32* %ptr) nounwind uwtable noinline ssp {
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4i32_i32_34uu:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl 12(%ecx), %edx
-; X32-SSE1-NEXT:    movl 16(%ecx), %ecx
-; X32-SSE1-NEXT:    movl %ecx, 4(%eax)
-; X32-SSE1-NEXT:    movl %edx, (%eax)
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_4i32_i32_34uu:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4i32_i32_34uu:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl 12(%ecx), %edx
+; X86-SSE1-NEXT:    movl 16(%ecx), %ecx
+; X86-SSE1-NEXT:    movl %ecx, 4(%eax)
+; X86-SSE1-NEXT:    movl %edx, (%eax)
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_4i32_i32_34uu:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 3
   %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 4
   %val0 = load i32, i32* %ptr0
@@ -592,23 +592,23 @@ define <4 x i32> @merge_4i32_i32_45zz(i32* %ptr) nounwind uwtable noinline ssp {
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4i32_i32_45zz:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl 16(%ecx), %edx
-; X32-SSE1-NEXT:    movl 20(%ecx), %ecx
-; X32-SSE1-NEXT:    movl %ecx, 4(%eax)
-; X32-SSE1-NEXT:    movl %edx, (%eax)
-; X32-SSE1-NEXT:    movl $0, 12(%eax)
-; X32-SSE1-NEXT:    movl $0, 8(%eax)
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_4i32_i32_45zz:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4i32_i32_45zz:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl 16(%ecx), %edx
+; X86-SSE1-NEXT:    movl 20(%ecx), %ecx
+; X86-SSE1-NEXT:    movl %ecx, 4(%eax)
+; X86-SSE1-NEXT:    movl %edx, (%eax)
+; X86-SSE1-NEXT:    movl $0, 12(%eax)
+; X86-SSE1-NEXT:    movl $0, 8(%eax)
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_4i32_i32_45zz:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 4
   %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 5
   %val0 = load i32, i32* %ptr0
@@ -631,36 +631,36 @@ define <4 x i32> @merge_4i32_i32_45zz_inc4(i32* %ptr) nounwind uwtable noinline
 ; AVX-NEXT:    incl 16(%rdi)
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4i32_i32_45zz_inc4:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    pushl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    pushl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 12
-; X32-SSE1-NEXT:    .cfi_offset %esi, -12
-; X32-SSE1-NEXT:    .cfi_offset %edi, -8
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl 16(%ecx), %edx
-; X32-SSE1-NEXT:    movl 20(%ecx), %esi
-; X32-SSE1-NEXT:    leal 1(%edx), %edi
-; X32-SSE1-NEXT:    movl %edi, 16(%ecx)
-; X32-SSE1-NEXT:    movl %esi, 4(%eax)
-; X32-SSE1-NEXT:    movl %edx, (%eax)
-; X32-SSE1-NEXT:    movl $0, 12(%eax)
-; X32-SSE1-NEXT:    movl $0, 8(%eax)
-; X32-SSE1-NEXT:    popl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    popl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 4
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_4i32_i32_45zz_inc4:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE41-NEXT:    incl 16(%eax)
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4i32_i32_45zz_inc4:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    pushl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    pushl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 12
+; X86-SSE1-NEXT:    .cfi_offset %esi, -12
+; X86-SSE1-NEXT:    .cfi_offset %edi, -8
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl 16(%ecx), %edx
+; X86-SSE1-NEXT:    movl 20(%ecx), %esi
+; X86-SSE1-NEXT:    leal 1(%edx), %edi
+; X86-SSE1-NEXT:    movl %edi, 16(%ecx)
+; X86-SSE1-NEXT:    movl %esi, 4(%eax)
+; X86-SSE1-NEXT:    movl %edx, (%eax)
+; X86-SSE1-NEXT:    movl $0, 12(%eax)
+; X86-SSE1-NEXT:    movl $0, 8(%eax)
+; X86-SSE1-NEXT:    popl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    popl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_4i32_i32_45zz_inc4:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT:    incl 16(%eax)
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 4
   %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 5
   %val0 = load i32, i32* %ptr0
@@ -685,36 +685,36 @@ define <4 x i32> @merge_4i32_i32_45zz_inc5(i32* %ptr) nounwind uwtable noinline
 ; AVX-NEXT:    incl 20(%rdi)
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4i32_i32_45zz_inc5:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    pushl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    pushl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 12
-; X32-SSE1-NEXT:    .cfi_offset %esi, -12
-; X32-SSE1-NEXT:    .cfi_offset %edi, -8
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl 16(%ecx), %edx
-; X32-SSE1-NEXT:    movl 20(%ecx), %esi
-; X32-SSE1-NEXT:    leal 1(%esi), %edi
-; X32-SSE1-NEXT:    movl %edi, 20(%ecx)
-; X32-SSE1-NEXT:    movl %esi, 4(%eax)
-; X32-SSE1-NEXT:    movl %edx, (%eax)
-; X32-SSE1-NEXT:    movl $0, 12(%eax)
-; X32-SSE1-NEXT:    movl $0, 8(%eax)
-; X32-SSE1-NEXT:    popl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    popl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 4
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_4i32_i32_45zz_inc5:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE41-NEXT:    incl 20(%eax)
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4i32_i32_45zz_inc5:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    pushl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    pushl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 12
+; X86-SSE1-NEXT:    .cfi_offset %esi, -12
+; X86-SSE1-NEXT:    .cfi_offset %edi, -8
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl 16(%ecx), %edx
+; X86-SSE1-NEXT:    movl 20(%ecx), %esi
+; X86-SSE1-NEXT:    leal 1(%esi), %edi
+; X86-SSE1-NEXT:    movl %edi, 20(%ecx)
+; X86-SSE1-NEXT:    movl %esi, 4(%eax)
+; X86-SSE1-NEXT:    movl %edx, (%eax)
+; X86-SSE1-NEXT:    movl $0, 12(%eax)
+; X86-SSE1-NEXT:    movl $0, 8(%eax)
+; X86-SSE1-NEXT:    popl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    popl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_4i32_i32_45zz_inc5:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT:    incl 20(%eax)
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 4
   %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 5
   %val0 = load i32, i32* %ptr0
@@ -737,35 +737,35 @@ define <8 x i16> @merge_8i16_i16_23u567u9(i16* %ptr) nounwind uwtable noinline s
 ; AVX-NEXT:    vmovups 4(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_8i16_i16_23u567u9:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    pushl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    pushl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 12
-; X32-SSE1-NEXT:    .cfi_offset %esi, -12
-; X32-SSE1-NEXT:    .cfi_offset %edi, -8
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl 4(%ecx), %edx
-; X32-SSE1-NEXT:    movl 10(%ecx), %esi
-; X32-SSE1-NEXT:    movzwl 14(%ecx), %edi
-; X32-SSE1-NEXT:    movzwl 18(%ecx), %ecx
-; X32-SSE1-NEXT:    movw %di, 10(%eax)
-; X32-SSE1-NEXT:    movw %cx, 14(%eax)
-; X32-SSE1-NEXT:    movl %esi, 6(%eax)
-; X32-SSE1-NEXT:    movl %edx, (%eax)
-; X32-SSE1-NEXT:    popl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    popl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 4
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_8i16_i16_23u567u9:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movups 4(%eax), %xmm0
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_8i16_i16_23u567u9:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    pushl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    pushl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 12
+; X86-SSE1-NEXT:    .cfi_offset %esi, -12
+; X86-SSE1-NEXT:    .cfi_offset %edi, -8
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl 4(%ecx), %edx
+; X86-SSE1-NEXT:    movl 10(%ecx), %esi
+; X86-SSE1-NEXT:    movzwl 14(%ecx), %edi
+; X86-SSE1-NEXT:    movzwl 18(%ecx), %ecx
+; X86-SSE1-NEXT:    movw %di, 10(%eax)
+; X86-SSE1-NEXT:    movw %cx, 14(%eax)
+; X86-SSE1-NEXT:    movl %esi, 6(%eax)
+; X86-SSE1-NEXT:    movl %edx, (%eax)
+; X86-SSE1-NEXT:    popl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    popl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_8i16_i16_23u567u9:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movups 4(%eax), %xmm0
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 2
   %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 3
   %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 5
@@ -798,19 +798,19 @@ define <8 x i16> @merge_8i16_i16_34uuuuuu(i16* %ptr) nounwind uwtable noinline s
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_8i16_i16_34uuuuuu:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl 6(%ecx), %ecx
-; X32-SSE1-NEXT:    movl %ecx, (%eax)
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_8i16_i16_34uuuuuu:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_8i16_i16_34uuuuuu:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl 6(%ecx), %ecx
+; X86-SSE1-NEXT:    movl %ecx, (%eax)
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_8i16_i16_34uuuuuu:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 3
   %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 4
   %val0 = load i16, i16* %ptr0
@@ -831,23 +831,23 @@ define <8 x i16> @merge_8i16_i16_45u7zzzz(i16* %ptr) nounwind uwtable noinline s
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_8i16_i16_45u7zzzz:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl 8(%ecx), %edx
-; X32-SSE1-NEXT:    movzwl 14(%ecx), %ecx
-; X32-SSE1-NEXT:    movw %cx, 6(%eax)
-; X32-SSE1-NEXT:    movl %edx, (%eax)
-; X32-SSE1-NEXT:    movl $0, 12(%eax)
-; X32-SSE1-NEXT:    movl $0, 8(%eax)
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_8i16_i16_45u7zzzz:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_8i16_i16_45u7zzzz:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl 8(%ecx), %edx
+; X86-SSE1-NEXT:    movzwl 14(%ecx), %ecx
+; X86-SSE1-NEXT:    movw %cx, 6(%eax)
+; X86-SSE1-NEXT:    movl %edx, (%eax)
+; X86-SSE1-NEXT:    movl $0, 12(%eax)
+; X86-SSE1-NEXT:    movl $0, 8(%eax)
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_8i16_i16_45u7zzzz:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 4
   %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 5
   %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 7
@@ -875,49 +875,49 @@ define <16 x i8> @merge_16i8_i8_01u3456789ABCDuF(i8* %ptr) nounwind uwtable noin
 ; AVX-NEXT:    vmovups (%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_16i8_i8_01u3456789ABCDuF:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    pushl %ebp
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    pushl %ebx
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 12
-; X32-SSE1-NEXT:    pushl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 16
-; X32-SSE1-NEXT:    pushl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 20
-; X32-SSE1-NEXT:    .cfi_offset %esi, -20
-; X32-SSE1-NEXT:    .cfi_offset %edi, -16
-; X32-SSE1-NEXT:    .cfi_offset %ebx, -12
-; X32-SSE1-NEXT:    .cfi_offset %ebp, -8
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movzwl (%ecx), %ebp
-; X32-SSE1-NEXT:    movl 3(%ecx), %esi
-; X32-SSE1-NEXT:    movl 7(%ecx), %edi
-; X32-SSE1-NEXT:    movzwl 11(%ecx), %ebx
-; X32-SSE1-NEXT:    movb 13(%ecx), %dl
-; X32-SSE1-NEXT:    movb 15(%ecx), %cl
-; X32-SSE1-NEXT:    movb %dl, 13(%eax)
-; X32-SSE1-NEXT:    movb %cl, 15(%eax)
-; X32-SSE1-NEXT:    movw %bx, 11(%eax)
-; X32-SSE1-NEXT:    movl %edi, 7(%eax)
-; X32-SSE1-NEXT:    movl %esi, 3(%eax)
-; X32-SSE1-NEXT:    movw %bp, (%eax)
-; X32-SSE1-NEXT:    popl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 16
-; X32-SSE1-NEXT:    popl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 12
-; X32-SSE1-NEXT:    popl %ebx
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    popl %ebp
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 4
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_16i8_i8_01u3456789ABCDuF:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movups (%eax), %xmm0
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_16i8_i8_01u3456789ABCDuF:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    pushl %ebp
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    pushl %ebx
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 12
+; X86-SSE1-NEXT:    pushl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 16
+; X86-SSE1-NEXT:    pushl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 20
+; X86-SSE1-NEXT:    .cfi_offset %esi, -20
+; X86-SSE1-NEXT:    .cfi_offset %edi, -16
+; X86-SSE1-NEXT:    .cfi_offset %ebx, -12
+; X86-SSE1-NEXT:    .cfi_offset %ebp, -8
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movzwl (%ecx), %ebp
+; X86-SSE1-NEXT:    movl 3(%ecx), %esi
+; X86-SSE1-NEXT:    movl 7(%ecx), %edi
+; X86-SSE1-NEXT:    movzwl 11(%ecx), %ebx
+; X86-SSE1-NEXT:    movb 13(%ecx), %dl
+; X86-SSE1-NEXT:    movb 15(%ecx), %cl
+; X86-SSE1-NEXT:    movb %dl, 13(%eax)
+; X86-SSE1-NEXT:    movb %cl, 15(%eax)
+; X86-SSE1-NEXT:    movw %bx, 11(%eax)
+; X86-SSE1-NEXT:    movl %edi, 7(%eax)
+; X86-SSE1-NEXT:    movl %esi, 3(%eax)
+; X86-SSE1-NEXT:    movw %bp, (%eax)
+; X86-SSE1-NEXT:    popl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 16
+; X86-SSE1-NEXT:    popl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 12
+; X86-SSE1-NEXT:    popl %ebx
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    popl %ebp
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_16i8_i8_01u3456789ABCDuF:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movups (%eax), %xmm0
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 0
   %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 1
   %ptr3 = getelementptr inbounds i8, i8* %ptr, i64 3
@@ -974,24 +974,24 @@ define <16 x i8> @merge_16i8_i8_01u3uuzzuuuuuzzz(i8* %ptr) nounwind uwtable noin
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movzwl (%ecx), %edx
-; X32-SSE1-NEXT:    movb 3(%ecx), %cl
-; X32-SSE1-NEXT:    movb %cl, 3(%eax)
-; X32-SSE1-NEXT:    movw %dx, (%eax)
-; X32-SSE1-NEXT:    movb $0, 15(%eax)
-; X32-SSE1-NEXT:    movw $0, 13(%eax)
-; X32-SSE1-NEXT:    movw $0, 6(%eax)
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movzwl (%ecx), %edx
+; X86-SSE1-NEXT:    movb 3(%ecx), %cl
+; X86-SSE1-NEXT:    movb %cl, 3(%eax)
+; X86-SSE1-NEXT:    movw %dx, (%eax)
+; X86-SSE1-NEXT:    movb $0, 15(%eax)
+; X86-SSE1-NEXT:    movw $0, 13(%eax)
+; X86-SSE1-NEXT:    movw $0, 6(%eax)
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 0
   %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 1
   %ptr3 = getelementptr inbounds i8, i8* %ptr, i64 3
@@ -1020,23 +1020,23 @@ define <16 x i8> @merge_16i8_i8_0123uu67uuuuuzzz(i8* %ptr) nounwind uwtable noin
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl (%ecx), %edx
-; X32-SSE1-NEXT:    movzwl 6(%ecx), %ecx
-; X32-SSE1-NEXT:    movw %cx, 6(%eax)
-; X32-SSE1-NEXT:    movl %edx, (%eax)
-; X32-SSE1-NEXT:    movb $0, 15(%eax)
-; X32-SSE1-NEXT:    movw $0, 13(%eax)
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl (%ecx), %edx
+; X86-SSE1-NEXT:    movzwl 6(%ecx), %ecx
+; X86-SSE1-NEXT:    movw %cx, 6(%eax)
+; X86-SSE1-NEXT:    movl %edx, (%eax)
+; X86-SSE1-NEXT:    movb $0, 15(%eax)
+; X86-SSE1-NEXT:    movw $0, 13(%eax)
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 0
   %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 1
   %ptr2 = getelementptr inbounds i8, i8* %ptr, i64 2
@@ -1074,23 +1074,23 @@ define void @merge_4i32_i32_combine(<4 x i32>* %dst, i32* %src) {
 ; AVX-NEXT:    vmovaps %xmm0, (%rdi)
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4i32_i32_combine:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE1-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-SSE1-NEXT:    andps %xmm0, %xmm1
-; X32-SSE1-NEXT:    movaps %xmm1, (%eax)
-; X32-SSE1-NEXT:    retl
-;
-; X32-SSE41-LABEL: merge_4i32_i32_combine:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE41-NEXT:    movaps %xmm0, (%eax)
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4i32_i32_combine:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT:    andps %xmm0, %xmm1
+; X86-SSE1-NEXT:    movaps %xmm1, (%eax)
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE41-LABEL: merge_4i32_i32_combine:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE41-NEXT:    movaps %xmm0, (%eax)
+; X86-SSE41-NEXT:    retl
  %1 = getelementptr i32, i32* %src, i32 0
  %2 = load i32, i32* %1
  %3 = insertelement <4 x i32> undef, i32 %2, i32 0
@@ -1120,38 +1120,38 @@ define <2 x i64> @merge_2i64_i64_12_volatile(i64* %ptr) nounwind uwtable noinlin
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_2i64_i64_12_volatile:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    pushl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    pushl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 12
-; X32-SSE1-NEXT:    .cfi_offset %esi, -12
-; X32-SSE1-NEXT:    .cfi_offset %edi, -8
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl 8(%ecx), %edx
-; X32-SSE1-NEXT:    movl 12(%ecx), %esi
-; X32-SSE1-NEXT:    movl 16(%ecx), %edi
-; X32-SSE1-NEXT:    movl 20(%ecx), %ecx
-; X32-SSE1-NEXT:    movl %ecx, 12(%eax)
-; X32-SSE1-NEXT:    movl %edi, 8(%eax)
-; X32-SSE1-NEXT:    movl %esi, 4(%eax)
-; X32-SSE1-NEXT:    movl %edx, (%eax)
-; X32-SSE1-NEXT:    popl %esi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT:    popl %edi
-; X32-SSE1-NEXT:    .cfi_def_cfa_offset 4
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: merge_2i64_i64_12_volatile:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE41-NEXT:    pinsrd $1, 12(%eax), %xmm0
-; X32-SSE41-NEXT:    pinsrd $2, 16(%eax), %xmm0
-; X32-SSE41-NEXT:    pinsrd $3, 20(%eax), %xmm0
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_2i64_i64_12_volatile:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    pushl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    pushl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 12
+; X86-SSE1-NEXT:    .cfi_offset %esi, -12
+; X86-SSE1-NEXT:    .cfi_offset %edi, -8
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl 8(%ecx), %edx
+; X86-SSE1-NEXT:    movl 12(%ecx), %esi
+; X86-SSE1-NEXT:    movl 16(%ecx), %edi
+; X86-SSE1-NEXT:    movl 20(%ecx), %ecx
+; X86-SSE1-NEXT:    movl %ecx, 12(%eax)
+; X86-SSE1-NEXT:    movl %edi, 8(%eax)
+; X86-SSE1-NEXT:    movl %esi, 4(%eax)
+; X86-SSE1-NEXT:    movl %edx, (%eax)
+; X86-SSE1-NEXT:    popl %esi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    popl %edi
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: merge_2i64_i64_12_volatile:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE41-NEXT:    pinsrd $1, 12(%eax), %xmm0
+; X86-SSE41-NEXT:    pinsrd $2, 16(%eax), %xmm0
+; X86-SSE41-NEXT:    pinsrd $3, 20(%eax), %xmm0
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
   %ptr1 = getelementptr inbounds i64, i64* %ptr, i64 2
   %val0 = load volatile i64, i64* %ptr0
@@ -1186,23 +1186,23 @@ define <4 x float> @merge_4f32_f32_2345_volatile(float* %ptr) nounwind uwtable n
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: merge_4f32_f32_2345_volatile:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE1-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-SSE1-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE1-NEXT:    movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
-; X32-SSE1-NEXT:    retl
-;
-; X32-SSE41-LABEL: merge_4f32_f32_2345_volatile:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
-; X32-SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; X32-SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: merge_4f32_f32_2345_volatile:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-SSE1-NEXT:    movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE41-LABEL: merge_4f32_f32_2345_volatile:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; X86-SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; X86-SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
+; X86-SSE41-NEXT:    retl
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 2
   %ptr1 = getelementptr inbounds float, float* %ptr, i64 3
   %ptr2 = getelementptr inbounds float, float* %ptr, i64 4
@@ -1237,14 +1237,14 @@ define <4 x float> @merge_4f32_f32_X0YY(float* %ptr0, float* %ptr1) nounwind uwt
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0,0]
 ; AVX-NEXT:    retq
 ;
-; X32-SSE-LABEL: merge_4f32_f32_X0YY:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
-; X32-SSE-NEXT:    retl
+; X86-SSE-LABEL: merge_4f32_f32_X0YY:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
+; X86-SSE-NEXT:    retl
   %val0 = load float, float* %ptr0, align 4
   %val1 = load float, float* %ptr1, align 4
   %res0 = insertelement <4 x float> undef, float %val0, i32 0
@@ -1270,22 +1270,22 @@ define <4 x i32> @load_i32_zext_i128_v4i32(i32* %ptr) {
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
-; X32-SSE1-LABEL: load_i32_zext_i128_v4i32:
-; X32-SSE1:       # %bb.0:
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE1-NEXT:    movl (%ecx), %ecx
-; X32-SSE1-NEXT:    movl %ecx, (%eax)
-; X32-SSE1-NEXT:    movl $0, 12(%eax)
-; X32-SSE1-NEXT:    movl $0, 8(%eax)
-; X32-SSE1-NEXT:    movl $0, 4(%eax)
-; X32-SSE1-NEXT:    retl $4
-;
-; X32-SSE41-LABEL: load_i32_zext_i128_v4i32:
-; X32-SSE41:       # %bb.0:
-; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE41-NEXT:    retl
+; X86-SSE1-LABEL: load_i32_zext_i128_v4i32:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl (%ecx), %ecx
+; X86-SSE1-NEXT:    movl %ecx, (%eax)
+; X86-SSE1-NEXT:    movl $0, 12(%eax)
+; X86-SSE1-NEXT:    movl $0, 8(%eax)
+; X86-SSE1-NEXT:    movl $0, 4(%eax)
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: load_i32_zext_i128_v4i32:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE41-NEXT:    retl
   %1 = load i32, i32* %ptr
   %2 = zext i32 %1 to i128
   %3 = bitcast i128 %2 to <4 x i32>

diff  --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll
index e957b654980d..15057577c53c 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll
@@ -4,7 +4,7 @@
 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512F
 ;
 ; Just one 32-bit run to make sure we do reasonable things.
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32-AVX
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X86-AVX
 
 define <4 x double> @merge_4f64_2f64_23(<2 x double>* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4f64_2f64_23:
@@ -12,11 +12,11 @@ define <4 x double> @merge_4f64_2f64_23(<2 x double>* %ptr) nounwind uwtable noi
 ; AVX-NEXT:    vmovups 32(%rdi), %ymm0
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_4f64_2f64_23:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovups 32(%eax), %ymm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_4f64_2f64_23:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovups 32(%eax), %ymm0
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 2
   %ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 3
   %val0 = load <2 x double>, <2 x double>* %ptr0
@@ -31,11 +31,11 @@ define <4 x double> @merge_4f64_2f64_2z(<2 x double>* %ptr) nounwind uwtable noi
 ; AVX-NEXT:    vmovaps 32(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_4f64_2f64_2z:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovaps 32(%eax), %xmm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_4f64_2f64_2z:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovaps 32(%eax), %xmm0
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 2
   %val0 = load <2 x double>, <2 x double>* %ptr0
   %res = shufflevector <2 x double> %val0, <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -48,11 +48,11 @@ define <4 x double> @merge_4f64_f64_2345(double* %ptr) nounwind uwtable noinline
 ; AVX-NEXT:    vmovups 16(%rdi), %ymm0
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_4f64_f64_2345:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovups 16(%eax), %ymm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_4f64_f64_2345:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovups 16(%eax), %ymm0
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 2
   %ptr1 = getelementptr inbounds double, double* %ptr, i64 3
   %ptr2 = getelementptr inbounds double, double* %ptr, i64 4
@@ -74,11 +74,11 @@ define <4 x double> @merge_4f64_f64_3zuu(double* %ptr) nounwind uwtable noinline
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_4f64_f64_3zuu:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_4f64_f64_3zuu:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 3
   %val0 = load double, double* %ptr0
   %res0 = insertelement <4 x double> undef, double %val0, i32 0
@@ -92,11 +92,11 @@ define <4 x double> @merge_4f64_f64_34uu(double* %ptr) nounwind uwtable noinline
 ; AVX-NEXT:    vmovups 24(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_4f64_f64_34uu:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovups 24(%eax), %xmm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_4f64_f64_34uu:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovups 24(%eax), %xmm0
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 3
   %ptr1 = getelementptr inbounds double, double* %ptr, i64 4
   %val0 = load double, double* %ptr0
@@ -112,11 +112,11 @@ define <4 x double> @merge_4f64_f64_45zz(double* %ptr) nounwind uwtable noinline
 ; AVX-NEXT:    vmovups 32(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_4f64_f64_45zz:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovups 32(%eax), %xmm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_4f64_f64_45zz:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovups 32(%eax), %xmm0
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 4
   %ptr1 = getelementptr inbounds double, double* %ptr, i64 5
   %val0 = load double, double* %ptr0
@@ -133,12 +133,12 @@ define <4 x double> @merge_4f64_f64_34z6(double* %ptr) nounwind uwtable noinline
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5],mem[6,7]
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_4f64_f64_34z6:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5],mem[6,7]
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_4f64_f64_34z6:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5],mem[6,7]
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 3
   %ptr1 = getelementptr inbounds double, double* %ptr, i64 4
   %ptr3 = getelementptr inbounds double, double* %ptr, i64 6
@@ -158,11 +158,11 @@ define <4 x i64> @merge_4i64_2i64_3z(<2 x i64>* %ptr) nounwind uwtable noinline
 ; AVX-NEXT:    vmovaps 48(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_4i64_2i64_3z:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovaps 48(%eax), %xmm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_4i64_2i64_3z:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovaps 48(%eax), %xmm0
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds <2 x i64>, <2 x i64>* %ptr, i64 3
   %val0 = load <2 x i64>, <2 x i64>* %ptr0
   %res = shufflevector <2 x i64> %val0, <2 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -175,11 +175,11 @@ define <4 x i64> @merge_4i64_i64_1234(i64* %ptr) nounwind uwtable noinline ssp {
 ; AVX-NEXT:    vmovups 8(%rdi), %ymm0
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_4i64_i64_1234:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovups 8(%eax), %ymm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_4i64_i64_1234:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovups 8(%eax), %ymm0
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
   %ptr1 = getelementptr inbounds i64, i64* %ptr, i64 2
   %ptr2 = getelementptr inbounds i64, i64* %ptr, i64 3
@@ -201,11 +201,11 @@ define <4 x i64> @merge_4i64_i64_1zzu(i64* %ptr) nounwind uwtable noinline ssp {
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_4i64_i64_1zzu:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_4i64_i64_1zzu:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
   %val0 = load i64, i64* %ptr0
   %res0 = insertelement <4 x i64> undef, i64 %val0, i32 0
@@ -220,11 +220,11 @@ define <4 x i64> @merge_4i64_i64_23zz(i64* %ptr) nounwind uwtable noinline ssp {
 ; AVX-NEXT:    vmovups 16(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_4i64_i64_23zz:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovups 16(%eax), %xmm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_4i64_i64_23zz:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovups 16(%eax), %xmm0
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 2
   %ptr1 = getelementptr inbounds i64, i64* %ptr, i64 3
   %val0 = load i64, i64* %ptr0
@@ -243,14 +243,14 @@ define <8 x float> @merge_8f32_2f32_23z5(<2 x float>* %ptr) nounwind uwtable noi
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_8f32_2f32_23z5:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovups 16(%eax), %xmm0
-; X32-AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX-NEXT:    vmovhps {{.*#+}} xmm1 = xmm1[0,1],mem[0,1]
-; X32-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_8f32_2f32_23z5:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovups 16(%eax), %xmm0
+; X86-AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX-NEXT:    vmovhps {{.*#+}} xmm1 = xmm1[0,1],mem[0,1]
+; X86-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 2
   %ptr1 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 3
   %ptr3 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 5
@@ -270,12 +270,12 @@ define <8 x float> @merge_8f32_4f32_z2(<4 x float>* %ptr) nounwind uwtable noinl
 ; AVX-NEXT:    vinsertf128 $1, 32(%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_8f32_4f32_z2:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-AVX-NEXT:    vinsertf128 $1, 32(%eax), %ymm0, %ymm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_8f32_4f32_z2:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT:    vinsertf128 $1, 32(%eax), %ymm0, %ymm0
+; X86-AVX-NEXT:    retl
   %ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 2
   %val1 = load <4 x float>, <4 x float>* %ptr1
   %res = shufflevector <4 x float> zeroinitializer, <4 x float> %val1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -288,11 +288,11 @@ define <8 x float> @merge_8f32_f32_12zzuuzz(float* %ptr) nounwind uwtable noinli
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_8f32_f32_12zzuuzz:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_8f32_f32_12zzuuzz:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 1
   %ptr1 = getelementptr inbounds float, float* %ptr, i64 2
   %val0 = load float, float* %ptr0
@@ -313,12 +313,12 @@ define <8 x float> @merge_8f32_f32_1u3u5zu8(float* %ptr) nounwind uwtable noinli
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_8f32_f32_1u3u5zu8:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_8f32_f32_1u3u5zu8:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 1
   %ptr2 = getelementptr inbounds float, float* %ptr, i64 3
   %ptr4 = getelementptr inbounds float, float* %ptr, i64 5
@@ -342,12 +342,12 @@ define <8 x i32> @merge_8i32_4i32_z3(<4 x i32>* %ptr) nounwind uwtable noinline
 ; AVX-NEXT:    vinsertf128 $1, 48(%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_8i32_4i32_z3:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-AVX-NEXT:    vinsertf128 $1, 48(%eax), %ymm0, %ymm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_8i32_4i32_z3:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT:    vinsertf128 $1, 48(%eax), %ymm0, %ymm0
+; X86-AVX-NEXT:    retl
   %ptr1 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 3
   %val1 = load <4 x i32>, <4 x i32>* %ptr1
   %res = shufflevector <4 x i32> zeroinitializer, <4 x i32> %val1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -362,13 +362,13 @@ define <8 x i32> @merge_8i32_i32_56zz9uzz(i32* %ptr) nounwind uwtable noinline s
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_8i32_i32_56zz9uzz:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_8i32_i32_56zz9uzz:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 5
   %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 6
   %ptr4 = getelementptr inbounds i32, i32* %ptr, i64 9
@@ -392,12 +392,12 @@ define <8 x i32> @merge_8i32_i32_1u3u5zu8(i32* %ptr) nounwind uwtable noinline s
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_8i32_i32_1u3u5zu8:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_8i32_i32_1u3u5zu8:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 1
   %ptr2 = getelementptr inbounds i32, i32* %ptr, i64 3
   %ptr4 = getelementptr inbounds i32, i32* %ptr, i64 5
@@ -420,11 +420,11 @@ define <16 x i16> @merge_16i16_i16_89zzzuuuuuuuuuuuz(i16* %ptr) nounwind uwtable
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 8
   %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 9
   %val0 = load i16, i16* %ptr0
@@ -444,11 +444,11 @@ define <16 x i16> @merge_16i16_i16_45u7uuuuuuuuuuuu(i16* %ptr) nounwind uwtable
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_16i16_i16_45u7uuuuuuuuuuuu:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_16i16_i16_45u7uuuuuuuuuuuu:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 4
   %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 5
   %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 7
@@ -467,11 +467,11 @@ define <16 x i16> @merge_16i16_i16_0uu3uuuuuuuuCuEF(i16* %ptr) nounwind uwtable
 ; AVX-NEXT:    vmovups (%rdi), %ymm0
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_16i16_i16_0uu3uuuuuuuuCuEF:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovups (%eax), %ymm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_16i16_i16_0uu3uuuuuuuuCuEF:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovups (%eax), %ymm0
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0
   %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3
   %ptrC = getelementptr inbounds i16, i16* %ptr, i64 12
@@ -497,12 +497,12 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF(i16* %ptr) nounwind uwtable
 ; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovups (%eax), %ymm0
-; X32-AVX-NEXT:    vandps {{\.LCPI.*}}, %ymm0, %ymm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovups (%eax), %ymm0
+; X86-AVX-NEXT:    vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0
   %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3
   %ptrC = getelementptr inbounds i16, i16* %ptr, i64 12
@@ -530,11 +530,11 @@ define <32 x i8> @merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i8* %ptr) nounw
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 4
   %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 5
   %ptr3 = getelementptr inbounds i8, i8* %ptr, i64 7
@@ -553,11 +553,11 @@ define <32 x i8> @merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu(i8* %ptr) nounw
 ; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 2
   %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 3
   %ptr3 = getelementptr inbounds i8, i8* %ptr, i64 5
@@ -585,12 +585,12 @@ define <4 x double> @merge_4f64_f64_34uz_volatile(double* %ptr) nounwind uwtable
 ; AVX-NEXT:    vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_4f64_f64_34uz_volatile:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX-NEXT:    vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_4f64_f64_34uz_volatile:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 3
   %ptr1 = getelementptr inbounds double, double* %ptr, i64 4
   %val0 = load volatile double, double* %ptr0
@@ -638,18 +638,18 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind
 ; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX512F-NEXT:    retq
 ;
-; X32-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    movzwl (%eax), %ecx
-; X32-AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; X32-AVX-NEXT:    vpinsrw $4, 24(%eax), %xmm0, %xmm0
-; X32-AVX-NEXT:    vpinsrw $6, 28(%eax), %xmm0, %xmm0
-; X32-AVX-NEXT:    vpinsrw $7, 30(%eax), %xmm0, %xmm0
-; X32-AVX-NEXT:    vmovd %ecx, %xmm1
-; X32-AVX-NEXT:    vpinsrw $3, 6(%eax), %xmm1, %xmm1
-; X32-AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movzwl (%eax), %ecx
+; X86-AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpinsrw $4, 24(%eax), %xmm0, %xmm0
+; X86-AVX-NEXT:    vpinsrw $6, 28(%eax), %xmm0, %xmm0
+; X86-AVX-NEXT:    vpinsrw $7, 30(%eax), %xmm0, %xmm0
+; X86-AVX-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX-NEXT:    vpinsrw $3, 6(%eax), %xmm1, %xmm1
+; X86-AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0
   %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3
   %ptrC = getelementptr inbounds i16, i16* %ptr, i64 12
@@ -686,14 +686,14 @@ define <2 x i8> @PR42846(<2 x i8>* %j, <2 x i8> %k) {
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
-; X32-AVX-LABEL: PR42846:
-; X32-AVX:       # %bb.0:
-; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovdqa l, %ymm0
-; X32-AVX-NEXT:    vpextrw $0, %xmm0, (%eax)
-; X32-AVX-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
-; X32-AVX-NEXT:    vzeroupper
-; X32-AVX-NEXT:    retl
+; X86-AVX-LABEL: PR42846:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovdqa l, %ymm0
+; X86-AVX-NEXT:    vpextrw $0, %xmm0, (%eax)
+; X86-AVX-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; X86-AVX-NEXT:    vzeroupper
+; X86-AVX-NEXT:    retl
   %t0 = load volatile <32 x i8>, <32 x i8>* @l, align 32
   %shuffle = shufflevector <32 x i8> %t0, <32 x i8> undef, <2 x i32> <i32 0, i32 1>
   store <2 x i8> %shuffle, <2 x i8>* %j, align 2

diff  --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll
index 94bb8db48546..5160b3c3f1ec 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll
@@ -3,7 +3,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL
 ;
 ; Just one 32-bit run to make sure we do reasonable things.
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X32-AVX512F
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X86-AVX512F
 
 define <8 x double> @merge_8f64_2f64_12u4(<2 x double>* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_8f64_2f64_12u4:
@@ -13,13 +13,13 @@ define <8 x double> @merge_8f64_2f64_12u4(<2 x double>* %ptr) nounwind uwtable n
 ; ALL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_8f64_2f64_12u4:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovups 16(%eax), %ymm0
-; X32-AVX512F-NEXT:    vinsertf128 $1, 64(%eax), %ymm0, %ymm1
-; X32-AVX512F-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_8f64_2f64_12u4:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovups 16(%eax), %ymm0
+; X86-AVX512F-NEXT:    vinsertf128 $1, 64(%eax), %ymm0, %ymm1
+; X86-AVX512F-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 1
   %ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 2
   %ptr3 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 4
@@ -41,14 +41,14 @@ define <8 x double> @merge_8f64_2f64_23z5(<2 x double>* %ptr) nounwind uwtable n
 ; ALL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_8f64_2f64_23z5:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovups 32(%eax), %ymm0
-; X32-AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX512F-NEXT:    vinsertf128 $1, 80(%eax), %ymm1, %ymm1
-; X32-AVX512F-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_8f64_2f64_23z5:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovups 32(%eax), %ymm0
+; X86-AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX512F-NEXT:    vinsertf128 $1, 80(%eax), %ymm1, %ymm1
+; X86-AVX512F-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 2
   %ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 3
   %ptr3 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 5
@@ -68,12 +68,12 @@ define <8 x double> @merge_8f64_4f64_z2(<4 x double>* %ptr) nounwind uwtable noi
 ; ALL-NEXT:    vinsertf64x4 $1, 64(%rdi), %zmm0, %zmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_8f64_4f64_z2:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-AVX512F-NEXT:    vinsertf64x4 $1, 64(%eax), %zmm0, %zmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_8f64_4f64_z2:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X86-AVX512F-NEXT:    vinsertf64x4 $1, 64(%eax), %zmm0, %zmm0
+; X86-AVX512F-NEXT:    retl
   %ptr1 = getelementptr inbounds <4 x double>, <4 x double>* %ptr, i64 2
   %val1 = load <4 x double>, <4 x double>* %ptr1
   %res = shufflevector <4 x double> zeroinitializer, <4 x double> %val1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -86,11 +86,11 @@ define <8 x double> @merge_8f64_f64_23uuuuu9(double* %ptr) nounwind uwtable noin
 ; ALL-NEXT:    vmovups 16(%rdi), %zmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_8f64_f64_23uuuuu9:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovups 16(%eax), %zmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_8f64_f64_23uuuuu9:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovups 16(%eax), %zmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 2
   %ptr1 = getelementptr inbounds double, double* %ptr, i64 3
   %ptr7 = getelementptr inbounds double, double* %ptr, i64 9
@@ -109,11 +109,11 @@ define <8 x double> @merge_8f64_f64_12zzuuzz(double* %ptr) nounwind uwtable noin
 ; ALL-NEXT:    vmovups 8(%rdi), %xmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_8f64_f64_12zzuuzz:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovups 8(%eax), %xmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_8f64_f64_12zzuuzz:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovups 8(%eax), %xmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 1
   %ptr1 = getelementptr inbounds double, double* %ptr, i64 2
   %val0 = load double, double* %ptr0
@@ -134,12 +134,12 @@ define <8 x double> @merge_8f64_f64_1u3u5zu8(double* %ptr) nounwind uwtable noin
 ; ALL-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_8f64_f64_1u3u5zu8:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovdqu64 8(%eax), %zmm0
-; X32-AVX512F-NEXT:    vpandq {{\.LCPI.*}}, %zmm0, %zmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_8f64_f64_1u3u5zu8:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovdqu64 8(%eax), %zmm0
+; X86-AVX512F-NEXT:    vpandq {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 1
   %ptr2 = getelementptr inbounds double, double* %ptr, i64 3
   %ptr4 = getelementptr inbounds double, double* %ptr, i64 5
@@ -163,12 +163,12 @@ define <8 x i64> @merge_8i64_4i64_z3(<4 x i64>* %ptr) nounwind uwtable noinline
 ; ALL-NEXT:    vinsertf64x4 $1, 96(%rdi), %zmm0, %zmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_8i64_4i64_z3:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-AVX512F-NEXT:    vinsertf64x4 $1, 96(%eax), %zmm0, %zmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_8i64_4i64_z3:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X86-AVX512F-NEXT:    vinsertf64x4 $1, 96(%eax), %zmm0, %zmm0
+; X86-AVX512F-NEXT:    retl
   %ptr1 = getelementptr inbounds <4 x i64>, <4 x i64>* %ptr, i64 3
   %val1 = load <4 x i64>, <4 x i64>* %ptr1
   %res = shufflevector <4 x i64> zeroinitializer, <4 x i64> %val1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -183,13 +183,13 @@ define <8 x i64> @merge_8i64_i64_56zz9uzz(i64* %ptr) nounwind uwtable noinline s
 ; ALL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_8i64_i64_56zz9uzz:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovups 40(%eax), %xmm0
-; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X32-AVX512F-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_8i64_i64_56zz9uzz:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovups 40(%eax), %xmm0
+; X86-AVX512F-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX512F-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 5
   %ptr1 = getelementptr inbounds i64, i64* %ptr, i64 6
   %ptr4 = getelementptr inbounds i64, i64* %ptr, i64 9
@@ -213,12 +213,12 @@ define <8 x i64> @merge_8i64_i64_1u3u5zu8(i64* %ptr) nounwind uwtable noinline s
 ; ALL-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_8i64_i64_1u3u5zu8:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovdqu64 8(%eax), %zmm0
-; X32-AVX512F-NEXT:    vpandq {{\.LCPI.*}}, %zmm0, %zmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_8i64_i64_1u3u5zu8:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovdqu64 8(%eax), %zmm0
+; X86-AVX512F-NEXT:    vpandq {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
   %ptr2 = getelementptr inbounds i64, i64* %ptr, i64 3
   %ptr4 = getelementptr inbounds i64, i64* %ptr, i64 5
@@ -241,11 +241,11 @@ define <16 x float> @merge_16f32_f32_89zzzuuuuuuuuuuuz(float* %ptr) nounwind uwt
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_16f32_f32_89zzzuuuuuuuuuuuz:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_16f32_f32_89zzzuuuuuuuuuuuz:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 8
   %ptr1 = getelementptr inbounds float, float* %ptr, i64 9
   %val0 = load float, float* %ptr0
@@ -265,11 +265,11 @@ define <16 x float> @merge_16f32_f32_45u7uuuuuuuuuuuu(float* %ptr) nounwind uwta
 ; ALL-NEXT:    vmovups 16(%rdi), %xmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_16f32_f32_45u7uuuuuuuuuuuu:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovups 16(%eax), %xmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_16f32_f32_45u7uuuuuuuuuuuu:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovups 16(%eax), %xmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 4
   %ptr1 = getelementptr inbounds float, float* %ptr, i64 5
   %ptr3 = getelementptr inbounds float, float* %ptr, i64 7
@@ -288,11 +288,11 @@ define <16 x float> @merge_16f32_f32_0uu3uuuuuuuuCuEF(float* %ptr) nounwind uwta
 ; ALL-NEXT:    vmovups (%rdi), %zmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_16f32_f32_0uu3uuuuuuuuCuEF:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovups (%eax), %zmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_16f32_f32_0uu3uuuuuuuuCuEF:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovups (%eax), %zmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 0
   %ptr3 = getelementptr inbounds float, float* %ptr, i64 3
   %ptrC = getelementptr inbounds float, float* %ptr, i64 12
@@ -320,14 +320,14 @@ define <16 x float> @merge_16f32_f32_0uu3zzuuuuuzCuEF(float* %ptr) nounwind uwta
 ; ALL-NEXT:    vpermi2ps %zmm2, %zmm1, %zmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_16f32_f32_0uu3zzuuuuuzCuEF:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovups (%eax), %zmm1
-; X32-AVX512F-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; X32-AVX512F-NEXT:    vmovaps {{.*#+}} zmm0 = <0,u,u,3,20,21,u,u,u,u,u,u,12,29,14,15>
-; X32-AVX512F-NEXT:    vpermi2ps %zmm2, %zmm1, %zmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_16f32_f32_0uu3zzuuuuuzCuEF:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovups (%eax), %zmm1
+; X86-AVX512F-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; X86-AVX512F-NEXT:    vmovaps {{.*#+}} zmm0 = <0,u,u,3,20,21,u,u,u,u,u,u,12,29,14,15>
+; X86-AVX512F-NEXT:    vpermi2ps %zmm2, %zmm1, %zmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 0
   %ptr3 = getelementptr inbounds float, float* %ptr, i64 3
   %ptrC = getelementptr inbounds float, float* %ptr, i64 12
@@ -355,11 +355,11 @@ define <16 x i32> @merge_16i32_i32_12zzzuuuuuuuuuuuz(i32* %ptr) nounwind uwtable
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_16i32_i32_12zzzuuuuuuuuuuuz:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_16i32_i32_12zzzuuuuuuuuuuuz:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 1
   %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 2
   %val0 = load i32, i32* %ptr0
@@ -379,11 +379,11 @@ define <16 x i32> @merge_16i32_i32_23u5uuuuuuuuuuuu(i32* %ptr) nounwind uwtable
 ; ALL-NEXT:    vmovups 8(%rdi), %xmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_16i32_i32_23u5uuuuuuuuuuuu:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovups 8(%eax), %xmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_16i32_i32_23u5uuuuuuuuuuuu:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovups 8(%eax), %xmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 2
   %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 3
   %ptr3 = getelementptr inbounds i32, i32* %ptr, i64 5
@@ -402,11 +402,11 @@ define <16 x i32> @merge_16i32_i32_0uu3uuuuuuuuCuEF(i32* %ptr) nounwind uwtable
 ; ALL-NEXT:    vmovups (%rdi), %zmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovups (%eax), %zmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovups (%eax), %zmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 0
   %ptr3 = getelementptr inbounds i32, i32* %ptr, i64 3
   %ptrC = getelementptr inbounds i32, i32* %ptr, i64 12
@@ -432,12 +432,12 @@ define <16 x i32> @merge_16i32_i32_0uu3zzuuuuuzCuEF(i32* %ptr) nounwind uwtable
 ; ALL-NEXT:    vpandd {{.*}}(%rip), %zmm0, %zmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovdqu64 (%eax), %zmm0
-; X32-AVX512F-NEXT:    vpandd {{\.LCPI.*}}, %zmm0, %zmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovdqu64 (%eax), %zmm0
+; X86-AVX512F-NEXT:    vpandd {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 0
   %ptr3 = getelementptr inbounds i32, i32* %ptr, i64 3
   %ptrC = getelementptr inbounds i32, i32* %ptr, i64 12
@@ -465,11 +465,11 @@ define <32 x i16> @merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz(i16* %ptr) n
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 1
   %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 2
   %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 4
@@ -490,11 +490,11 @@ define <32 x i16> @merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i16* %ptr) n
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 4
   %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 5
   %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 7
@@ -513,11 +513,11 @@ define <32 x i16> @merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu(i16* %ptr) n
 ; ALL-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 2
   %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 3
   %val0 = load i16, i16* %ptr0
@@ -538,11 +538,11 @@ define <64 x i8> @merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 1
   %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 2
   %ptr3 = getelementptr inbounds i8, i8* %ptr, i64 4
@@ -569,11 +569,11 @@ define <64 x i8> @merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu
 ; ALL-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 1
   %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 2
   %ptr3 = getelementptr inbounds i8, i8* %ptr, i64 4
@@ -604,14 +604,14 @@ define <8 x double> @merge_8f64_f64_23uuuuu9_volatile(double* %ptr) nounwind uwt
 ; ALL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_8f64_f64_23uuuuu9_volatile:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX512F-NEXT:    vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
-; X32-AVX512F-NEXT:    vbroadcastsd 72(%eax), %ymm1
-; X32-AVX512F-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_8f64_f64_23uuuuu9_volatile:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512F-NEXT:    vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
+; X86-AVX512F-NEXT:    vbroadcastsd 72(%eax), %ymm1
+; X86-AVX512F-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 2
   %ptr1 = getelementptr inbounds double, double* %ptr, i64 3
   %ptr7 = getelementptr inbounds double, double* %ptr, i64 9
@@ -636,17 +636,17 @@ define <16 x i32> @merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile(i32* %ptr) nounwind
 ; ALL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; ALL-NEXT:    retq
 ;
-; X32-AVX512F-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile:
-; X32-AVX512F:       # %bb.0:
-; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-AVX512F-NEXT:    vpinsrd $3, 12(%eax), %xmm0, %xmm0
-; X32-AVX512F-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-AVX512F-NEXT:    vpinsrd $2, 56(%eax), %xmm1, %xmm1
-; X32-AVX512F-NEXT:    vpinsrd $3, 60(%eax), %xmm1, %xmm1
-; X32-AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; X32-AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; X32-AVX512F-NEXT:    retl
+; X86-AVX512F-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile:
+; X86-AVX512F:       # %bb.0:
+; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512F-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512F-NEXT:    vpinsrd $3, 12(%eax), %xmm0, %xmm0
+; X86-AVX512F-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-AVX512F-NEXT:    vpinsrd $2, 56(%eax), %xmm1, %xmm1
+; X86-AVX512F-NEXT:    vpinsrd $3, 60(%eax), %xmm1, %xmm1
+; X86-AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; X86-AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; X86-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 0
   %ptr3 = getelementptr inbounds i32, i32* %ptr, i64 3
   %ptrC = getelementptr inbounds i32, i32* %ptr, i64 12


        


More information about the llvm-commits mailing list