[llvm] r332118 - [DAGCombiner] Set the right SDLoc on a newly-created sextload (6/N)

Vedant Kumar via llvm-commits llvm-commits@lists.llvm.org
Fri May 11 11:40:08 PDT 2018


Author: vedantk
Date: Fri May 11 11:40:08 2018
New Revision: 332118

URL: http://llvm.org/viewvc/llvm-project?rev=332118&view=rev
Log:
[DAGCombiner] Set the right SDLoc on a newly-created sextload (6/N)

This teaches tryToFoldExtOfLoad to set the right location on a
newly-created extload. With that in place, the logic for performing the
([s|z]ext (load ...)) -> extload combine becomes identical for sexts and
zexts, and we can get rid of one copy of the logic.

The test case churn is due to dependencies on IROrders inherited from
the wrong SDLoc.

Part of: llvm.org/PR37262

Differential Revision: https://reviews.llvm.org/D46158
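
For illustration, a minimal IR sketch (hypothetical; not part of this
commit) of the ([s|z]ext (load ...)) pattern the shared helper folds. On
x86-64 the sext and the load become a single sign-extending load
(movswl), and after this change the sextload node inherits the SDLoc of
the load rather than of the sext:

  define i32 @sext_of_load(i16* %p) {
    %v = load i16, i16* %p    ; the load whose SDLoc the extload now inherits
    %s = sext i16 %v to i32   ; folds into a sextload (movswl on x86-64)
    ret i32 %s
  }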

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/trunk/test/CodeGen/AArch64/arm64-ldp-cluster.ll
    llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll
    llvm/trunk/test/CodeGen/X86/fold-sext-trunc.ll
    llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll
    llvm/trunk/test/CodeGen/X86/pr32284.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll
    llvm/trunk/test/CodeGen/X86/widen_arith-4.ll
    llvm/trunk/test/CodeGen/X86/widen_arith-5.ll

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=332118&r1=332117&r2=332118&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Fri May 11 11:40:08 2018
@@ -7782,9 +7782,9 @@ static SDValue tryToFoldExtOfLoad(Select
     return {};
 
   LoadSDNode *LN0 = cast<LoadSDNode>(N0);
-  SDValue ExtLoad =
-      DAG.getExtLoad(ExtLoadType, DL, VT, LN0->getChain(), LN0->getBasePtr(),
-                     N0.getValueType(), LN0->getMemOperand());
+  SDValue ExtLoad = DAG.getExtLoad(ExtLoadType, SDLoc(LN0), VT, LN0->getChain(),
+                                   LN0->getBasePtr(), N0.getValueType(),
+                                   LN0->getMemOperand());
   Combiner.ExtendSetCCUses(SetCCs, N0, ExtLoad, DL, ExtOpc);
   // If the load value is used only by N, replace it via CombineTo N.
   bool NoReplaceTrunc = SDValue(LN0, 0).hasOneUse();
@@ -8131,41 +8131,11 @@ SDValue DAGCombiner::visitZERO_EXTEND(SD
                        X, DAG.getConstant(Mask, DL, VT));
   }
 
-  // fold (zext (load x)) -> (zext (truncate (zextload x)))
-  // Only generate vector extloads when 1) they're legal, and 2) they are
-  // deemed desirable by the target.
-  if (ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
-      ((!LegalOperations && !VT.isVector() &&
-        !cast<LoadSDNode>(N0)->isVolatile()) ||
-       TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, N0.getValueType()))) {
-    bool DoXform = true;
-    SmallVector<SDNode*, 4> SetCCs;
-    if (!N0.hasOneUse())
-      DoXform = ExtendUsesToFormExtLoad(VT, N, N0, ISD::ZERO_EXTEND, SetCCs,
-                                        TLI);
-    if (VT.isVector())
-      DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0));
-    if (DoXform) {
-      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
-      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), VT,
-                                       LN0->getChain(),
-                                       LN0->getBasePtr(), N0.getValueType(),
-                                       LN0->getMemOperand());
-
-      ExtendSetCCUses(SetCCs, N0, ExtLoad, SDLoc(N), ISD::ZERO_EXTEND);
-      // If the load value is used only by N, replace it via CombineTo N.
-      bool NoReplaceTrunc = SDValue(LN0, 0).hasOneUse();
-      CombineTo(N, ExtLoad);
-      if (NoReplaceTrunc) {
-        DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
-      } else {
-        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
-                                    N0.getValueType(), ExtLoad);
-        CombineTo(LN0, Trunc, ExtLoad.getValue(1));
-      }
-      return SDValue(N, 0); // Return N so it doesn't get rechecked!
-    }
-  }
+  // Try to simplify (zext (load x)).
+  if (SDValue foldedExt =
+          tryToFoldExtOfLoad(DAG, *this, TLI, VT, LegalOperations, N, N0,
+                             SDLoc(N), ISD::ZEXTLOAD, ISD::ZERO_EXTEND))
+    return foldedExt;
 
   // fold (zext (load x)) to multiple smaller zextloads.
   // Only on illegal but splittable vectors.

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-ldp-cluster.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-ldp-cluster.ll?rev=332118&r1=332117&r2=332118&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ldp-cluster.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ldp-cluster.ll Fri May 11 11:40:08 2018
@@ -67,14 +67,14 @@ define i32 @ldur_int(i32* %a) nounwind {
 ; Test sext + zext clustering.
 ; CHECK: ********** MI Scheduling **********
 ; CHECK-LABEL: ldp_half_sext_zext_int:%bb.0
-; CHECK: Cluster ld/st SU(4) - SU(3)
-; CHECK: SU(3):   undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui
-; CHECK: SU(4):   %{{[0-9]+}}:gpr64 = LDRSWui
+; CHECK: Cluster ld/st SU(3) - SU(4)
+; CHECK: SU(3):   %{{[0-9]+}}:gpr64 = LDRSWui
+; CHECK: SU(4):   undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui
 ; EXYNOSM1: ********** MI Scheduling **********
 ; EXYNOSM1-LABEL: ldp_half_sext_zext_int:%bb.0
-; EXYNOSM1: Cluster ld/st SU(4) - SU(3)
-; EXYNOSM1: SU(3):   undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui
-; EXYNOSM1: SU(4):   %{{[0-9]+}}:gpr64 = LDRSWui
+; EXYNOSM1: Cluster ld/st SU(3) - SU(4)
+; EXYNOSM1: SU(3):   %{{[0-9]+}}:gpr64 = LDRSWui
+; EXYNOSM1: SU(4):   undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui
 define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind {
   %tmp0 = load i64, i64* %q, align 4
   %tmp = load i32, i32* %p, align 4

Modified: llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll?rev=332118&r1=332117&r2=332118&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll Fri May 11 11:40:08 2018
@@ -1949,6 +1949,8 @@ define i96 @test_insertelement_variable_
 ; KNL-NEXT:    vpinsrb $14, 208(%rbp), %xmm3, %xmm3
 ; KNL-NEXT:    vpinsrb $15, 216(%rbp), %xmm3, %xmm3
 ; KNL-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; KNL-NEXT:    movl 744(%rbp), %eax
+; KNL-NEXT:    andl $127, %eax
 ; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; KNL-NEXT:    vpcmpeqb %ymm3, %ymm2, %ymm2
 ; KNL-NEXT:    vpternlogq $15, %zmm2, %zmm2, %zmm2
@@ -1956,8 +1958,6 @@ define i96 @test_insertelement_variable_
 ; KNL-NEXT:    vpternlogq $15, %zmm1, %zmm1, %zmm1
 ; KNL-NEXT:    vpcmpeqb %ymm3, %ymm0, %ymm0
 ; KNL-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
-; KNL-NEXT:    movl 744(%rbp), %eax
-; KNL-NEXT:    andl $127, %eax
 ; KNL-NEXT:    cmpb $0, 736(%rbp)
 ; KNL-NEXT:    vmovdqa %ymm3, {{[0-9]+}}(%rsp)
 ; KNL-NEXT:    vmovdqa %ymm0, {{[0-9]+}}(%rsp)
@@ -2130,10 +2130,10 @@ define i96 @test_insertelement_variable_
 ; SKX-NEXT:    vpinsrb $14, 720(%rbp), %xmm2, %xmm2
 ; SKX-NEXT:    vpinsrb $15, 728(%rbp), %xmm2, %xmm2
 ; SKX-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
-; SKX-NEXT:    vptestmb %zmm0, %zmm0, %k0
-; SKX-NEXT:    vptestmb %zmm1, %zmm1, %k1
 ; SKX-NEXT:    movl 744(%rbp), %eax
 ; SKX-NEXT:    andl $127, %eax
+; SKX-NEXT:    vptestmb %zmm0, %zmm0, %k0
+; SKX-NEXT:    vptestmb %zmm1, %zmm1, %k1
 ; SKX-NEXT:    cmpb $0, 736(%rbp)
 ; SKX-NEXT:    vpmovm2b %k1, %zmm0
 ; SKX-NEXT:    vmovdqa64 %zmm0, {{[0-9]+}}(%rsp)

Modified: llvm/trunk/test/CodeGen/X86/fold-sext-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-sext-trunc.ll?rev=332118&r1=332117&r2=332118&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-sext-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-sext-trunc.ll Fri May 11 11:40:08 2018
@@ -1,20 +1,61 @@
 ; RUN: llc < %s -mtriple=x86_64-- | grep movslq | count 1
+; RUN: llc < %s -O0 -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -stop-after livedebugvalues -o - | FileCheck %s -check-prefix=MIR
 ; PR4050
 
-	%0 = type { i64 }		; type %0
-	%struct.S1 = type { i16, i32 }
-@g_10 = external global %struct.S1		; <%struct.S1*> [#uses=2]
+%0 = type { i64 }
+%struct.S1 = type { i16, i32 }
+
+@g_10 = external global %struct.S1
 
 declare void @func_28(i64, i64)
 
-define void @int322(i32 %foo) nounwind {
+define void @int322(i32 %foo) !dbg !5 {
 entry:
-	%val = load i64, i64* getelementptr (%0, %0* bitcast (%struct.S1* @g_10 to %0*), i32 0, i32 0)		; <i64> [#uses=1]
-	%0 = load i32, i32* getelementptr (%struct.S1, %struct.S1* @g_10, i32 0, i32 1), align 4		; <i32> [#uses=1]
-	%1 = sext i32 %0 to i64		; <i64> [#uses=1]
-	%tmp4.i = lshr i64 %val, 32		; <i64> [#uses=1]
-	%tmp5.i = trunc i64 %tmp4.i to i32		; <i32> [#uses=1]
-	%2 = sext i32 %tmp5.i to i64		; <i64> [#uses=1]
-	tail call void @func_28(i64 %2, i64 %1) nounwind
-	ret void
+  %val = load i64, i64* getelementptr (%0, %0* bitcast (%struct.S1* @g_10 to %0*), i32 0, i32 0), !dbg !16
+  %0 = load i32, i32* getelementptr inbounds (%struct.S1, %struct.S1* @g_10, i32 0, i32 1), align 4, !dbg !17
+; MIR: renamable $rax = MOVSX64rm32 {{.*}}, @g_10 + 4,{{.*}} debug-location !17 :: (dereferenceable load 4 from `i64* getelementptr (%0, %0* bitcast (%struct.S1* @g_10 to %0*), i32 0, i32 0)` + 4)
+  %1 = sext i32 %0 to i64, !dbg !18
+  %tmp4.i = lshr i64 %val, 32, !dbg !19
+  %tmp5.i = trunc i64 %tmp4.i to i32, !dbg !20
+  %2 = sext i32 %tmp5.i to i64, !dbg !21
+  tail call void @func_28(i64 %2, i64 %1) #0, !dbg !22
+  call void @llvm.dbg.value(metadata i64 %val, metadata !8, metadata !DIExpression()), !dbg !16
+  call void @llvm.dbg.value(metadata i32 %0, metadata !10, metadata !DIExpression()), !dbg !17
+  call void @llvm.dbg.value(metadata i64 %1, metadata !12, metadata !DIExpression()), !dbg !18
+  call void @llvm.dbg.value(metadata i64 %tmp4.i, metadata !13, metadata !DIExpression()), !dbg !19
+  call void @llvm.dbg.value(metadata i32 %tmp5.i, metadata !14, metadata !DIExpression()), !dbg !20
+  call void @llvm.dbg.value(metadata i64 %2, metadata !15, metadata !DIExpression()), !dbg !21
+  ret void, !dbg !23
 }
+
+declare void @llvm.dbg.value(metadata, metadata, metadata)
+
+!llvm.dbg.cu = !{!0}
+!llvm.debugify = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "/Users/vsk/src/llvm.org-master/llvm/test/CodeGen/X86/fold-sext-trunc.ll", directory: "/")
+!2 = !{}
+!3 = !{i32 8}
+!4 = !{i32 6}
+!5 = distinct !DISubprogram(name: "int322", linkageName: "int322", scope: null, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, retainedNodes: !7)
+!6 = !DISubroutineType(types: !2)
+!7 = !{!8, !10, !12, !13, !14, !15}
+!8 = !DILocalVariable(name: "1", scope: !5, file: !1, line: 1, type: !9)
+!9 = !DIBasicType(name: "ty64", size: 64, encoding: DW_ATE_unsigned)
+!10 = !DILocalVariable(name: "2", scope: !5, file: !1, line: 2, type: !11)
+!11 = !DIBasicType(name: "ty32", size: 32, encoding: DW_ATE_unsigned)
+!12 = !DILocalVariable(name: "3", scope: !5, file: !1, line: 3, type: !9)
+!13 = !DILocalVariable(name: "4", scope: !5, file: !1, line: 4, type: !9)
+!14 = !DILocalVariable(name: "5", scope: !5, file: !1, line: 5, type: !11)
+!15 = !DILocalVariable(name: "6", scope: !5, file: !1, line: 6, type: !9)
+!16 = !DILocation(line: 1, column: 1, scope: !5)
+!17 = !DILocation(line: 2, column: 1, scope: !5)
+!18 = !DILocation(line: 3, column: 1, scope: !5)
+!19 = !DILocation(line: 4, column: 1, scope: !5)
+!20 = !DILocation(line: 5, column: 1, scope: !5)
+!21 = !DILocation(line: 6, column: 1, scope: !5)
+!22 = !DILocation(line: 7, column: 1, scope: !5)
+!23 = !DILocation(line: 8, column: 1, scope: !5)
+!24 = !{i32 2, !"Debug Info Version", i32 3}
+!llvm.module.flags = !{!24}

Modified: llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll?rev=332118&r1=332117&r2=332118&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll Fri May 11 11:40:08 2018
@@ -25,10 +25,10 @@ define <2 x double> @signbits_sext_v2i64
 define <4 x float> @signbits_sext_v4i64_sitofp_v4f32(i8 signext %a0, i16 signext %a1, i32 %a2, i32 %a3) nounwind {
 ; X32-LABEL: signbits_sext_v4i64_sitofp_v4f32:
 ; X32:       # %bb.0:
-; X32-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movswl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vmovd %eax, %xmm0
-; X32-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
+; X32-NEXT:    movswl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movsbl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    vmovd %ecx, %xmm0
+; X32-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
 ; X32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; X32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
 ; X32-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
@@ -270,6 +270,7 @@ define float @signbits_ashr_sext_sextinr
 ; X32-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
 ; X32:       # %bb.0:
 ; X32-NEXT:    pushl %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
 ; X32-NEXT:    vpsrlq $60, %xmm2, %xmm3
 ; X32-NEXT:    vpsrlq $61, %xmm2, %xmm2
@@ -279,7 +280,6 @@ define float @signbits_ashr_sext_sextinr
 ; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
 ; X32-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; X32-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpinsrd $0, %eax, %xmm1, %xmm1
 ; X32-NEXT:    sarl $31, %eax
 ; X32-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/pr32284.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pr32284.ll?rev=332118&r1=332117&r2=332118&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pr32284.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pr32284.ll Fri May 11 11:40:08 2018
@@ -202,31 +202,31 @@ define void @f1() {
 ; 686-O0-NEXT:    setne {{[0-9]+}}(%esp)
 ; 686-O0-NEXT:    movl var_5, %ecx
 ; 686-O0-NEXT:    movl %ecx, %edx
-; 686-O0-NEXT:    subl $-1, %edx
+; 686-O0-NEXT:    sarl $31, %edx
+; 686-O0-NEXT:    movl %ecx, %esi
+; 686-O0-NEXT:    subl $-1, %esi
 ; 686-O0-NEXT:    sete %bl
-; 686-O0-NEXT:    movzbl %bl, %esi
-; 686-O0-NEXT:    movl %ecx, %edi
-; 686-O0-NEXT:    sarl $31, %edi
+; 686-O0-NEXT:    movzbl %bl, %edi
 ; 686-O0-NEXT:    xorl %ebp, %ebp
 ; 686-O0-NEXT:    addl $7093, %ecx # imm = 0x1BB5
-; 686-O0-NEXT:    adcxl %ebp, %edi
-; 686-O0-NEXT:    subl %esi, %ecx
-; 686-O0-NEXT:    sbbl $0, %edi
+; 686-O0-NEXT:    adcxl %ebp, %edx
+; 686-O0-NEXT:    subl %edi, %ecx
+; 686-O0-NEXT:    sbbl $0, %edx
 ; 686-O0-NEXT:    setl %bl
-; 686-O0-NEXT:    movzbl %bl, %esi
-; 686-O0-NEXT:    movl %esi, var_57
+; 686-O0-NEXT:    movzbl %bl, %edi
+; 686-O0-NEXT:    movl %edi, var_57
 ; 686-O0-NEXT:    movl $0, var_57+4
-; 686-O0-NEXT:    movl var_5, %esi
-; 686-O0-NEXT:    subl $-1, %esi
+; 686-O0-NEXT:    movl var_5, %edi
+; 686-O0-NEXT:    subl $-1, %edi
 ; 686-O0-NEXT:    sete %bl
 ; 686-O0-NEXT:    movzbl %bl, %ebp
 ; 686-O0-NEXT:    movl %ebp, _ZN8struct_210member_2_0E
 ; 686-O0-NEXT:    movl $0, _ZN8struct_210member_2_0E+4
-; 686-O0-NEXT:    movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; 686-O0-NEXT:    movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; 686-O0-NEXT:    movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; 686-O0-NEXT:    movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; 686-O0-NEXT:    movl %esi, (%esp) # 4-byte Spill
+; 686-O0-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; 686-O0-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; 686-O0-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; 686-O0-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; 686-O0-NEXT:    movl %edi, (%esp) # 4-byte Spill
 ; 686-O0-NEXT:    addl $24, %esp
 ; 686-O0-NEXT:    .cfi_def_cfa_offset 20
 ; 686-O0-NEXT:    popl %esi

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll?rev=332118&r1=332117&r2=332118&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll Fri May 11 11:40:08 2018
@@ -244,6 +244,10 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; SSE2-NEXT:    # kill: def $edx killed $edx def $rdx
 ; SSE2-NEXT:    # kill: def $esi killed $esi def $rsi
 ; SSE2-NEXT:    # kill: def $edi killed $edi def $rdi
+; SSE2-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
+; SSE2-NEXT:    andl $7, %r10d
+; SSE2-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT:    andl $7, %eax
 ; SSE2-NEXT:    andl $7, %edi
 ; SSE2-NEXT:    andl $7, %esi
 ; SSE2-NEXT:    andl $7, %edx
@@ -251,10 +255,6 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; SSE2-NEXT:    andl $7, %r8d
 ; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE2-NEXT:    andl $7, %r9d
-; SSE2-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
-; SSE2-NEXT:    andl $7, %r10d
-; SSE2-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT:    andl $7, %eax
 ; SSE2-NEXT:    movzwl -24(%rsp,%rcx,2), %ecx
 ; SSE2-NEXT:    movd %ecx, %xmm0
 ; SSE2-NEXT:    movzwl -24(%rsp,%rdx,2), %ecx
@@ -288,6 +288,10 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; SSSE3-NEXT:    # kill: def $edx killed $edx def $rdx
 ; SSSE3-NEXT:    # kill: def $esi killed $esi def $rsi
 ; SSSE3-NEXT:    # kill: def $edi killed $edi def $rdi
+; SSSE3-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
+; SSSE3-NEXT:    andl $7, %r10d
+; SSSE3-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
+; SSSE3-NEXT:    andl $7, %eax
 ; SSSE3-NEXT:    andl $7, %edi
 ; SSSE3-NEXT:    andl $7, %esi
 ; SSSE3-NEXT:    andl $7, %edx
@@ -295,10 +299,6 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; SSSE3-NEXT:    andl $7, %r8d
 ; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSSE3-NEXT:    andl $7, %r9d
-; SSSE3-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
-; SSSE3-NEXT:    andl $7, %r10d
-; SSSE3-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
-; SSSE3-NEXT:    andl $7, %eax
 ; SSSE3-NEXT:    movzwl -24(%rsp,%rcx,2), %ecx
 ; SSSE3-NEXT:    movd %ecx, %xmm0
 ; SSSE3-NEXT:    movzwl -24(%rsp,%rdx,2), %ecx
@@ -332,6 +332,10 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; SSE41-NEXT:    # kill: def $edx killed $edx def $rdx
 ; SSE41-NEXT:    # kill: def $esi killed $esi def $rsi
 ; SSE41-NEXT:    # kill: def $edi killed $edi def $rdi
+; SSE41-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
+; SSE41-NEXT:    andl $7, %r10d
+; SSE41-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
+; SSE41-NEXT:    andl $7, %eax
 ; SSE41-NEXT:    andl $7, %edi
 ; SSE41-NEXT:    andl $7, %esi
 ; SSE41-NEXT:    andl $7, %edx
@@ -339,10 +343,6 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; SSE41-NEXT:    andl $7, %r8d
 ; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE41-NEXT:    andl $7, %r9d
-; SSE41-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
-; SSE41-NEXT:    andl $7, %r10d
-; SSE41-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
-; SSE41-NEXT:    andl $7, %eax
 ; SSE41-NEXT:    movzwl -24(%rsp,%rdi,2), %edi
 ; SSE41-NEXT:    movd %edi, %xmm0
 ; SSE41-NEXT:    pinsrw $1, -24(%rsp,%rsi,2), %xmm0
@@ -350,8 +350,8 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; SSE41-NEXT:    pinsrw $3, -24(%rsp,%rcx,2), %xmm0
 ; SSE41-NEXT:    pinsrw $4, -24(%rsp,%r8,2), %xmm0
 ; SSE41-NEXT:    pinsrw $5, -24(%rsp,%r9,2), %xmm0
-; SSE41-NEXT:    pinsrw $6, -24(%rsp,%r10,2), %xmm0
-; SSE41-NEXT:    pinsrw $7, -24(%rsp,%rax,2), %xmm0
+; SSE41-NEXT:    pinsrw $6, -24(%rsp,%rax,2), %xmm0
+; SSE41-NEXT:    pinsrw $7, -24(%rsp,%r10,2), %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
@@ -362,6 +362,10 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; AVX-NEXT:    # kill: def $edx killed $edx def $rdx
 ; AVX-NEXT:    # kill: def $esi killed $esi def $rsi
 ; AVX-NEXT:    # kill: def $edi killed $edi def $rdi
+; AVX-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
+; AVX-NEXT:    andl $7, %r10d
+; AVX-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
+; AVX-NEXT:    andl $7, %eax
 ; AVX-NEXT:    andl $7, %edi
 ; AVX-NEXT:    andl $7, %esi
 ; AVX-NEXT:    andl $7, %edx
@@ -369,10 +373,6 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; AVX-NEXT:    andl $7, %r8d
 ; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX-NEXT:    andl $7, %r9d
-; AVX-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d
-; AVX-NEXT:    andl $7, %r10d
-; AVX-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax
-; AVX-NEXT:    andl $7, %eax
 ; AVX-NEXT:    movzwl -24(%rsp,%rdi,2), %edi
 ; AVX-NEXT:    vmovd %edi, %xmm0
 ; AVX-NEXT:    vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0
@@ -380,8 +380,8 @@ define <8 x i16> @var_shuffle_v8i16_v8i1
 ; AVX-NEXT:    vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
 ; AVX-NEXT:    vpinsrw $4, -24(%rsp,%r8,2), %xmm0, %xmm0
 ; AVX-NEXT:    vpinsrw $5, -24(%rsp,%r9,2), %xmm0, %xmm0
-; AVX-NEXT:    vpinsrw $6, -24(%rsp,%r10,2), %xmm0, %xmm0
-; AVX-NEXT:    vpinsrw $7, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $6, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $7, -24(%rsp,%r10,2), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x0 = extractelement <8 x i16> %x, i16 %i0
   %x1 = extractelement <8 x i16> %x, i16 %i1
@@ -411,9 +411,9 @@ define <16 x i8> @var_shuffle_v16i8_v16i
 ; SSE2-NEXT:    # kill: def $edx killed $edx def $rdx
 ; SSE2-NEXT:    # kill: def $esi killed $esi def $rsi
 ; SSE2-NEXT:    # kill: def $edi killed $edi def $rdi
-; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE2-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
 ; SSE2-NEXT:    andl $15, %eax
+; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE2-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE2-NEXT:    movd %eax, %xmm8
 ; SSE2-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
@@ -495,9 +495,9 @@ define <16 x i8> @var_shuffle_v16i8_v16i
 ; SSSE3-NEXT:    # kill: def $edx killed $edx def $rdx
 ; SSSE3-NEXT:    # kill: def $esi killed $esi def $rsi
 ; SSSE3-NEXT:    # kill: def $edi killed $edi def $rdi
-; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSSE3-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
 ; SSSE3-NEXT:    andl $15, %eax
+; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSSE3-NEXT:    movd %eax, %xmm8
 ; SSSE3-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax
@@ -723,12 +723,12 @@ define <4 x i32> @mem_shuffle_v4i32_v4i3
 ; SSE2-NEXT:    movl (%rdi), %eax
 ; SSE2-NEXT:    movl 4(%rdi), %ecx
 ; SSE2-NEXT:    andl $3, %eax
-; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE2-NEXT:    andl $3, %ecx
 ; SSE2-NEXT:    movl 8(%rdi), %edx
 ; SSE2-NEXT:    andl $3, %edx
 ; SSE2-NEXT:    movl 12(%rdi), %esi
 ; SSE2-NEXT:    andl $3, %esi
+; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -743,12 +743,12 @@ define <4 x i32> @mem_shuffle_v4i32_v4i3
 ; SSSE3-NEXT:    movl (%rdi), %eax
 ; SSSE3-NEXT:    movl 4(%rdi), %ecx
 ; SSSE3-NEXT:    andl $3, %eax
-; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSSE3-NEXT:    andl $3, %ecx
 ; SSSE3-NEXT:    movl 8(%rdi), %edx
 ; SSSE3-NEXT:    andl $3, %edx
 ; SSSE3-NEXT:    movl 12(%rdi), %esi
 ; SSSE3-NEXT:    andl $3, %esi
+; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSSE3-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSSE3-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSSE3-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -763,12 +763,12 @@ define <4 x i32> @mem_shuffle_v4i32_v4i3
 ; SSE41-NEXT:    movl (%rdi), %eax
 ; SSE41-NEXT:    movl 4(%rdi), %ecx
 ; SSE41-NEXT:    andl $3, %eax
-; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE41-NEXT:    andl $3, %ecx
 ; SSE41-NEXT:    movl 8(%rdi), %edx
 ; SSE41-NEXT:    andl $3, %edx
 ; SSE41-NEXT:    movl 12(%rdi), %esi
 ; SSE41-NEXT:    andl $3, %esi
+; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE41-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE41-NEXT:    pinsrd $1, -24(%rsp,%rcx,4), %xmm0
 ; SSE41-NEXT:    pinsrd $2, -24(%rsp,%rdx,4), %xmm0
@@ -780,12 +780,12 @@ define <4 x i32> @mem_shuffle_v4i32_v4i3
 ; AVX-NEXT:    movl (%rdi), %eax
 ; AVX-NEXT:    movl 4(%rdi), %ecx
 ; AVX-NEXT:    andl $3, %eax
-; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX-NEXT:    andl $3, %ecx
 ; AVX-NEXT:    movl 8(%rdi), %edx
 ; AVX-NEXT:    andl $3, %edx
 ; AVX-NEXT:    movl 12(%rdi), %esi
 ; AVX-NEXT:    andl $3, %esi
+; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vpinsrd $1, -24(%rsp,%rcx,4), %xmm0, %xmm0
 ; AVX-NEXT:    vpinsrd $2, -24(%rsp,%rdx,4), %xmm0, %xmm0
@@ -813,68 +813,76 @@ define <4 x i32> @mem_shuffle_v4i32_v4i3
 define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8* %i) nounwind {
 ; SSE2-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
 ; SSE2:       # %bb.0:
+; SSE2-NEXT:    pushq %rbp
+; SSE2-NEXT:    pushq %r15
+; SSE2-NEXT:    pushq %r14
+; SSE2-NEXT:    pushq %r13
+; SSE2-NEXT:    pushq %r12
+; SSE2-NEXT:    pushq %rbx
 ; SSE2-NEXT:    movzbl (%rdi), %eax
-; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT:    movzbl 15(%rdi), %ecx
-; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm8
-; SSE2-NEXT:    movzbl 14(%rdi), %ecx
-; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm15
-; SSE2-NEXT:    movzbl 13(%rdi), %ecx
-; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm9
-; SSE2-NEXT:    movzbl 12(%rdi), %ecx
-; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm3
-; SSE2-NEXT:    movzbl 11(%rdi), %ecx
-; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm10
+; SSE2-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT:    movzbl 1(%rdi), %r9d
+; SSE2-NEXT:    movzbl 2(%rdi), %r10d
+; SSE2-NEXT:    movzbl 3(%rdi), %r11d
+; SSE2-NEXT:    movzbl 4(%rdi), %r14d
+; SSE2-NEXT:    movzbl 5(%rdi), %r15d
+; SSE2-NEXT:    movzbl 6(%rdi), %r12d
+; SSE2-NEXT:    movzbl 7(%rdi), %r13d
+; SSE2-NEXT:    movzbl 8(%rdi), %ebx
+; SSE2-NEXT:    movzbl 9(%rdi), %r8d
 ; SSE2-NEXT:    movzbl 10(%rdi), %ecx
+; SSE2-NEXT:    movzbl 11(%rdi), %edx
+; SSE2-NEXT:    movzbl 12(%rdi), %esi
+; SSE2-NEXT:    movzbl 13(%rdi), %ebp
+; SSE2-NEXT:    movzbl 14(%rdi), %eax
+; SSE2-NEXT:    movzbl 15(%rdi), %edi
+; SSE2-NEXT:    andl $15, %edi
+; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movzbl -24(%rsp,%rdi), %edi
+; SSE2-NEXT:    movd %edi, %xmm8
+; SSE2-NEXT:    andl $15, %eax
+; SSE2-NEXT:    movzbl -24(%rsp,%rax), %eax
+; SSE2-NEXT:    movd %eax, %xmm15
+; SSE2-NEXT:    andl $15, %ebp
+; SSE2-NEXT:    movzbl -24(%rsp,%rbp), %eax
+; SSE2-NEXT:    movd %eax, %xmm9
+; SSE2-NEXT:    andl $15, %esi
+; SSE2-NEXT:    movzbl -24(%rsp,%rsi), %eax
+; SSE2-NEXT:    movd %eax, %xmm3
+; SSE2-NEXT:    andl $15, %edx
+; SSE2-NEXT:    movzbl -24(%rsp,%rdx), %eax
+; SSE2-NEXT:    movd %eax, %xmm10
 ; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm7
-; SSE2-NEXT:    movzbl 9(%rdi), %ecx
-; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm11
-; SSE2-NEXT:    movzbl 8(%rdi), %ecx
-; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm6
-; SSE2-NEXT:    movzbl 7(%rdi), %ecx
-; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm12
-; SSE2-NEXT:    movzbl 6(%rdi), %ecx
-; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm5
-; SSE2-NEXT:    movzbl 5(%rdi), %ecx
-; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm13
-; SSE2-NEXT:    movzbl 4(%rdi), %ecx
-; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm4
-; SSE2-NEXT:    movzbl 3(%rdi), %ecx
-; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm14
-; SSE2-NEXT:    movzbl 2(%rdi), %ecx
-; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm1
-; SSE2-NEXT:    movzbl 1(%rdi), %ecx
-; SSE2-NEXT:    andl $15, %ecx
-; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSE2-NEXT:    movd %ecx, %xmm2
+; SSE2-NEXT:    movzbl -24(%rsp,%rcx), %eax
+; SSE2-NEXT:    movd %eax, %xmm7
+; SSE2-NEXT:    andl $15, %r8d
+; SSE2-NEXT:    movzbl -24(%rsp,%r8), %eax
+; SSE2-NEXT:    movd %eax, %xmm11
+; SSE2-NEXT:    andl $15, %ebx
+; SSE2-NEXT:    movzbl -24(%rsp,%rbx), %eax
+; SSE2-NEXT:    movd %eax, %xmm6
+; SSE2-NEXT:    andl $15, %r13d
+; SSE2-NEXT:    movzbl -24(%rsp,%r13), %eax
+; SSE2-NEXT:    movd %eax, %xmm12
+; SSE2-NEXT:    andl $15, %r12d
+; SSE2-NEXT:    movzbl -24(%rsp,%r12), %eax
+; SSE2-NEXT:    movd %eax, %xmm5
+; SSE2-NEXT:    andl $15, %r15d
+; SSE2-NEXT:    movzbl -24(%rsp,%r15), %eax
+; SSE2-NEXT:    movd %eax, %xmm13
+; SSE2-NEXT:    andl $15, %r14d
+; SSE2-NEXT:    movzbl -24(%rsp,%r14), %eax
+; SSE2-NEXT:    movd %eax, %xmm4
+; SSE2-NEXT:    andl $15, %r11d
+; SSE2-NEXT:    movzbl -24(%rsp,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm14
+; SSE2-NEXT:    andl $15, %r10d
+; SSE2-NEXT:    movzbl -24(%rsp,%r10), %eax
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    andl $15, %r9d
+; SSE2-NEXT:    movzbl -24(%rsp,%r9), %eax
+; SSE2-NEXT:    movd %eax, %xmm2
+; SSE2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
 ; SSE2-NEXT:    andl $15, %eax
 ; SSE2-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE2-NEXT:    movd %eax, %xmm0
@@ -893,72 +901,86 @@ define <16 x i8> @mem_shuffle_v16i8_v16i
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
+; SSE2-NEXT:    popq %rbx
+; SSE2-NEXT:    popq %r12
+; SSE2-NEXT:    popq %r13
+; SSE2-NEXT:    popq %r14
+; SSE2-NEXT:    popq %r15
+; SSE2-NEXT:    popq %rbp
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
 ; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    pushq %rbp
+; SSSE3-NEXT:    pushq %r15
+; SSSE3-NEXT:    pushq %r14
+; SSSE3-NEXT:    pushq %r13
+; SSSE3-NEXT:    pushq %r12
+; SSSE3-NEXT:    pushq %rbx
 ; SSSE3-NEXT:    movzbl (%rdi), %eax
-; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT:    movzbl 15(%rdi), %ecx
-; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm8
-; SSSE3-NEXT:    movzbl 14(%rdi), %ecx
-; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm15
-; SSSE3-NEXT:    movzbl 13(%rdi), %ecx
-; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm9
-; SSSE3-NEXT:    movzbl 12(%rdi), %ecx
-; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm3
-; SSSE3-NEXT:    movzbl 11(%rdi), %ecx
-; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm10
+; SSSE3-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSSE3-NEXT:    movzbl 1(%rdi), %r9d
+; SSSE3-NEXT:    movzbl 2(%rdi), %r10d
+; SSSE3-NEXT:    movzbl 3(%rdi), %r11d
+; SSSE3-NEXT:    movzbl 4(%rdi), %r14d
+; SSSE3-NEXT:    movzbl 5(%rdi), %r15d
+; SSSE3-NEXT:    movzbl 6(%rdi), %r12d
+; SSSE3-NEXT:    movzbl 7(%rdi), %r13d
+; SSSE3-NEXT:    movzbl 8(%rdi), %ebx
+; SSSE3-NEXT:    movzbl 9(%rdi), %r8d
 ; SSSE3-NEXT:    movzbl 10(%rdi), %ecx
+; SSSE3-NEXT:    movzbl 11(%rdi), %edx
+; SSSE3-NEXT:    movzbl 12(%rdi), %esi
+; SSSE3-NEXT:    movzbl 13(%rdi), %ebp
+; SSSE3-NEXT:    movzbl 14(%rdi), %eax
+; SSSE3-NEXT:    movzbl 15(%rdi), %edi
+; SSSE3-NEXT:    andl $15, %edi
+; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movzbl -24(%rsp,%rdi), %edi
+; SSSE3-NEXT:    movd %edi, %xmm8
+; SSSE3-NEXT:    andl $15, %eax
+; SSSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT:    movd %eax, %xmm15
+; SSSE3-NEXT:    andl $15, %ebp
+; SSSE3-NEXT:    movzbl -24(%rsp,%rbp), %eax
+; SSSE3-NEXT:    movd %eax, %xmm9
+; SSSE3-NEXT:    andl $15, %esi
+; SSSE3-NEXT:    movzbl -24(%rsp,%rsi), %eax
+; SSSE3-NEXT:    movd %eax, %xmm3
+; SSSE3-NEXT:    andl $15, %edx
+; SSSE3-NEXT:    movzbl -24(%rsp,%rdx), %eax
+; SSSE3-NEXT:    movd %eax, %xmm10
 ; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm7
-; SSSE3-NEXT:    movzbl 9(%rdi), %ecx
-; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm11
-; SSSE3-NEXT:    movzbl 8(%rdi), %ecx
-; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm6
-; SSSE3-NEXT:    movzbl 7(%rdi), %ecx
-; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm12
-; SSSE3-NEXT:    movzbl 6(%rdi), %ecx
-; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm5
-; SSSE3-NEXT:    movzbl 5(%rdi), %ecx
-; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm13
-; SSSE3-NEXT:    movzbl 4(%rdi), %ecx
-; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm4
-; SSSE3-NEXT:    movzbl 3(%rdi), %ecx
-; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm14
-; SSSE3-NEXT:    movzbl 2(%rdi), %ecx
-; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm1
-; SSSE3-NEXT:    movzbl 1(%rdi), %ecx
-; SSSE3-NEXT:    andl $15, %ecx
-; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm2
+; SSSE3-NEXT:    movzbl -24(%rsp,%rcx), %eax
+; SSSE3-NEXT:    movd %eax, %xmm7
+; SSSE3-NEXT:    andl $15, %r8d
+; SSSE3-NEXT:    movzbl -24(%rsp,%r8), %eax
+; SSSE3-NEXT:    movd %eax, %xmm11
+; SSSE3-NEXT:    andl $15, %ebx
+; SSSE3-NEXT:    movzbl -24(%rsp,%rbx), %eax
+; SSSE3-NEXT:    movd %eax, %xmm6
+; SSSE3-NEXT:    andl $15, %r13d
+; SSSE3-NEXT:    movzbl -24(%rsp,%r13), %eax
+; SSSE3-NEXT:    movd %eax, %xmm12
+; SSSE3-NEXT:    andl $15, %r12d
+; SSSE3-NEXT:    movzbl -24(%rsp,%r12), %eax
+; SSSE3-NEXT:    movd %eax, %xmm5
+; SSSE3-NEXT:    andl $15, %r15d
+; SSSE3-NEXT:    movzbl -24(%rsp,%r15), %eax
+; SSSE3-NEXT:    movd %eax, %xmm13
+; SSSE3-NEXT:    andl $15, %r14d
+; SSSE3-NEXT:    movzbl -24(%rsp,%r14), %eax
+; SSSE3-NEXT:    movd %eax, %xmm4
+; SSSE3-NEXT:    andl $15, %r11d
+; SSSE3-NEXT:    movzbl -24(%rsp,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm14
+; SSSE3-NEXT:    andl $15, %r10d
+; SSSE3-NEXT:    movzbl -24(%rsp,%r10), %eax
+; SSSE3-NEXT:    movd %eax, %xmm1
+; SSSE3-NEXT:    andl $15, %r9d
+; SSSE3-NEXT:    movzbl -24(%rsp,%r9), %eax
+; SSSE3-NEXT:    movd %eax, %xmm2
+; SSSE3-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
 ; SSSE3-NEXT:    andl $15, %eax
 ; SSSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSSE3-NEXT:    movd %eax, %xmm0
@@ -977,114 +999,148 @@ define <16 x i8> @mem_shuffle_v16i8_v16i
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
 ; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
+; SSSE3-NEXT:    popq %rbx
+; SSSE3-NEXT:    popq %r12
+; SSSE3-NEXT:    popq %r13
+; SSSE3-NEXT:    popq %r14
+; SSSE3-NEXT:    popq %r15
+; SSSE3-NEXT:    popq %rbp
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movzbl (%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE41-NEXT:    movzbl -24(%rsp,%rax), %eax
-; SSE41-NEXT:    movd %eax, %xmm0
-; SSE41-NEXT:    movzbl 1(%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    pinsrb $1, -24(%rsp,%rax), %xmm0
+; SSE41-NEXT:    pushq %rbp
+; SSE41-NEXT:    pushq %r15
+; SSE41-NEXT:    pushq %r14
+; SSE41-NEXT:    pushq %r13
+; SSE41-NEXT:    pushq %r12
+; SSE41-NEXT:    pushq %rbx
+; SSE41-NEXT:    movzbl (%rdi), %r9d
+; SSE41-NEXT:    andl $15, %r9d
+; SSE41-NEXT:    movzbl 1(%rdi), %ebx
 ; SSE41-NEXT:    movzbl 2(%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    pinsrb $2, -24(%rsp,%rax), %xmm0
-; SSE41-NEXT:    movzbl 3(%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    pinsrb $3, -24(%rsp,%rax), %xmm0
-; SSE41-NEXT:    movzbl 4(%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    pinsrb $4, -24(%rsp,%rax), %xmm0
-; SSE41-NEXT:    movzbl 5(%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    pinsrb $5, -24(%rsp,%rax), %xmm0
-; SSE41-NEXT:    movzbl 6(%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    pinsrb $6, -24(%rsp,%rax), %xmm0
-; SSE41-NEXT:    movzbl 7(%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    pinsrb $7, -24(%rsp,%rax), %xmm0
-; SSE41-NEXT:    movzbl 8(%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    pinsrb $8, -24(%rsp,%rax), %xmm0
-; SSE41-NEXT:    movzbl 9(%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    pinsrb $9, -24(%rsp,%rax), %xmm0
-; SSE41-NEXT:    movzbl 10(%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    pinsrb $10, -24(%rsp,%rax), %xmm0
-; SSE41-NEXT:    movzbl 11(%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    pinsrb $11, -24(%rsp,%rax), %xmm0
-; SSE41-NEXT:    movzbl 12(%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    pinsrb $12, -24(%rsp,%rax), %xmm0
-; SSE41-NEXT:    movzbl 13(%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    pinsrb $13, -24(%rsp,%rax), %xmm0
+; SSE41-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE41-NEXT:    movzbl 3(%rdi), %r11d
+; SSE41-NEXT:    movzbl 4(%rdi), %r14d
+; SSE41-NEXT:    movzbl 5(%rdi), %r15d
+; SSE41-NEXT:    movzbl 6(%rdi), %r12d
+; SSE41-NEXT:    movzbl 7(%rdi), %r13d
+; SSE41-NEXT:    movzbl 8(%rdi), %r10d
+; SSE41-NEXT:    movzbl 9(%rdi), %r8d
+; SSE41-NEXT:    movzbl 10(%rdi), %ecx
+; SSE41-NEXT:    movzbl 11(%rdi), %edx
+; SSE41-NEXT:    movzbl 12(%rdi), %esi
+; SSE41-NEXT:    movzbl 13(%rdi), %ebp
 ; SSE41-NEXT:    movzbl 14(%rdi), %eax
+; SSE41-NEXT:    movzbl 15(%rdi), %edi
+; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE41-NEXT:    movzbl -24(%rsp,%r9), %r9d
+; SSE41-NEXT:    movd %r9d, %xmm0
+; SSE41-NEXT:    andl $15, %ebx
+; SSE41-NEXT:    pinsrb $1, -24(%rsp,%rbx), %xmm0
+; SSE41-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; SSE41-NEXT:    andl $15, %ebx
+; SSE41-NEXT:    pinsrb $2, -24(%rsp,%rbx), %xmm0
+; SSE41-NEXT:    andl $15, %r11d
+; SSE41-NEXT:    pinsrb $3, -24(%rsp,%r11), %xmm0
+; SSE41-NEXT:    andl $15, %r14d
+; SSE41-NEXT:    pinsrb $4, -24(%rsp,%r14), %xmm0
+; SSE41-NEXT:    andl $15, %r15d
+; SSE41-NEXT:    pinsrb $5, -24(%rsp,%r15), %xmm0
+; SSE41-NEXT:    andl $15, %r12d
+; SSE41-NEXT:    pinsrb $6, -24(%rsp,%r12), %xmm0
+; SSE41-NEXT:    andl $15, %r13d
+; SSE41-NEXT:    pinsrb $7, -24(%rsp,%r13), %xmm0
+; SSE41-NEXT:    andl $15, %r10d
+; SSE41-NEXT:    pinsrb $8, -24(%rsp,%r10), %xmm0
+; SSE41-NEXT:    andl $15, %r8d
+; SSE41-NEXT:    pinsrb $9, -24(%rsp,%r8), %xmm0
+; SSE41-NEXT:    andl $15, %ecx
+; SSE41-NEXT:    pinsrb $10, -24(%rsp,%rcx), %xmm0
+; SSE41-NEXT:    andl $15, %edx
+; SSE41-NEXT:    pinsrb $11, -24(%rsp,%rdx), %xmm0
+; SSE41-NEXT:    andl $15, %esi
+; SSE41-NEXT:    pinsrb $12, -24(%rsp,%rsi), %xmm0
+; SSE41-NEXT:    andl $15, %ebp
+; SSE41-NEXT:    pinsrb $13, -24(%rsp,%rbp), %xmm0
 ; SSE41-NEXT:    andl $15, %eax
 ; SSE41-NEXT:    pinsrb $14, -24(%rsp,%rax), %xmm0
-; SSE41-NEXT:    movzbl 15(%rdi), %eax
-; SSE41-NEXT:    andl $15, %eax
-; SSE41-NEXT:    pinsrb $15, -24(%rsp,%rax), %xmm0
+; SSE41-NEXT:    andl $15, %edi
+; SSE41-NEXT:    pinsrb $15, -24(%rsp,%rdi), %xmm0
+; SSE41-NEXT:    popq %rbx
+; SSE41-NEXT:    popq %r12
+; SSE41-NEXT:    popq %r13
+; SSE41-NEXT:    popq %r14
+; SSE41-NEXT:    popq %r15
+; SSE41-NEXT:    popq %rbp
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    movzbl (%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT:    movzbl -24(%rsp,%rax), %eax
-; AVX-NEXT:    vmovd %eax, %xmm0
-; AVX-NEXT:    movzbl 1(%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vpinsrb $1, -24(%rsp,%rax), %xmm0, %xmm0
+; AVX-NEXT:    pushq %rbp
+; AVX-NEXT:    pushq %r15
+; AVX-NEXT:    pushq %r14
+; AVX-NEXT:    pushq %r13
+; AVX-NEXT:    pushq %r12
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    movzbl (%rdi), %r9d
+; AVX-NEXT:    andl $15, %r9d
+; AVX-NEXT:    movzbl 1(%rdi), %ebx
 ; AVX-NEXT:    movzbl 2(%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vpinsrb $2, -24(%rsp,%rax), %xmm0, %xmm0
-; AVX-NEXT:    movzbl 3(%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vpinsrb $3, -24(%rsp,%rax), %xmm0, %xmm0
-; AVX-NEXT:    movzbl 4(%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vpinsrb $4, -24(%rsp,%rax), %xmm0, %xmm0
-; AVX-NEXT:    movzbl 5(%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vpinsrb $5, -24(%rsp,%rax), %xmm0, %xmm0
-; AVX-NEXT:    movzbl 6(%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vpinsrb $6, -24(%rsp,%rax), %xmm0, %xmm0
-; AVX-NEXT:    movzbl 7(%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vpinsrb $7, -24(%rsp,%rax), %xmm0, %xmm0
-; AVX-NEXT:    movzbl 8(%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vpinsrb $8, -24(%rsp,%rax), %xmm0, %xmm0
-; AVX-NEXT:    movzbl 9(%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vpinsrb $9, -24(%rsp,%rax), %xmm0, %xmm0
-; AVX-NEXT:    movzbl 10(%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vpinsrb $10, -24(%rsp,%rax), %xmm0, %xmm0
-; AVX-NEXT:    movzbl 11(%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vpinsrb $11, -24(%rsp,%rax), %xmm0, %xmm0
-; AVX-NEXT:    movzbl 12(%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vpinsrb $12, -24(%rsp,%rax), %xmm0, %xmm0
-; AVX-NEXT:    movzbl 13(%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vpinsrb $13, -24(%rsp,%rax), %xmm0, %xmm0
+; AVX-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX-NEXT:    movzbl 3(%rdi), %r11d
+; AVX-NEXT:    movzbl 4(%rdi), %r14d
+; AVX-NEXT:    movzbl 5(%rdi), %r15d
+; AVX-NEXT:    movzbl 6(%rdi), %r12d
+; AVX-NEXT:    movzbl 7(%rdi), %r13d
+; AVX-NEXT:    movzbl 8(%rdi), %r10d
+; AVX-NEXT:    movzbl 9(%rdi), %r8d
+; AVX-NEXT:    movzbl 10(%rdi), %ecx
+; AVX-NEXT:    movzbl 11(%rdi), %edx
+; AVX-NEXT:    movzbl 12(%rdi), %esi
+; AVX-NEXT:    movzbl 13(%rdi), %ebp
 ; AVX-NEXT:    movzbl 14(%rdi), %eax
+; AVX-NEXT:    movzbl 15(%rdi), %edi
+; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    movzbl -24(%rsp,%r9), %r9d
+; AVX-NEXT:    vmovd %r9d, %xmm0
+; AVX-NEXT:    andl $15, %ebx
+; AVX-NEXT:    vpinsrb $1, -24(%rsp,%rbx), %xmm0, %xmm0
+; AVX-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX-NEXT:    andl $15, %ebx
+; AVX-NEXT:    vpinsrb $2, -24(%rsp,%rbx), %xmm0, %xmm0
+; AVX-NEXT:    andl $15, %r11d
+; AVX-NEXT:    vpinsrb $3, -24(%rsp,%r11), %xmm0, %xmm0
+; AVX-NEXT:    andl $15, %r14d
+; AVX-NEXT:    vpinsrb $4, -24(%rsp,%r14), %xmm0, %xmm0
+; AVX-NEXT:    andl $15, %r15d
+; AVX-NEXT:    vpinsrb $5, -24(%rsp,%r15), %xmm0, %xmm0
+; AVX-NEXT:    andl $15, %r12d
+; AVX-NEXT:    vpinsrb $6, -24(%rsp,%r12), %xmm0, %xmm0
+; AVX-NEXT:    andl $15, %r13d
+; AVX-NEXT:    vpinsrb $7, -24(%rsp,%r13), %xmm0, %xmm0
+; AVX-NEXT:    andl $15, %r10d
+; AVX-NEXT:    vpinsrb $8, -24(%rsp,%r10), %xmm0, %xmm0
+; AVX-NEXT:    andl $15, %r8d
+; AVX-NEXT:    vpinsrb $9, -24(%rsp,%r8), %xmm0, %xmm0
+; AVX-NEXT:    andl $15, %ecx
+; AVX-NEXT:    vpinsrb $10, -24(%rsp,%rcx), %xmm0, %xmm0
+; AVX-NEXT:    andl $15, %edx
+; AVX-NEXT:    vpinsrb $11, -24(%rsp,%rdx), %xmm0, %xmm0
+; AVX-NEXT:    andl $15, %esi
+; AVX-NEXT:    vpinsrb $12, -24(%rsp,%rsi), %xmm0, %xmm0
+; AVX-NEXT:    andl $15, %ebp
+; AVX-NEXT:    vpinsrb $13, -24(%rsp,%rbp), %xmm0, %xmm0
 ; AVX-NEXT:    andl $15, %eax
 ; AVX-NEXT:    vpinsrb $14, -24(%rsp,%rax), %xmm0, %xmm0
-; AVX-NEXT:    movzbl 15(%rdi), %eax
-; AVX-NEXT:    andl $15, %eax
-; AVX-NEXT:    vpinsrb $15, -24(%rsp,%rax), %xmm0, %xmm0
+; AVX-NEXT:    andl $15, %edi
+; AVX-NEXT:    vpinsrb $15, -24(%rsp,%rdi), %xmm0, %xmm0
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    popq %r12
+; AVX-NEXT:    popq %r13
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    popq %r15
+; AVX-NEXT:    popq %rbp
 ; AVX-NEXT:    retq
   %p0  = getelementptr inbounds i8, i8* %i, i64 0
   %p1  = getelementptr inbounds i8, i8* %i, i64 1

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll?rev=332118&r1=332117&r2=332118&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll Fri May 11 11:40:08 2018
@@ -189,6 +189,10 @@ define <8 x float> @var_shuffle_v8f32_v8
 ; ALL-NEXT:    # kill: def $edx killed $edx def $rdx
 ; ALL-NEXT:    # kill: def $esi killed $esi def $rsi
 ; ALL-NEXT:    # kill: def $edi killed $edi def $rdi
+; ALL-NEXT:    movl 24(%rbp), %r10d
+; ALL-NEXT:    andl $7, %r10d
+; ALL-NEXT:    movl 16(%rbp), %eax
+; ALL-NEXT:    andl $7, %eax
 ; ALL-NEXT:    andl $7, %edi
 ; ALL-NEXT:    andl $7, %esi
 ; ALL-NEXT:    andl $7, %edx
@@ -196,10 +200,6 @@ define <8 x float> @var_shuffle_v8f32_v8
 ; ALL-NEXT:    andl $7, %r8d
 ; ALL-NEXT:    vmovaps %ymm0, (%rsp)
 ; ALL-NEXT:    andl $7, %r9d
-; ALL-NEXT:    movl 16(%rbp), %r10d
-; ALL-NEXT:    andl $7, %r10d
-; ALL-NEXT:    movl 24(%rbp), %eax
-; ALL-NEXT:    andl $7, %eax
 ; ALL-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
@@ -240,6 +240,10 @@ define <8 x float> @var_shuffle_v8f32_v4
 ; ALL-NEXT:    # kill: def $edx killed $edx def $rdx
 ; ALL-NEXT:    # kill: def $esi killed $esi def $rsi
 ; ALL-NEXT:    # kill: def $edi killed $edi def $rdi
+; ALL-NEXT:    movl {{[0-9]+}}(%rsp), %r10d
+; ALL-NEXT:    andl $3, %r10d
+; ALL-NEXT:    movl {{[0-9]+}}(%rsp), %eax
+; ALL-NEXT:    andl $3, %eax
 ; ALL-NEXT:    andl $3, %edi
 ; ALL-NEXT:    andl $3, %esi
 ; ALL-NEXT:    andl $3, %edx
@@ -247,10 +251,6 @@ define <8 x float> @var_shuffle_v8f32_v4
 ; ALL-NEXT:    andl $3, %r8d
 ; ALL-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    andl $3, %r9d
-; ALL-NEXT:    movl {{[0-9]+}}(%rsp), %r10d
-; ALL-NEXT:    andl $3, %r10d
-; ALL-NEXT:    movl {{[0-9]+}}(%rsp), %eax
-; ALL-NEXT:    andl $3, %eax
 ; ALL-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]

Modified: llvm/trunk/test/CodeGen/X86/widen_arith-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_arith-4.ll?rev=332118&r1=332117&r2=332118&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_arith-4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_arith-4.ll Fri May 11 11:40:08 2018
@@ -19,15 +19,15 @@ define void @update(<5 x i16>* %dst, <5
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB0_2: # %forbody
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
-; CHECK-NEXT:    movslq -{{[0-9]+}}(%rsp), %rcx
-; CHECK-NEXT:    shlq $4, %rcx
+; CHECK-NEXT:    movslq -{{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
+; CHECK-NEXT:    shlq $4, %rax
 ; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rdx
-; CHECK-NEXT:    movdqa (%rdx,%rcx), %xmm2
+; CHECK-NEXT:    movdqa (%rdx,%rax), %xmm2
 ; CHECK-NEXT:    psubw %xmm0, %xmm2
 ; CHECK-NEXT:    pmullw %xmm1, %xmm2
-; CHECK-NEXT:    pextrw $4, %xmm2, 8(%rax,%rcx)
-; CHECK-NEXT:    movq %xmm2, (%rax,%rcx)
+; CHECK-NEXT:    pextrw $4, %xmm2, 8(%rcx,%rax)
+; CHECK-NEXT:    movq %xmm2, (%rcx,%rax)
 ; CHECK-NEXT:    incl -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:  .LBB0_1: # %forcond
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1

Modified: llvm/trunk/test/CodeGen/X86/widen_arith-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_arith-5.ll?rev=332118&r1=332117&r2=332118&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_arith-5.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_arith-5.ll Fri May 11 11:40:08 2018
@@ -18,15 +18,15 @@ define void @update(<3 x i32>* %dst, <3
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB0_2: # %forbody
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rax
-; CHECK-NEXT:    movslq -{{[0-9]+}}(%rsp), %rcx
-; CHECK-NEXT:    shlq $4, %rcx
+; CHECK-NEXT:    movslq -{{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
+; CHECK-NEXT:    shlq $4, %rax
 ; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rdx
-; CHECK-NEXT:    movdqa (%rdx,%rcx), %xmm1
+; CHECK-NEXT:    movdqa (%rdx,%rax), %xmm1
 ; CHECK-NEXT:    pslld $2, %xmm1
 ; CHECK-NEXT:    psubd %xmm0, %xmm1
-; CHECK-NEXT:    pextrd $2, %xmm1, 8(%rax,%rcx)
-; CHECK-NEXT:    movq %xmm1, (%rax,%rcx)
+; CHECK-NEXT:    pextrd $2, %xmm1, 8(%rcx,%rax)
+; CHECK-NEXT:    movq %xmm1, (%rcx,%rax)
 ; CHECK-NEXT:    incl -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:  .LBB0_1: # %forcond
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
